xor lists

classic Classic list List threaded Threaded
4 messages Options
Reply | Threaded
Open this post in threaded view
|

xor lists

Ted Unangst-6
At some point, every young programmer hears a story about how you can
use xor to store the pointers of a list. It's a stupid story for many
reasons, not least of which is the fact that nobody ever does this.
Sounds like a challenge.

This diff doesn't actually implement anything like what you're
probably thinking of when somebody says XORed list pointers. What I've
done is create an alternate universe version of the SIMPLEQ queue.h
macros that automatically XORs all the list pointers with a magic
cookie stored in the list head.

The reason we do this is because malloc and pool maintain their free
lists as simple linked lists, with internal pointers. If the naughty
people discover a use after free bug, this basically gives them a
write anything to the kernel's memory ticket. That's bad. If we
encrypt all the pointers, that overwrite suddenly becomes a lot less
reliable. The cookie is stored in a different part of memory, so we have
to hope it doesn't leak out somehow.

I'm not the first to think of this, apparently even my phone already
does something similar. In fact, it may even be doing something
better, but the approach taken here is drop in simple. Anywhere you
have a simpleq list, now you can have an xor protected simpleq list.

If you are concerned about performance, I recommend you upgrade to a
computer with a hardware accelerated xor instruction.

Index: kern/subr_pool.c
===================================================================
RCS file: /cvs/src/sys/kern/subr_pool.c,v
retrieving revision 1.118
diff -u -p -r1.118 subr_pool.c
--- kern/subr_pool.c 6 Apr 2013 13:41:11 -0000 1.118
+++ kern/subr_pool.c 15 Apr 2013 13:06:34 -0000
@@ -67,7 +67,7 @@ struct pool_item_header {
  /* Page headers */
  LIST_ENTRY(pool_item_header)
  ph_pagelist; /* pool page list */
- SIMPLEQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
+ XSIMPLEQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
  RB_ENTRY(pool_item_header)
  ph_node; /* Off-page page headers */
  int ph_nmissing; /* # of chunks in use */
@@ -80,7 +80,7 @@ struct pool_item_header {
 struct pool_item {
  u_int32_t pi_magic;
  /* Other entries use only this list entry */
- SIMPLEQ_ENTRY(pool_item) pi_list;
+ XSIMPLEQ_ENTRY(pool_item) pi_list;
 };
 
 #ifdef POOL_DEBUG
@@ -620,7 +620,7 @@ startover:
  /* Start the allocation process over. */
  goto startover;
  }
- if ((v = pi = SIMPLEQ_FIRST(&ph->ph_itemlist)) == NULL) {
+ if ((v = pi = XSIMPLEQ_FIRST(&ph->ph_itemlist)) == NULL) {
  panic("pool_do_get: %s: page empty", pp->pr_wchan);
  }
 #ifdef DIAGNOSTIC
@@ -653,7 +653,7 @@ startover:
  /*
  * Remove from item list.
  */
- SIMPLEQ_REMOVE_HEAD(&ph->ph_itemlist, pi_list);
+ XSIMPLEQ_REMOVE_HEAD(&ph->ph_itemlist, pi_list);
  pp->pr_nitems--;
  pp->pr_nout++;
  if (ph->ph_nmissing == 0) {
@@ -671,7 +671,7 @@ startover:
  LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
  }
  ph->ph_nmissing++;
- if (SIMPLEQ_EMPTY(&ph->ph_itemlist)) {
+ if (XSIMPLEQ_EMPTY(&ph->ph_itemlist)) {
 #ifdef DIAGNOSTIC
  if (ph->ph_nmissing != pp->pr_itemsperpage) {
  panic("pool_do_get: %s: nmissing inconsistent",
@@ -771,7 +771,7 @@ pool_do_put(struct pool *pp, void *v)
  }
 #endif /* DIAGNOSTIC */
 
- SIMPLEQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
+ XSIMPLEQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
  ph->ph_nmissing--;
  pp->pr_nitems++;
  pp->pr_nout--;
@@ -874,7 +874,7 @@ pool_prime_page(struct pool *pp, caddr_t
  * Insert page header.
  */
  LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
- SIMPLEQ_INIT(&ph->ph_itemlist);
+ XSIMPLEQ_INIT(&ph->ph_itemlist);
  ph->ph_page = storage;
  ph->ph_pagesize = pp->pr_alloc->pa_pagesz;
  ph->ph_nmissing = 0;
@@ -909,7 +909,7 @@ pool_prime_page(struct pool *pp, caddr_t
  KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
 
  /* Insert on page list */
- SIMPLEQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
+ XSIMPLEQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
 
 #ifdef DIAGNOSTIC
  pi->pi_magic = poison_value(pi);
@@ -1130,7 +1130,7 @@ pool_print_pagelist(struct pool_pagelist
  (*pr)("\t\tpage %p, nmissing %d\n",
     ph->ph_page, ph->ph_nmissing);
 #ifdef DIAGNOSTIC
- SIMPLEQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
+ XSIMPLEQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
  if (pi->pi_magic != poison_value(pi)) {
  (*pr)("\t\t\titem %p, magic 0x%x\n",
     pi, pi->pi_magic);
@@ -1281,9 +1281,9 @@ pool_chk_page(struct pool *pp, struct po
  return 1;
  }
 
- for (pi = SIMPLEQ_FIRST(&ph->ph_itemlist), n = 0;
+ for (pi = XSIMPLEQ_FIRST(&ph->ph_itemlist), n = 0;
      pi != NULL;
-     pi = SIMPLEQ_NEXT(pi,pi_list), n++) {
+     pi = XSIMPLEQ_NEXT(&ph->ph_itemlist, pi, pi_list), n++) {
 
 #ifdef DIAGNOSTIC
  if (pi->pi_magic != poison_value(pi)) {
@@ -1379,7 +1379,7 @@ pool_walk(struct pool *pp, int full,
  n = ph->ph_nmissing;
 
  do {
- SIMPLEQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
+ XSIMPLEQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
  if (cp == (caddr_t)pi)
  break;
  }
Index: kern/kern_malloc.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_malloc.c,v
retrieving revision 1.99
diff -u -p -r1.99 kern_malloc.c
--- kern/kern_malloc.c 6 Apr 2013 03:53:25 -0000 1.99
+++ kern/kern_malloc.c 15 Apr 2013 13:06:34 -0000
@@ -41,6 +41,8 @@
 #include <sys/time.h>
 #include <sys/rwlock.h>
 
+#include <dev/rndvar.h>
+
 #include <uvm/uvm.h>
 
 static __inline__ long BUCKETINDX(size_t sz)
@@ -132,7 +134,7 @@ struct kmem_freelist {
  int32_t kf_spare0;
  int16_t kf_type;
  int16_t kf_spare1;
- SIMPLEQ_ENTRY(kmem_freelist) kf_flist;
+ XSIMPLEQ_ENTRY(kmem_freelist) kf_flist;
 };
 
 #ifdef DIAGNOSTIC
@@ -221,7 +223,7 @@ malloc(unsigned long size, int type, int
  allocsize = round_page(size);
  else
  allocsize = 1 << indx;
- if (SIMPLEQ_FIRST(&kbp->kb_freelist) == NULL) {
+ if (XSIMPLEQ_FIRST(&kbp->kb_freelist) == NULL) {
  npg = atop(round_page(allocsize));
  va = (caddr_t)uvm_km_kmemalloc_pla(kmem_map, NULL,
     (vsize_t)ptoa(npg), 0,
@@ -273,7 +275,7 @@ malloc(unsigned long size, int type, int
  poison_mem(cp, allocsize);
  freep->kf_type = M_FREE;
 #endif /* DIAGNOSTIC */
- SIMPLEQ_INSERT_HEAD(&kbp->kb_freelist, freep, kf_flist);
+ XSIMPLEQ_INSERT_HEAD(&kbp->kb_freelist, freep, kf_flist);
  if (cp <= va)
  break;
  cp -= allocsize;
@@ -283,15 +285,15 @@ malloc(unsigned long size, int type, int
  freshalloc = 0;
 #endif
  }
- freep = SIMPLEQ_FIRST(&kbp->kb_freelist);
- SIMPLEQ_REMOVE_HEAD(&kbp->kb_freelist, kf_flist);
+ freep = XSIMPLEQ_FIRST(&kbp->kb_freelist);
+ XSIMPLEQ_REMOVE_HEAD(&kbp->kb_freelist, kf_flist);
  va = (caddr_t)freep;
 #ifdef DIAGNOSTIC
  savedtype = (unsigned)freep->kf_type < M_LAST ?
  memname[freep->kf_type] : "???";
- if (freshalloc == 0 && SIMPLEQ_FIRST(&kbp->kb_freelist)) {
+ if (freshalloc == 0 && XSIMPLEQ_FIRST(&kbp->kb_freelist)) {
  int rv;
- vaddr_t addr = (vaddr_t)SIMPLEQ_FIRST(&kbp->kb_freelist);
+ vaddr_t addr = (vaddr_t)XSIMPLEQ_FIRST(&kbp->kb_freelist);
 
  vm_map_lock(kmem_map);
  rv = uvm_map_checkprot(kmem_map, addr,
@@ -420,7 +422,7 @@ free(void *addr, int type)
  */
  if (freep->kf_spare0 == poison_value(freep)) {
  struct kmem_freelist *fp;
- SIMPLEQ_FOREACH(fp, &kbp->kb_freelist, kf_flist) {
+ XSIMPLEQ_FOREACH(fp, &kbp->kb_freelist, kf_flist) {
  if (addr != fp)
  continue;
  printf("multiply freed item %p\n", addr);
@@ -453,7 +455,7 @@ free(void *addr, int type)
  wakeup(ksp);
  ksp->ks_inuse--;
 #endif
- SIMPLEQ_INSERT_TAIL(&kbp->kb_freelist, freep, kf_flist);
+ XSIMPLEQ_INSERT_TAIL(&kbp->kb_freelist, freep, kf_flist);
  splx(s);
 }
 
@@ -538,7 +540,7 @@ kmeminit(void)
  kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
  (vsize_t)(nkmempages * sizeof(struct kmemusage)));
  for (indx = 0; indx < MINBUCKET + 16; indx++) {
- SIMPLEQ_INIT(&bucket[indx].kb_freelist);
+ XSIMPLEQ_INIT(&bucket[indx].kb_freelist);
  }
 #ifdef KMEMSTATS
  for (indx = 0; indx < MINBUCKET + 16; indx++) {
Index: sys/malloc.h
===================================================================
RCS file: /cvs/src/sys/sys/malloc.h,v
retrieving revision 1.104
diff -u -p -r1.104 malloc.h
--- sys/malloc.h 6 Apr 2013 03:53:25 -0000 1.104
+++ sys/malloc.h 15 Apr 2013 13:06:34 -0000
@@ -347,7 +347,7 @@ struct kmem_freelist;
  * Set of buckets for each size of memory block that is retained
  */
 struct kmembuckets {
- SIMPLEQ_HEAD(, kmem_freelist) kb_freelist; /* list of free blocks */
+ XSIMPLEQ_HEAD(, kmem_freelist) kb_freelist; /* list of free blocks */
  u_int64_t kb_calls; /* total calls to allocate this size */
  u_int64_t kb_total; /* total number of blocks allocated */
  u_int64_t kb_totalfree; /* # of free elements in this bucket */
Index: sys/queue.h
===================================================================
RCS file: /cvs/src/sys/sys/queue.h,v
retrieving revision 1.36
diff -u -p -r1.36 queue.h
--- sys/queue.h 11 Apr 2012 13:29:14 -0000 1.36
+++ sys/queue.h 15 Apr 2013 13:06:34 -0000
@@ -317,6 +317,84 @@ struct { \
 } while (0)
 
 /*
+ * XOR Simple queue definitions.
+ */
+#define XSIMPLEQ_HEAD(name, type) \
+struct name { \
+ struct type *sqx_first; /* first element */ \
+ struct type **sqx_last; /* addr of last next element */ \
+ unsigned long sqx_cookie; \
+}
+
+#define XSIMPLEQ_ENTRY(type) \
+struct { \
+ struct type *sqx_next; /* next element */ \
+}
+
+/*
+ * Simple queue access methods.
+ */
+#define XSIMPLEQ_XOR(head, ptr)    ((typeof(ptr))((head)->sqx_cookie ^ (unsigned long)(ptr)))
+#define XSIMPLEQ_FIRST(head)    XSIMPLEQ_XOR(head, ((head)->sqx_first))
+#define XSIMPLEQ_END(head)    NULL
+#define XSIMPLEQ_EMPTY(head)    (XSIMPLEQ_FIRST(head) == XSIMPLEQ_END(head))
+#define XSIMPLEQ_NEXT(head, elm, field)    XSIMPLEQ_XOR(head, ((elm)->field.sqx_next))
+
+
+#define XSIMPLEQ_FOREACH(var, head, field) \
+ for ((var) = XSIMPLEQ_FIRST(head); \
+    (var) != XSIMPLEQ_END(head); \
+    (var) = XSIMPLEQ_NEXT(head, var, field))
+
+#define XSIMPLEQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = XSIMPLEQ_FIRST(head); \
+    (var) && ((tvar) = XSIMPLEQ_NEXT(head, var, field), 1); \
+    (var) = (tvar))
+
+/*
+ * Simple queue functions.
+ */
+#define XSIMPLEQ_INIT(head) do { \
+ (head)->sqx_cookie = arc4random(); \
+ if (sizeof(unsigned long) == 8) \
+ (head)->sqx_cookie |= ((unsigned long)arc4random()) << sizeof(long) * 4; \
+ (head)->sqx_first = XSIMPLEQ_XOR(head, NULL); \
+ (head)->sqx_last = XSIMPLEQ_XOR(head, &(head)->sqx_first); \
+} while (0)
+
+#define XSIMPLEQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.sqx_next = (head)->sqx_first) == XSIMPLEQ_XOR(head, NULL)) \
+ (head)->sqx_last = XSIMPLEQ_XOR(head, &(elm)->field.sqx_next); \
+ (head)->sqx_first = XSIMPLEQ_XOR(head, (elm)); \
+} while (0)
+
+#define XSIMPLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.sqx_next = XSIMPLEQ_XOR(head, NULL); \
+ *(XSIMPLEQ_XOR(head, (head)->sqx_last)) = XSIMPLEQ_XOR(head, (elm)); \
+ (head)->sqx_last = XSIMPLEQ_XOR(head, &(elm)->field.sqx_next); \
+} while (0)
+
+#define XSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.sqx_next = (listelm)->field.sqx_next) == XSIMPLEQ_XOR(head, NULL)) \
+ (head)->sqx_last = XSIMPLEQ_XOR(head, &(elm)->field.sqx_next); \
+ (listelm)->field.sqx_next = XSIMPLEQ_XOR(head, (elm)); \
+} while (0)
+
+#define XSIMPLEQ_REMOVE_HEAD(head, field) do { \
+ if (((head)->sqx_first = XSIMPLEQ_XOR(head, (head)->sqx_first)->field.sqx_next) == XSIMPLEQ_XOR(head, NULL)) \
+ (head)->sqx_last = XSIMPLEQ_XOR(head, &(head)->sqx_first); \
+} while (0)
+
+#define XSIMPLEQ_REMOVE_AFTER(head, elm, field) do { \
+ if (((elm)->field.sqx_next = XSIMPLEQ_XOR(head, \
+    (elm)->field.sqx_next)->field.sqx_next) \
+    == XSIMPLEQ_XOR(head, NULL)) \
+ (head)->sqx_last = \
+    XSIMPLEQ_XOR(head, &(elm)->field.sqx_next); \
+} while (0)
+
+    
+/*
  * Tail queue definitions.
  */
 #define TAILQ_HEAD(name, type) \


Reply | Threaded
Open this post in threaded view
|

Re: xor lists

Otto Moerbeek
On Wed, Apr 17, 2013 at 08:06:43PM -0400, Ted Unangst wrote:

> At some point, every young programmer hears a story about how you can
> use xor to store the pointers of a list. It's a stupid story for many
> reasons, not least of which is the fact that nobody ever does this.
> Sounds like a challenge.
>
> This diff doesn't actually implement anything like what you're
> probably thinking of when somebody says XORed list pointers. What I've
> done is create an alternate universe version of the SIMPLEQ queue.h
> macros that automatically XORs all the list pointers with a magic
> cookie stored in the list head.
>
> The reason we do this is because malloc and pool maintain their free
> lists as simple linked lists, with internal pointers. If the naughty
> people discover a use after free bug, this basically gives them a
> write anything to the kernel's memory ticket. That's bad. If we
> encrypt all the pointers, that overwrite suddenly becomes a lot less
> reliable. The cookie is stored in a different part of memory, we have to
> hope it doesn't leak out somehow.
>
> I'm not the first to think of this, apparently even my phone already
> does something similar. In fact, it may even be doing something
> better, but the approach taken here is drop in simple. Anywhere you
> have a simpleq list, now you can have an xor protected simpleq list.
>
> If you are concerned about performance, I recommend you upgrade to a
> computer with a hardware accelerated xor instruction.

Bah, the only hardware accelerated operation my computer has is NAND...

        -Otto

>
> Index: kern/subr_pool.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/subr_pool.c,v
> retrieving revision 1.118
> diff -u -p -r1.118 subr_pool.c
> --- kern/subr_pool.c 6 Apr 2013 13:41:11 -0000 1.118
> +++ kern/subr_pool.c 15 Apr 2013 13:06:34 -0000
> @@ -67,7 +67,7 @@ struct pool_item_header {
>   /* Page headers */
>   LIST_ENTRY(pool_item_header)
>   ph_pagelist; /* pool page list */
> - SIMPLEQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
> + XSIMPLEQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
>   RB_ENTRY(pool_item_header)
>   ph_node; /* Off-page page headers */
>   int ph_nmissing; /* # of chunks in use */
> @@ -80,7 +80,7 @@ struct pool_item_header {
>  struct pool_item {
>   u_int32_t pi_magic;
>   /* Other entries use only this list entry */
> - SIMPLEQ_ENTRY(pool_item) pi_list;
> + XSIMPLEQ_ENTRY(pool_item) pi_list;
>  };
>  
>  #ifdef POOL_DEBUG
> @@ -620,7 +620,7 @@ startover:
>   /* Start the allocation process over. */
>   goto startover;
>   }
> - if ((v = pi = SIMPLEQ_FIRST(&ph->ph_itemlist)) == NULL) {
> + if ((v = pi = XSIMPLEQ_FIRST(&ph->ph_itemlist)) == NULL) {
>   panic("pool_do_get: %s: page empty", pp->pr_wchan);
>   }
>  #ifdef DIAGNOSTIC
> @@ -653,7 +653,7 @@ startover:
>   /*
>   * Remove from item list.
>   */
> - SIMPLEQ_REMOVE_HEAD(&ph->ph_itemlist, pi_list);
> + XSIMPLEQ_REMOVE_HEAD(&ph->ph_itemlist, pi_list);
>   pp->pr_nitems--;
>   pp->pr_nout++;
>   if (ph->ph_nmissing == 0) {
> @@ -671,7 +671,7 @@ startover:
>   LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
>   }
>   ph->ph_nmissing++;
> - if (SIMPLEQ_EMPTY(&ph->ph_itemlist)) {
> + if (XSIMPLEQ_EMPTY(&ph->ph_itemlist)) {
>  #ifdef DIAGNOSTIC
>   if (ph->ph_nmissing != pp->pr_itemsperpage) {
>   panic("pool_do_get: %s: nmissing inconsistent",
> @@ -771,7 +771,7 @@ pool_do_put(struct pool *pp, void *v)
>   }
>  #endif /* DIAGNOSTIC */
>  
> - SIMPLEQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
> + XSIMPLEQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
>   ph->ph_nmissing--;
>   pp->pr_nitems++;
>   pp->pr_nout--;
> @@ -874,7 +874,7 @@ pool_prime_page(struct pool *pp, caddr_t
>   * Insert page header.
>   */
>   LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
> - SIMPLEQ_INIT(&ph->ph_itemlist);
> + XSIMPLEQ_INIT(&ph->ph_itemlist);
>   ph->ph_page = storage;
>   ph->ph_pagesize = pp->pr_alloc->pa_pagesz;
>   ph->ph_nmissing = 0;
> @@ -909,7 +909,7 @@ pool_prime_page(struct pool *pp, caddr_t
>   KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
>  
>   /* Insert on page list */
> - SIMPLEQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
> + XSIMPLEQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
>  
>  #ifdef DIAGNOSTIC
>   pi->pi_magic = poison_value(pi);
> @@ -1130,7 +1130,7 @@ pool_print_pagelist(struct pool_pagelist
>   (*pr)("\t\tpage %p, nmissing %d\n",
>      ph->ph_page, ph->ph_nmissing);
>  #ifdef DIAGNOSTIC
> - SIMPLEQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
> + XSIMPLEQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
>   if (pi->pi_magic != poison_value(pi)) {
>   (*pr)("\t\t\titem %p, magic 0x%x\n",
>      pi, pi->pi_magic);
> @@ -1281,9 +1281,9 @@ pool_chk_page(struct pool *pp, struct po
>   return 1;
>   }
>  
> - for (pi = SIMPLEQ_FIRST(&ph->ph_itemlist), n = 0;
> + for (pi = XSIMPLEQ_FIRST(&ph->ph_itemlist), n = 0;
>       pi != NULL;
> -     pi = SIMPLEQ_NEXT(pi,pi_list), n++) {
> +     pi = XSIMPLEQ_NEXT(&ph->ph_itemlist, pi, pi_list), n++) {
>  
>  #ifdef DIAGNOSTIC
>   if (pi->pi_magic != poison_value(pi)) {
> @@ -1379,7 +1379,7 @@ pool_walk(struct pool *pp, int full,
>   n = ph->ph_nmissing;
>  
>   do {
> - SIMPLEQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
> + XSIMPLEQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
>   if (cp == (caddr_t)pi)
>   break;
>   }
> Index: kern/kern_malloc.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/kern_malloc.c,v
> retrieving revision 1.99
> diff -u -p -r1.99 kern_malloc.c
> --- kern/kern_malloc.c 6 Apr 2013 03:53:25 -0000 1.99
> +++ kern/kern_malloc.c 15 Apr 2013 13:06:34 -0000
> @@ -41,6 +41,8 @@
>  #include <sys/time.h>
>  #include <sys/rwlock.h>
>  
> +#include <dev/rndvar.h>
> +
>  #include <uvm/uvm.h>
>  
>  static __inline__ long BUCKETINDX(size_t sz)
> @@ -132,7 +134,7 @@ struct kmem_freelist {
>   int32_t kf_spare0;
>   int16_t kf_type;
>   int16_t kf_spare1;
> - SIMPLEQ_ENTRY(kmem_freelist) kf_flist;
> + XSIMPLEQ_ENTRY(kmem_freelist) kf_flist;
>  };
>  
>  #ifdef DIAGNOSTIC
> @@ -221,7 +223,7 @@ malloc(unsigned long size, int type, int
>   allocsize = round_page(size);
>   else
>   allocsize = 1 << indx;
> - if (SIMPLEQ_FIRST(&kbp->kb_freelist) == NULL) {
> + if (XSIMPLEQ_FIRST(&kbp->kb_freelist) == NULL) {
>   npg = atop(round_page(allocsize));
>   va = (caddr_t)uvm_km_kmemalloc_pla(kmem_map, NULL,
>      (vsize_t)ptoa(npg), 0,
> @@ -273,7 +275,7 @@ malloc(unsigned long size, int type, int
>   poison_mem(cp, allocsize);
>   freep->kf_type = M_FREE;
>  #endif /* DIAGNOSTIC */
> - SIMPLEQ_INSERT_HEAD(&kbp->kb_freelist, freep, kf_flist);
> + XSIMPLEQ_INSERT_HEAD(&kbp->kb_freelist, freep, kf_flist);
>   if (cp <= va)
>   break;
>   cp -= allocsize;
> @@ -283,15 +285,15 @@ malloc(unsigned long size, int type, int
>   freshalloc = 0;
>  #endif
>   }
> - freep = SIMPLEQ_FIRST(&kbp->kb_freelist);
> - SIMPLEQ_REMOVE_HEAD(&kbp->kb_freelist, kf_flist);
> + freep = XSIMPLEQ_FIRST(&kbp->kb_freelist);
> + XSIMPLEQ_REMOVE_HEAD(&kbp->kb_freelist, kf_flist);
>   va = (caddr_t)freep;
>  #ifdef DIAGNOSTIC
>   savedtype = (unsigned)freep->kf_type < M_LAST ?
>   memname[freep->kf_type] : "???";
> - if (freshalloc == 0 && SIMPLEQ_FIRST(&kbp->kb_freelist)) {
> + if (freshalloc == 0 && XSIMPLEQ_FIRST(&kbp->kb_freelist)) {
>   int rv;
> - vaddr_t addr = (vaddr_t)SIMPLEQ_FIRST(&kbp->kb_freelist);
> + vaddr_t addr = (vaddr_t)XSIMPLEQ_FIRST(&kbp->kb_freelist);
>  
>   vm_map_lock(kmem_map);
>   rv = uvm_map_checkprot(kmem_map, addr,
> @@ -420,7 +422,7 @@ free(void *addr, int type)
>   */
>   if (freep->kf_spare0 == poison_value(freep)) {
>   struct kmem_freelist *fp;
> - SIMPLEQ_FOREACH(fp, &kbp->kb_freelist, kf_flist) {
> + XSIMPLEQ_FOREACH(fp, &kbp->kb_freelist, kf_flist) {
>   if (addr != fp)
>   continue;
>   printf("multiply freed item %p\n", addr);
> @@ -453,7 +455,7 @@ free(void *addr, int type)
>   wakeup(ksp);
>   ksp->ks_inuse--;
>  #endif
> - SIMPLEQ_INSERT_TAIL(&kbp->kb_freelist, freep, kf_flist);
> + XSIMPLEQ_INSERT_TAIL(&kbp->kb_freelist, freep, kf_flist);
>   splx(s);
>  }
>  
> @@ -538,7 +540,7 @@ kmeminit(void)
>   kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
>   (vsize_t)(nkmempages * sizeof(struct kmemusage)));
>   for (indx = 0; indx < MINBUCKET + 16; indx++) {
> - SIMPLEQ_INIT(&bucket[indx].kb_freelist);
> + XSIMPLEQ_INIT(&bucket[indx].kb_freelist);
>   }
>  #ifdef KMEMSTATS
>   for (indx = 0; indx < MINBUCKET + 16; indx++) {
> Index: sys/malloc.h
> ===================================================================
> RCS file: /cvs/src/sys/sys/malloc.h,v
> retrieving revision 1.104
> diff -u -p -r1.104 malloc.h
> --- sys/malloc.h 6 Apr 2013 03:53:25 -0000 1.104
> +++ sys/malloc.h 15 Apr 2013 13:06:34 -0000
> @@ -347,7 +347,7 @@ struct kmem_freelist;
>   * Set of buckets for each size of memory block that is retained
>   */
>  struct kmembuckets {
> - SIMPLEQ_HEAD(, kmem_freelist) kb_freelist; /* list of free blocks */
> + XSIMPLEQ_HEAD(, kmem_freelist) kb_freelist; /* list of free blocks */
>   u_int64_t kb_calls; /* total calls to allocate this size */
>   u_int64_t kb_total; /* total number of blocks allocated */
>   u_int64_t kb_totalfree; /* # of free elements in this bucket */
> Index: sys/queue.h
> ===================================================================
> RCS file: /cvs/src/sys/sys/queue.h,v
> retrieving revision 1.36
> diff -u -p -r1.36 queue.h
> --- sys/queue.h 11 Apr 2012 13:29:14 -0000 1.36
> +++ sys/queue.h 15 Apr 2013 13:06:34 -0000
> @@ -317,6 +317,84 @@ struct { \
>  } while (0)
>  
>  /*
> + * XOR Simple queue definitions.
> + */
> +#define XSIMPLEQ_HEAD(name, type) \
> +struct name { \
> + struct type *sqx_first; /* first element */ \
> + struct type **sqx_last; /* addr of last next element */ \
> + unsigned long sqx_cookie; \
> +}
> +
> +#define XSIMPLEQ_ENTRY(type) \
> +struct { \
> + struct type *sqx_next; /* next element */ \
> +}
> +
> +/*
> + * Simple queue access methods.
> + */
> +#define XSIMPLEQ_XOR(head, ptr)    ((typeof(ptr))((head)->sqx_cookie ^ (unsigned long)(ptr)))
> +#define XSIMPLEQ_FIRST(head)    XSIMPLEQ_XOR(head, ((head)->sqx_first))
> +#define XSIMPLEQ_END(head)    NULL
> +#define XSIMPLEQ_EMPTY(head)    (XSIMPLEQ_FIRST(head) == XSIMPLEQ_END(head))
> +#define XSIMPLEQ_NEXT(head, elm, field)    XSIMPLEQ_XOR(head, ((elm)->field.sqx_next))
> +
> +
> +#define XSIMPLEQ_FOREACH(var, head, field) \
> + for ((var) = XSIMPLEQ_FIRST(head); \
> +    (var) != XSIMPLEQ_END(head); \
> +    (var) = XSIMPLEQ_NEXT(head, var, field))
> +
> +#define XSIMPLEQ_FOREACH_SAFE(var, head, field, tvar) \
> + for ((var) = XSIMPLEQ_FIRST(head); \
> +    (var) && ((tvar) = XSIMPLEQ_NEXT(head, var, field), 1); \
> +    (var) = (tvar))
> +
> +/*
> + * Simple queue functions.
> + */
> +#define XSIMPLEQ_INIT(head) do { \
> + (head)->sqx_cookie = arc4random(); \
> + if (sizeof(unsigned long) == 8) \
> + (head)->sqx_cookie |= ((unsigned long)arc4random()) << sizeof(long) * 4; \
> + (head)->sqx_first = XSIMPLEQ_XOR(head, NULL); \
> + (head)->sqx_last = XSIMPLEQ_XOR(head, &(head)->sqx_first); \
> +} while (0)
> +
> +#define XSIMPLEQ_INSERT_HEAD(head, elm, field) do { \
> + if (((elm)->field.sqx_next = (head)->sqx_first) == XSIMPLEQ_XOR(head, NULL)) \
> + (head)->sqx_last = XSIMPLEQ_XOR(head, &(elm)->field.sqx_next); \
> + (head)->sqx_first = XSIMPLEQ_XOR(head, (elm)); \
> +} while (0)
> +
> +#define XSIMPLEQ_INSERT_TAIL(head, elm, field) do { \
> + (elm)->field.sqx_next = XSIMPLEQ_XOR(head, NULL); \
> + *(XSIMPLEQ_XOR(head, (head)->sqx_last)) = XSIMPLEQ_XOR(head, (elm)); \
> + (head)->sqx_last = XSIMPLEQ_XOR(head, &(elm)->field.sqx_next); \
> +} while (0)
> +
> +#define XSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
> + if (((elm)->field.sqx_next = (listelm)->field.sqx_next) == XSIMPLEQ_XOR(head, NULL)) \
> + (head)->sqx_last = XSIMPLEQ_XOR(head, &(elm)->field.sqx_next); \
> + (listelm)->field.sqx_next = XSIMPLEQ_XOR(head, (elm)); \
> +} while (0)
> +
> +#define XSIMPLEQ_REMOVE_HEAD(head, field) do { \
> + if (((head)->sqx_first = XSIMPLEQ_XOR(head, (head)->sqx_first)->field.sqx_next) == XSIMPLEQ_XOR(head, NULL)) \
> + (head)->sqx_last = XSIMPLEQ_XOR(head, &(head)->sqx_first); \
> +} while (0)
> +
> +#define XSIMPLEQ_REMOVE_AFTER(head, elm, field) do { \
> + if (((elm)->field.sqx_next = XSIMPLEQ_XOR(head, \
> +    (elm)->field.sqx_next)->field.sqx_next) \
> +    == XSIMPLEQ_XOR(head, NULL)) \
> + (head)->sqx_last = \
> +    XSIMPLEQ_XOR(head, &(elm)->field.sqx_next); \
> +} while (0)
> +
> +    
> +/*
>   * Tail queue definitions.
>   */
>  #define TAILQ_HEAD(name, type) \
>

Reply | Threaded
Open this post in threaded view
|

Re: xor lists

Miod Vallat
> > If you are concerned about performance, I recommend you upgrade to a
> > computer with a hardware accelerated xor instruction.
>
> By the only hardware accelarated operation my computer has is NAND...

Well, consider yourself lucky, mine only accelerates NOP.

Reply | Threaded
Open this post in threaded view
|

Re: xor lists

Ted Unangst-6
In reply to this post by Ted Unangst-6
On Wed, Apr 17, 2013 at 20:06, Ted Unangst wrote:

> +#define XSIMPLEQ_INIT(head) do { \
> + (head)->sqx_cookie = arc4random(); \
> + if (sizeof(unsigned long) == 8) \
> + (head)->sqx_cookie |= ((unsigned long)arc4random()) << sizeof(long) * 4; \

Since a few people have noticed, there was a reason why I didn't use
arc4random_buf here. Unfortunately, I have forgotten what that reason
is. But I did discover people are at least skimming the diff, so maybe
my plan worked after all. In any case, yes, arc4random_buf would be a
better option here.

Along those lines, one thing I forgot to mention is that this diff is
of course only effective if the random generator is operational. pool
keeps many free lists and initializes them late, so it's safe, but
malloc initializes its lists very early, so the cookies are likely to
be fixed values. There's a way to correct that, but it's not critical
for the first iteration.