From: Bastiaan Jacques
Subject: [Gnash-commit] [SCM] Gnash branch, master, updated. release_0_8_9_start-422-gf615871
Date: Tue, 29 Mar 2011 16:14:01 +0000

This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "Gnash".

The branch, master, has been updated
       via  f61587132b85768bf00fa3d187f2635cda4639c8 (commit)
       via  7f970ded6f8c02297638a0f1d93b3fe8659378bb (commit)
       via  334bfb50cdf7447086591ea68cc3151969e207bd (commit)
       via  4d758c3602d03669a2ed1ef35c9014f7602f70c4 (commit)
       via  2ba50bc27c9ec8f9c6de821f7ba015f07838df66 (commit)
       via  c7cc42df9169d9abf1c65393c33eb7d2abdea2ee (commit)
       via  4cf9ecffd43e7aee9672594d25a27beccbfd4cf1 (commit)
      from  26ce2b052b8e7d3ff8c13678320fad91bad00407 (commit)

The revisions listed above that are new to this repository have not appeared
in any other notification email, so they are listed in full below.

- Log -----------------------------------------------------------------
http://git.savannah.gnu.org/cgit//commit/?id=f61587132b85768bf00fa3d187f2635cda4639c8


commit f61587132b85768bf00fa3d187f2635cda4639c8
Author: Bastiaan Jacques <address@hidden>
Date:   Tue Mar 29 18:09:07 2011 +0200

    Provide pointer alignment requirements to jemalloc.

diff --git a/configure.ac b/configure.ac
index 22e15c6..f31d54d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1320,6 +1320,13 @@ AC_ARG_ENABLE(jemalloc,
   *)   AC_MSG_ERROR([bad value ${enableval} for --enable-jemalloc option]) ;;
 esac],jemalloc=yes)
 
+AC_CHECK_SIZEOF([void *], [4])
+
+if test "x${ac_cv_sizeof_void_p}" != x4 -a x"${ac_cv_sizeof_void_p}" != x8; then
+  dnl jemalloc doesn't support unusual pointer sizes
+  jemalloc=no
+fi
+
 dnl There is some weird stuff going on with NetBSD and jemalloc, so don't 
 dnl build it for now.
 if test x"${netbsd}" = x"yes" -o x"${windows}" = x"yes" -o x"${freebsd}" = x"yes" -o x"${haiku}" = x"yes" -o x"${gnu}" = x"yes"; then
diff --git a/libbase/jemalloc_gnash.h b/libbase/jemalloc_gnash.h
index 95769f4..bbd0b37 100644
--- a/libbase/jemalloc_gnash.h
+++ b/libbase/jemalloc_gnash.h
@@ -37,6 +37,12 @@
 #define MOZ_MEMORY_ANDROID
 #endif
 
+#if SIZEOF_VOID_P == 4
+# define MOZ_MEMORY_SIZEOF_PTR_2POW 2
+#elif SIZEOF_VOID_P == 8
+# define MOZ_MEMORY_SIZEOF_PTR_2POW 3
+#endif
+
 #include "jemalloc.h"
 
 #ifdef USE_STATS_MEMORY
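
For reference, MOZ_MEMORY_SIZEOF_PTR_2POW is just log2(sizeof(void *)), which
jemalloc uses for pointer-sized arithmetic. A standalone C sketch of the
mapping the new configure check enforces (illustration only, not part of the
patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned pow2 = 0;

        /* log2 for the power-of-two pointer sizes configure accepts;
         * mirrors the 4 -> 2, 8 -> 3 mapping in jemalloc_gnash.h. */
        while (((size_t)1 << pow2) < sizeof(void *))
            pow2++;

        printf("sizeof(void *) = %zu => MOZ_MEMORY_SIZEOF_PTR_2POW = %u\n",
               sizeof(void *), pow2);
        return 0;
    }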

http://git.savannah.gnu.org/cgit//commit/?id=7f970ded6f8c02297638a0f1d93b3fe8659378bb


commit 7f970ded6f8c02297638a0f1d93b3fe8659378bb
Author: Bastiaan Jacques <address@hidden>
Date:   Tue Mar 29 18:08:09 2011 +0200

    Only compile the stats interface if it's enabled.

diff --git a/libbase/jemalloc.c b/libbase/jemalloc.c
index 158ee7a..18dfc6d 100644
--- a/libbase/jemalloc.c
+++ b/libbase/jemalloc.c
@@ -6157,6 +6157,7 @@ malloc_usable_size(const void *ptr)
 #endif
 }
 
+#ifdef MALLOC_STATS
 void
 jemalloc_stats(jemalloc_stats_t *stats)
 {
@@ -6257,6 +6258,7 @@ jemalloc_stats(jemalloc_stats_t *stats)
        assert(stats->mapped >= stats->committed);
        assert(stats->committed >= stats->allocated);
 }
+#endif /* MALLOC_STATS */
 
 #ifdef MOZ_MEMORY_WINDOWS
 void*
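
Note that with jemalloc_stats() now compiled only under MALLOC_STATS, callers
must guard their use of the stats interface the same way. A minimal
caller-side sketch (current_allocated_bytes() is a hypothetical helper, not
part of the patch):

    #include <stddef.h>
    #include "jemalloc_gnash.h"

    /* Returns the currently allocated byte count, or 0 when the
     * stats interface is compiled out. */
    static size_t
    current_allocated_bytes(void)
    {
    #ifdef MALLOC_STATS
        jemalloc_stats_t stats;

        jemalloc_stats(&stats);
        return stats.allocated;
    #else
        return 0;
    #endif
    }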

http://git.savannah.gnu.org/cgit//commit/?id=334bfb50cdf7447086591ea68cc3151969e207bd


commit 334bfb50cdf7447086591ea68cc3151969e207bd
Author: Bastiaan Jacques <address@hidden>
Date:   Tue Mar 29 12:57:49 2011 +0200

    Comment fix and whitespace cleanup.

diff --git a/libbase/jemalloc_gnash.h b/libbase/jemalloc_gnash.h
index 6e62b1c..95769f4 100644
--- a/libbase/jemalloc_gnash.h
+++ b/libbase/jemalloc_gnash.h
@@ -41,7 +41,7 @@
 
 #ifdef USE_STATS_MEMORY
 
-/* Enable statistics tracking in jemalloc. */
+/* Enable statistics tracking plus API in jemalloc. */
 #define MALLOC_STATS
 
 /* Borrowed from malloc.h, as this is Linux specific. This has been
@@ -70,17 +70,17 @@ mallinfo(void)
 {
     struct mallinfo mi;
     jemalloc_stats_t stats;
-    
+
     jemalloc_stats(&stats);
 
     /* clear unused fields */
     mi.arena = mi.ordblks = mi.smblks = mi.usmblks = mi.fsmblks =
         mi.hblks = mi.hblkhd = 0;
-  
+
     mi.uordblks = stats.allocated;
     mi.fordblks = stats.mapped - mi.uordblks;
     mi.keepcost = stats.mapped;
-    
+
     return mi;
 }
 

http://git.savannah.gnu.org/cgit//commit/?id=4d758c3602d03669a2ed1ef35c9014f7602f70c4


commit 4d758c3602d03669a2ed1ef35c9014f7602f70c4
Author: Bastiaan Jacques <address@hidden>
Date:   Tue Mar 29 03:18:53 2011 +0200

    Don't define MALLOC_STATS unless specifically enabled.

diff --git a/libbase/jemalloc.c b/libbase/jemalloc.c
index 224cb84..158ee7a 100644
--- a/libbase/jemalloc.c
+++ b/libbase/jemalloc.c
@@ -118,12 +118,6 @@
  */
 #define        MOZ_MEMORY_NARENAS_DEFAULT_ONE
 
-/*
- * MALLOC_STATS enables statistics calculation, and is required for
- * jemalloc_stats().
- */
-#define MALLOC_STATS
-
 #ifndef MALLOC_PRODUCTION
    /*
     * MALLOC_DEBUG enables assertions and other sanity checks, and disables
@@ -131,6 +125,12 @@
     */
 #  define MALLOC_DEBUG
 
+   /*
+    * MALLOC_STATS enables statistics calculation, and is required for
+    * jemalloc_stats().
+    */
+#  define MALLOC_STATS
+
    /* Memory filling (junk/zero). */
 #  define MALLOC_FILL
 

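Combined with the USE_STATS_MEMORY block that jemalloc_gnash.h gains in the
commits above, the net effect is that MALLOC_STATS is defined in exactly two
cases: debug builds (no MALLOC_PRODUCTION) and builds with Gnash's memory
profiling enabled. A sketch of the resulting preprocessor logic
(STATS_AVAILABLE is a hypothetical name, used only for illustration):

    #if !defined(MALLOC_PRODUCTION) || defined(USE_STATS_MEMORY)
    #  define STATS_AVAILABLE 1   /* jemalloc_stats() may be called */
    #else
    #  define STATS_AVAILABLE 0   /* stats interface compiled out */
    #endif
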
http://git.savannah.gnu.org/cgit//commit/?id=2ba50bc27c9ec8f9c6de821f7ba015f07838df66


commit 2ba50bc27c9ec8f9c6de821f7ba015f07838df66
Author: Bastiaan Jacques <address@hidden>
Date:   Tue Mar 29 03:17:02 2011 +0200

    Port over mallinfo(), use jemalloc stats API.

diff --git a/libbase/jemalloc_gnash.h b/libbase/jemalloc_gnash.h
index d73a0c5..6e62b1c 100644
--- a/libbase/jemalloc_gnash.h
+++ b/libbase/jemalloc_gnash.h
@@ -37,7 +37,53 @@
 #define MOZ_MEMORY_ANDROID
 #endif
 
-
 #include "jemalloc.h"
 
+#ifdef USE_STATS_MEMORY
+
+/* Enable statistics tracking in jemalloc. */
+#define MALLOC_STATS
+
+/* Borrowed from malloc.h, as this is Linux specific. This has been
+ * added to jemalloc so the existing memory profiling in Gnash will
+ * continue to work. Most of these fields aren't used by the Gnash
+ * memory profiling, but we leave them here for a semblance of
+ * portability. The only fields Gnash uses are arena, uordblks. and
+ * fordblks.
+ */
+struct mallinfo {
+  int arena;    /* non-mmapped space allocated from system */
+  int ordblks;  /* number of free chunks UNUSED */
+  int smblks;   /* number of fastbin blocks UNUSED */
+  int hblks;    /* number of mmapped regions UNUSED */
+  int hblkhd;   /* space in mmapped regions UNUSED */
+  int usmblks;  /* maximum total allocated space UNUSED */
+  int fsmblks;  /* space available in freed fastbin blocks UNUSED */
+  int uordblks; /* total allocated space */
+  int fordblks; /* total free space */
+  int keepcost; /* top-most, releasable space UNUSED */
+};
+
+static
+struct mallinfo
+mallinfo(void)
+{
+    struct mallinfo mi;
+    jemalloc_stats_t stats;
+    
+    jemalloc_stats(&stats);
+
+    /* clear unused fields */
+    mi.arena = mi.ordblks = mi.smblks = mi.usmblks = mi.fsmblks =
+        mi.hblks = mi.hblkhd = 0;
+  
+    mi.uordblks = stats.allocated;
+    mi.fordblks = stats.mapped - mi.uordblks;
+    mi.keepcost = stats.mapped;
+    
+    return mi;
+}
+
+#endif /* USE_STATS_MEMORY */
+
 #endif /* GNASH_JEMALLOC_H */
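
A usage sketch (assumed, not part of the patch): per the comment above, the
Gnash profiler reads only the arena, uordblks, and fordblks fields, so a heap
report reduces to sampling those. This assumes USE_STATS_MEMORY is defined so
that mallinfo() exists; report_heap_usage() is a hypothetical helper:

    #include <stdio.h>
    #include "jemalloc_gnash.h"   /* provides struct mallinfo and mallinfo() */

    static void
    report_heap_usage(void)
    {
        struct mallinfo mi = mallinfo();

        /* uordblks: total allocated; fordblks: mapped minus allocated. */
        printf("allocated: %d bytes, free: %d bytes\n",
               mi.uordblks, mi.fordblks);
    }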

http://git.savannah.gnu.org/cgit//commit/?id=c7cc42df9169d9abf1c65393c33eb7d2abdea2ee


commit c7cc42df9169d9abf1c65393c33eb7d2abdea2ee
Author: Bastiaan Jacques <address@hidden>
Date:   Tue Mar 29 02:01:53 2011 +0200

    Add macros to allow jemalloc to build correctly using the Gnash build system.

diff --git a/configure.ac b/configure.ac
index f94fb5f..22e15c6 100644
--- a/configure.ac
+++ b/configure.ac
@@ -188,6 +188,19 @@ case "${host}" in
     gnu=yes
     AC_DEFINE([GNU_HOST], [1], [this is a GNU platform])
     ;;
+  *-*wince)
+    wince=yes
+    AC_DEFINE([WINCE_HOST], [1], [this is a WINCE platform])
+    ;;
+  *-*winmo)
+    wince=yes
+    AC_DEFINE([WINCE_HOST], [1], [this is a WINCE platform])
+    AC_DEFINE([WINCE6_HOST], [1], [this is a WINCE6 platform])
+    ;;
+  *-android*)
+    android=yes
+    AC_DEFINE([ANDROID_HOST], [1], [this is an Android platform])
+    ;;
 esac
 
 
diff --git a/libbase/Makefile.am b/libbase/Makefile.am
index 66c0bb7..15278e9 100644
--- a/libbase/Makefile.am
+++ b/libbase/Makefile.am
@@ -155,7 +155,9 @@ inst_HEADERS = \
        accumulator.h \
        SimpleBuffer.h \
        GnashNumeric.h \
-       jemtree.h \
+       jemalloc_gnash.h \
+       jemalloc_types.h \
+       jemalloc.h \
        GnashSleep.h \
        gmemory.h \
        SharedMem.h \
diff --git a/libbase/jemalloc.c b/libbase/jemalloc.c
index dbcfdfc..224cb84 100644
--- a/libbase/jemalloc.c
+++ b/libbase/jemalloc.c
@@ -95,6 +95,7 @@
  *
  *******************************************************************************
  */
+#include "jemalloc_gnash.h"
 
 #ifdef MOZ_MEMORY_ANDROID
 #define NO_TLS
@@ -376,7 +377,6 @@ __FBSDID("$FreeBSD: head/lib/libc/stdlib/malloc.c 180599 2008-07-18 19:35:44Z ja
 
 #endif
 
-#include "jemalloc.h"
 
 /* Some tools, such as /dev/dsp wrappers, LD_PRELOAD libraries that
  * happen to override mmap() and call dlsym() from their overridden
@@ -443,7 +443,7 @@ static const bool __isthreaded = true;
    /* MSVC++ does not support C99 variable-length arrays. */
 #  define RB_NO_C99_VARARRAYS
 #endif
-#include "rb.h"
+#include "jemalloc_rb.h"
 
 #ifdef MALLOC_DEBUG
    /* Disable inlining to make debugging easier. */
diff --git a/libbase/jemalloc_gnash.h b/libbase/jemalloc_gnash.h
new file mode 100644
index 0000000..d73a0c5
--- /dev/null
+++ b/libbase/jemalloc_gnash.h
@@ -0,0 +1,43 @@
+#ifndef GNASH_JEMALLOC_H
+#define GNASH_JEMALLOC_H
+
+#ifdef HAVE_CONFIG_H
+# include "gnashconfig.h"
+#endif
+
+/* Compiling for user mode application, not operating system */
+#define MOZ_MEMORY
+
+#ifdef LINUX_HOST
+#define MOZ_MEMORY_LINUX
+#endif
+
+#ifdef WIN32_HOST
+#define MOZ_MEMORY_WINDOWS
+#endif
+
+/* OpenBSD and others are excluded here to mimic what Mozilla does. */
+#if defined(FREEBSD_HOST) || defined(NETBSD_HOST)
+#define MOZ_MEMORY_BSD
+#endif
+
+#ifdef SOLARIS_HOST
+#define MOZ_MEMORY_SOLARIS
+#endif
+
+#ifdef WINCE_HOST
+#define MOZ_MEMORY_WINCE
+#endif
+
+#ifdef WINCE6_HOST
+#define MOZ_MEMORY_WINCE6
+#endif
+
+#ifdef ANDROID_HOST
+#define MOZ_MEMORY_ANDROID
+#endif
+
+
+#include "jemalloc.h"
+
+#endif /* GNASH_JEMALLOC_H */
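
For reference, jemalloc_gnash.h simply translates the Gnash build system's
*_HOST defines (set in gnashconfig.h) into the MOZ_MEMORY_* names that the
imported jemalloc.c expects. A standalone sketch that simulates a Linux build
by defining LINUX_HOST by hand (illustration only, not part of the patch):

    #define LINUX_HOST 1          /* normally defined in gnashconfig.h */
    #include "jemalloc_gnash.h"

    /* The header should have mapped LINUX_HOST to MOZ_MEMORY_LINUX;
     * jemalloc.c keys all of its platform handling off the latter. */
    #ifndef MOZ_MEMORY_LINUX
    #  error "LINUX_HOST should have selected MOZ_MEMORY_LINUX"
    #endif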

http://git.savannah.gnu.org/cgit//commit/?id=4cf9ecffd43e7aee9672594d25a27beccbfd4cf1


commit 4cf9ecffd43e7aee9672594d25a27beccbfd4cf1
Author: Bastiaan Jacques <address@hidden>
Date:   Tue Mar 29 00:26:26 2011 +0200

    Update to upstream Mozilla jemalloc (87f5bb93303c); no local changes.

diff --git a/libbase/jemalloc.c b/libbase/jemalloc.c
index 3f578db..dbcfdfc 100644
--- a/libbase/jemalloc.c
+++ b/libbase/jemalloc.c
@@ -1,3 +1,5 @@
+/* -*- Mode: C; tab-width: 8; c-basic-offset: 8; indent-tabs-mode: t -*- */
+/* vim:set softtabstop=8 shiftwidth=8: */
 /*-
  * Copyright (C) 2006-2008 Jason Evans <address@hidden>.
  * All rights reserved.
@@ -45,7 +47,8 @@
  * Allocation requests are rounded up to the nearest size class, and no record
  * of the original request size is maintained.  Allocations are broken into
  * categories according to size class.  Assuming runtime defaults, 4 kB pages
- * and a 16 byte quantum, the size classes in each category are as follows:
+ * and a 16 byte quantum on a 32-bit system, the size classes in each category
+ * are as follows:
  *
  *   |=====================================|
  *   | Category | Subcategory    |    Size |
@@ -93,122 +96,154 @@
  *******************************************************************************
  */
 
-/*
- * This has been hacked on heavily by Rob Savoye, so it compiles
- * within Gnash, using the same configuration settings as
- * everything else in Gnash.
- */
-#ifdef HAVE_CONFIG_H
-# include "gnashconfig.h"
+#ifdef MOZ_MEMORY_ANDROID
+#define NO_TLS
+#define _pthread_self() pthread_self()
 #endif
 
-#include "dsodefs.h"
-
 /*
  * MALLOC_PRODUCTION disables assertions and statistics gathering.  It also
  * defaults the A and J runtime options to off.  These settings are appropriate
  * for production systems.
  */
-#ifndef USE_STATS_MEMORY
-# define MALLOC_PRODUCTION 1
+#ifndef MOZ_MEMORY_DEBUG
+#  define      MALLOC_PRODUCTION
 #endif
 
+/*
+ * Use only one arena by default.  Mozilla does not currently make extensive
+ * use of concurrent allocation, so the increased fragmentation associated with
+ * multiple arenas is not warranted.
+ */
+#define        MOZ_MEMORY_NARENAS_DEFAULT_ONE
+
+/*
+ * MALLOC_STATS enables statistics calculation, and is required for
+ * jemalloc_stats().
+ */
+#define MALLOC_STATS
+
 #ifndef MALLOC_PRODUCTION
    /*
     * MALLOC_DEBUG enables assertions and other sanity checks, and disables
     * inline functions.
     */
-#  define MALLOC_DEBUG 1
-
-   /* MALLOC_STATS enables statistics calculation. */
-#  define MALLOC_STATS 1
+#  define MALLOC_DEBUG
 
    /* Memory filling (junk/zero). */
-#  define MALLOC_FILL 1
+#  define MALLOC_FILL
 
    /* Allocation tracing. */
-#  define MALLOC_UTRACE 1
+#  ifndef MOZ_MEMORY_WINDOWS
+#    define MALLOC_UTRACE
+#  endif
 
    /* Support optional abort() on OOM. */
-#  define MALLOC_XMALLOC 1
+#  define MALLOC_XMALLOC
 
    /* Support SYSV semantics. */
-#  define MALLOC_SYSV 1
+#  define MALLOC_SYSV
 #endif
 
 /*
- * MALLOC_LAZY_FREE enables the use of a per-thread vector of slots that free()
- * can atomically stuff object pointers into.  This can reduce arena lock
- * contention.
+ * MALLOC_VALIDATE causes malloc_usable_size() to perform some pointer
+ * validation.  There are many possible errors that validation does not even
+ * attempt to detect.
  */
-/* #define     MALLOC_LAZY_FREE 1 */
+#define MALLOC_VALIDATE
+
+/* Embed no-op macros that support memory allocation tracking via valgrind. */
+#ifdef MOZ_VALGRIND
+#  define MALLOC_VALGRIND
+#endif
+#ifdef MALLOC_VALGRIND
+#  include <valgrind/valgrind.h>
+#else
+#  define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
+#  define VALGRIND_FREELIKE_BLOCK(addr, rzB)
+#endif
 
 /*
  * MALLOC_BALANCE enables monitoring of arena lock contention and dynamically
  * re-balances arena load if exponentially averaged contention exceeds a
  * certain threshold.
  */
-/* #define     MALLOC_BALANCE 1 */
+/* #define     MALLOC_BALANCE */
 
-/*
- * MALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
- * segment (DSS).  In an ideal world, this functionality would be completely
- * unnecessary, but we are burdened by history and the lack of resource limits
- * for anonymous mapped memory.
- */
-#if (!defined(DARWIN) && !defined(WIN32))
-# define MALLOC_DSS 
+#if (!defined(MOZ_MEMORY_WINDOWS) && !defined(MOZ_MEMORY_DARWIN))
+   /*
+    * MALLOC_PAGEFILE causes all mmap()ed memory to be backed by temporary
+    * files, so that if a chunk is mapped, it is guaranteed to be swappable.
+    * This avoids asynchronous OOM failures that are due to VM over-commit.
+    *
+    * XXX OS X over-commits, so we should probably use mmap() instead of
+    * vm_allocate(), so that MALLOC_PAGEFILE works.
+    */
+#define MALLOC_PAGEFILE
 #endif
 
-#ifdef LINUX_HOST
-# define       _GNU_SOURCE /* For mremap(2). */
-# define       issetugid() 0
-# if 0 /* Enable in order to test decommit code on Linux. */
-#  define MALLOC_DECOMMIT 1
-/*
- * The decommit code for Unix doesn't bother to make sure deallocated DSS
- * chunks are writable.
- */
-# undef MALLOC_DSS
-# endif
+#ifdef MALLOC_PAGEFILE
+/* Write size when initializing a page file. */
+#  define MALLOC_PAGEFILE_WRITE_SIZE 512
+#endif
+
+#if defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
+#define        _GNU_SOURCE /* For mremap(2). */
+#define        issetugid() 0
+#if 0 /* Enable in order to test decommit code on Linux. */
+#  define MALLOC_DECOMMIT
+#endif
 #endif
 
+#ifndef MOZ_MEMORY_WINCE
 #include <sys/types.h>
 
 #include <errno.h>
+#include <stdlib.h>
+#endif
 #include <limits.h>
 #include <stdarg.h>
 #include <stdio.h>
-#include <stdlib.h>
 #include <string.h>
 
-#ifdef WIN32
-# include <cruntime.h>
-# include <internal.h>
-# include <windows.h>
-# include <io.h>
-# include "jemtree.h"
-
-# pragma warning( disable: 4267 4996 4146 )
-
-# define       bool BOOL
-# define       false FALSE
-# define       true TRUE
-# define       inline __inline
-# define       SIZE_T_MAX SIZE_MAX
-# define       STDERR_FILENO 2
-# define       PATH_MAX MAX_PATH
-# define       vsnprintf _vsnprintf
-# define       assert(f) /* we can't assert in the CRT */
+#ifdef MOZ_MEMORY_WINDOWS
+#ifndef MOZ_MEMORY_WINCE
+#include <cruntime.h>
+#include <internal.h>
+#include <io.h>
+#else
+#include <cmnintrin.h>
+#include <crtdefs.h>
+#define SIZE_MAX UINT_MAX
+#endif
+#include <windows.h>
+
+#pragma warning( disable: 4267 4996 4146 )
+
+#define        bool BOOL
+#define        false FALSE
+#define        true TRUE
+#define        inline __inline
+#define        SIZE_T_MAX SIZE_MAX
+#define        STDERR_FILENO 2
+#define        PATH_MAX MAX_PATH
+#define        vsnprintf _vsnprintf
 
+#ifndef NO_TLS
 static unsigned long tlsIndex = 0xffffffff;
+#endif 
 
-# define       __thread
-# define       _pthread_self() __threadid()
-# define       issetugid() 0
+#define        __thread
+#ifdef MOZ_MEMORY_WINCE
+#define        _pthread_self() GetCurrentThreadId()
+#else
+#define        _pthread_self() __threadid()
+#endif
+#define        issetugid() 0
 
+#ifndef MOZ_MEMORY_WINCE
 /* use MSVC intrinsics */
-# pragma intrinsic(_BitScanForward)
+#pragma intrinsic(_BitScanForward)
 static __forceinline int
 ffs(int x)
 {
@@ -220,132 +255,220 @@ ffs(int x)
        return (0);
 }
 
+/* Implement getenv without using malloc */
+static char mozillaMallocOptionsBuf[64];
+
+#define        getenv xgetenv
+static char *
+getenv(const char *name)
+{
+
+       if (GetEnvironmentVariableA(name, (LPSTR)&mozillaMallocOptionsBuf,
+                   sizeof(mozillaMallocOptionsBuf)) > 0)
+               return (mozillaMallocOptionsBuf);
+
+       return (NULL);
+}
+
+#else /* WIN CE */
+
+#define ENOMEM          12
+#define EINVAL          22
+
+static __forceinline int
+ffs(int x)
+{
+
+       return 32 - _CountLeadingZeros((-x) & x);
+}
+#endif
+
 typedef unsigned char uint8_t;
 typedef unsigned uint32_t;
 typedef unsigned long long uint64_t;
 typedef unsigned long long uintmax_t;
+#if defined(MOZ_MEMORY_SIZEOF_PTR_2POW) && (MOZ_MEMORY_SIZEOF_PTR_2POW == 3)
+typedef long long ssize_t;
+#else
+typedef long ssize_t;
+#endif
 
-# define       MALLOC_DECOMMIT
-#endif  /* end of WIN32 */
-
-#ifdef HAVE_PTHREADS
-# include <pthread.h>
+#define        MALLOC_DECOMMIT
 #endif
 
-#ifndef WIN32
-# include <sys/cdefs.h>
-# ifndef __DECONST
+#ifndef MOZ_MEMORY_WINDOWS
+#ifndef MOZ_MEMORY_SOLARIS
+#include <sys/cdefs.h>
+#endif
+#ifndef __DECONST
 #  define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
-# endif
-# include <sys/mman.h>
-# ifndef MADV_FREE
+#endif
+#ifndef MOZ_MEMORY
+__FBSDID("$FreeBSD: head/lib/libc/stdlib/malloc.c 180599 2008-07-18 19:35:44Z jasone $");
+#include "libc_private.h"
+#ifdef MALLOC_DEBUG
+#  define _LOCK_DEBUG
+#endif
+#include "spinlock.h"
+#include "namespace.h"
+#endif
+#include <sys/mman.h>
+#ifndef MADV_FREE
 #  define MADV_FREE    MADV_DONTNEED
-# endif
-# include <sys/param.h>
-# include <sys/time.h>
-# include <sys/types.h>
-# include <sys/sysctl.h>
-# include "jemtree.h"
-# include <sys/uio.h>
-
-# include <errno.h>
-# include <limits.h>
-# ifndef SIZE_T_MAX
+#endif
+#ifndef MAP_NOSYNC
+#  define MAP_NOSYNC   0
+#endif
+#include <sys/param.h>
+#ifndef MOZ_MEMORY
+#include <sys/stddef.h>
+#endif
+#include <sys/time.h>
+#include <sys/types.h>
+#if !defined(MOZ_MEMORY_SOLARIS) && !defined(MOZ_MEMORY_ANDROID)
+#include <sys/sysctl.h>
+#endif
+#include <sys/uio.h>
+#ifndef MOZ_MEMORY
+#include <sys/ktrace.h> /* Must come after several other sys/ includes. */
+
+#include <machine/atomic.h>
+#include <machine/cpufunc.h>
+#include <machine/vmparam.h>
+#endif
+
+#include <errno.h>
+#include <limits.h>
+#ifndef SIZE_T_MAX
 #  define SIZE_T_MAX   SIZE_MAX
-# endif
-# if defined(DARWIN) || defined(LINUX_HOST)
-#  define _pthread_self pthread_self
-#  define _pthread_mutex_init pthread_mutex_init
-#  define _pthread_mutex_trylock pthread_mutex_trylock
-#  define _pthread_mutex_lock pthread_mutex_lock
-#  define _pthread_mutex_unlock pthread_mutex_unlock
-# endif
-# include <sched.h>
-# include <stdarg.h>
-# include <stdbool.h>
-# include <stdio.h>
-# include <stdint.h>
-# include <stdlib.h>
-# include <string.h>
-# ifndef DARWIN
-#  include <strings.h>
-# endif
-# include <unistd.h>
-
-# ifdef DARWIN
-#  include <libkern/OSAtomic.h>
-#  include <mach/mach_error.h>
-#  include <mach/mach_init.h>
-#  include <mach/vm_map.h>
-#  include <malloc/malloc.h>
-# endif  /* end of DARWIN */
-#endif   /* end of WIN32 */
-
-
-#ifdef DARWIN
-DSOEXPORT void *moz_memalign(size_t alignment, size_t size);
-DSOEXPORT size_t moz_malloc_usable_size(const void *ptr)
-DSOEXPORT void *moz_calloc(size_t num, size_t size);
-DSOEXPORT void *moz_realloc(void *ptr, size_t size);
-DSOEXPORT void *moz_valloc(size_t size);
-DSOEXPORT size_t moz_malloc_usable_size(const void *ptr);
-#else
-DSOEXPORT void *memalign(size_t alignment, size_t size);
-DSOEXPORT size_t malloc_usable_size(const void *ptr);
-DSOEXPORT void *calloc(size_t num, size_t size);
-DSOEXPORT void *realloc(void *ptr, size_t size);
-DSOEXPORT void *valloc(size_t size);
-DSOEXPORT size_t malloc_usable_size(const void *ptr);
 #endif
-void _malloc_postfork(void);
-void _malloc_prefork(void);
+#include <pthread.h>
+#ifdef MOZ_MEMORY_DARWIN
+#define _pthread_self pthread_self
+#define _pthread_mutex_init pthread_mutex_init
+#define _pthread_mutex_trylock pthread_mutex_trylock
+#define _pthread_mutex_lock pthread_mutex_lock
+#define _pthread_mutex_unlock pthread_mutex_unlock
+#endif
+#include <sched.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef MOZ_MEMORY_DARWIN
+#include <strings.h>
+#endif
+#include <unistd.h>
 
-#ifdef DARWIN
-static const bool g_isthreaded = true;
+#ifdef MOZ_MEMORY_DARWIN
+#include <libkern/OSAtomic.h>
+#include <mach/mach_error.h>
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#include <malloc/malloc.h>
 #endif
 
-#define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
+#ifndef MOZ_MEMORY
+#include "un-namespace.h"
+#endif
 
-#ifdef MALLOC_DEBUG
-# ifdef NDEBUG
-#  undef NDEBUG
-# endif
+#endif
+
+#include "jemalloc.h"
+
+/* Some tools, such as /dev/dsp wrappers, LD_PRELOAD libraries that
+ * happen to override mmap() and call dlsym() from their overridden
+ * mmap(). The problem is that dlsym() calls malloc(), and this ends
+ * up in a dead lock in jemalloc.
+ * On these systems, we prefer to directly use the system call.
+ * We do that for Linux systems and kfreebsd with GNU userland.
+ * Note sanity checks are not done (alignment of offset, ...) because
+ * the uses of mmap are pretty limited, in jemalloc.
+ *
+ * On Alpha, glibc has a bug that prevents syscall() to work for system
+ * calls with 6 arguments
+ */
+#if (defined(MOZ_MEMORY_LINUX) && !defined(__alpha__)) || \
+    (defined(MOZ_MEMORY_BSD) && defined(__GLIBC__))
+#include <sys/syscall.h>
+#if defined(SYS_mmap) || defined(SYS_mmap2)
+static inline
+void *_mmap(void *addr, size_t length, int prot, int flags,
+            int fd, off_t offset)
+{
+/* S390 only passes one argument to the mmap system call, which is a
+ * pointer to a structure containing the arguments */
+#ifdef __s390__
+       struct {
+               void *addr;
+               size_t length;
+               int prot;
+               int flags;
+               int fd;
+               off_t offset;
+       } args = { addr, length, prot, flags, fd, offset };
+       return (void *) syscall(SYS_mmap, &args);
+#else
+#ifdef SYS_mmap2
+       return (void *) syscall(SYS_mmap2, addr, length, prot, flags,
+                              fd, offset >> 12);
 #else
-# ifndef NDEBUG
-#  define NDEBUG
-# endif
+       return (void *) syscall(SYS_mmap, addr, length, prot, flags,
+                               fd, offset);
+#endif
+#endif
+}
+#define mmap _mmap
+#define munmap(a, l) syscall(SYS_munmap, a, l)
+#endif
+#endif
+
+#ifdef MOZ_MEMORY_DARWIN
+static const bool __isthreaded = true;
+#endif
+
+#if defined(MOZ_MEMORY_SOLARIS) && defined(MAP_ALIGN) && !defined(JEMALLOC_NEVER_USES_MAP_ALIGN)
+#define JEMALLOC_USES_MAP_ALIGN         /* Required on Solaris 10. Might improve performance elsewhere. */
 #endif
-#ifndef WIN32
-# include <assert.h>
+
+#if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6)
+#define JEMALLOC_USES_MAP_ALIGN         /* Required for Windows CE < 6 */
+#endif
+
+#define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
+
+#ifdef MOZ_MEMORY_WINDOWS
+   /* MSVC++ does not support C99 variable-length arrays. */
+#  define RB_NO_C99_VARARRAYS
 #endif
+#include "rb.h"
 
 #ifdef MALLOC_DEBUG
-/* Disable inlining to make debugging easier. */
-# ifdef inline
-#  undef inline
-# endif
+   /* Disable inlining to make debugging easier. */
+#ifdef inline
+#undef inline
+#endif
 
-# define inline
-#endif  /* end of MALLOC_DEBUG */
+#  define inline
+#endif
 
 /* Size of stack-allocated buffer passed to strerror_r(). */
 #define        STRERROR_BUF            64
 
 /* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */
-#define QUANTUM_2POW_MIN      4
+#  define QUANTUM_2POW_MIN      4
 #ifdef MOZ_MEMORY_SIZEOF_PTR_2POW
-# define SIZEOF_PTR_2POW               MOZ_MEMORY_SIZEOF_PTR_2POW
+#  define SIZEOF_PTR_2POW              MOZ_MEMORY_SIZEOF_PTR_2POW
 #else
-# define SIZEOF_PTR_2POW       2
-#endif
-// __as __isthreaded is already defined on FreeBSD with a different value that does
-// the same thing, rename our version to be unique. Althought jemalloc is the default
-// allocator on FreeBSD< we still want to use our own version, as it has additional Gnash
-// specific tweaks.
-#ifndef DARWIN
-static const bool g_isthreaded = true;
+#  define SIZEOF_PTR_2POW       2
+#endif
+#define PIC
+#ifndef MOZ_MEMORY_DARWIN
+static const bool __isthreaded = true;
 #else
-# define NO_TLS
+#  define NO_TLS
 #endif
 #if 0
 #ifdef __i386__
@@ -377,6 +500,11 @@ static const bool g_isthreaded = true;
 #  define SIZEOF_PTR_2POW      2
 #  define NO_TLS
 #endif
+#ifdef __mips__
+#  define QUANTUM_2POW_MIN     3
+#  define SIZEOF_PTR_2POW      2
+#  define NO_TLS
+#endif
 #ifdef __powerpc__
 #  define QUANTUM_2POW_MIN     4
 #  define SIZEOF_PTR_2POW      2
@@ -387,12 +515,12 @@ static const bool g_isthreaded = true;
 
 /* sizeof(int) == (1U << SIZEOF_INT_2POW). */
 #ifndef SIZEOF_INT_2POW
-# define SIZEOF_INT_2POW       2
+#  define SIZEOF_INT_2POW      2
 #endif
 
 /* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
 #if (!defined(PIC) && !defined(NO_TLS))
-# define NO_TLS
+#  define NO_TLS
 #endif
 
 #ifdef NO_TLS
@@ -400,20 +528,19 @@ static const bool g_isthreaded = true;
 #  ifdef MALLOC_BALANCE
 #    undef MALLOC_BALANCE
 #  endif
-   /* MALLOC_LAZY_FREE requires TLS. */
-#  ifdef MALLOC_LAZY_FREE
-#    undef MALLOC_LAZY_FREE
-#  endif
 #endif
 
 /*
  * Size and alignment of memory chunks that are allocated by the OS's virtual
  * memory system.
  */
+#if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6)
+#define        CHUNK_2POW_DEFAULT      21
+#else
 #define        CHUNK_2POW_DEFAULT      20
-
+#endif
 /* Maximum number of dirty pages per arena. */
-#define        DIRTY_MAX_DEFAULT       (1U << 9)
+#define        DIRTY_MAX_DEFAULT       (1U << 10)
 
 /*
  * Maximum size of L1 cache line.  This is used to avoid cache line aliasing,
@@ -455,27 +582,10 @@ static const bool g_isthreaded = true;
 #define        RUN_MAX_OVRHD           0x0000003dU
 #define        RUN_MAX_OVRHD_RELAX     0x00001800U
 
-/*
- * Put a cap on small object run size.  This overrides RUN_MAX_OVRHD.  Note
- * that small runs must be small enough that page offsets can fit within the
- * CHUNK_MAP_POS_MASK bits.
- */
+/* Put a cap on small object run size.  This overrides RUN_MAX_OVRHD. */
 #define        RUN_MAX_SMALL_2POW      15
 #define        RUN_MAX_SMALL           (1U << RUN_MAX_SMALL_2POW)
 
-#ifdef MALLOC_LAZY_FREE
-   /* Default size of each arena's lazy free cache. */
-#  define LAZY_FREE_2POW_DEFAULT 8
-   /*
-    * Number of pseudo-random probes to conduct before considering the cache to
-    * be overly full.  It takes on average n probes to detect fullness of
-    * (n-1)/n.  However, we are effectively doing multiple non-independent
-    * trials (each deallocation is a trial), so the actual average threshold
-    * for clearing the cache is somewhat lower.
-    */
-#  define LAZY_FREE_NPROBES    5
-#endif
-
 /*
  * Hyper-threaded CPUs may need a special instruction inside spin loops in
  * order to yield to another virtual CPU.  If no such instruction is defined
@@ -524,17 +634,17 @@ static const bool g_isthreaded = true;
  * places, because they require malloc()ed memory, which causes bootstrapping
  * issues in some cases.
  */
-#if defined(WIN32)
+#if defined(MOZ_MEMORY_WINDOWS)
 #define malloc_mutex_t CRITICAL_SECTION
 #define malloc_spinlock_t CRITICAL_SECTION
-#elif defined(DARWIN)
+#elif defined(MOZ_MEMORY_DARWIN)
 typedef struct {
        OSSpinLock      lock;
 } malloc_mutex_t;
 typedef struct {
        OSSpinLock      lock;
 } malloc_spinlock_t;
-#elif defined(USE_JEMALLOC)
+#elif defined(MOZ_MEMORY)
 typedef pthread_mutex_t malloc_mutex_t;
 typedef pthread_mutex_t malloc_spinlock_t;
 #else
@@ -548,13 +658,13 @@ typedef malloc_spinlock_t malloc_mutex_t;
 /* Set to true once the allocator has been initialized. */
 static bool malloc_initialized = false;
 
-#ifdef WIN32
+#if defined(MOZ_MEMORY_WINDOWS)
 /* No init lock for Windows. */
-#elif defined(DARWIN)
+#elif defined(MOZ_MEMORY_DARWIN)
 static malloc_mutex_t init_lock = {OS_SPINLOCK_INIT};
-#elif defined(LINUX_HOST)
+#elif defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
 static malloc_mutex_t init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
-#elif defined(USE_JEMALLOC)
+#elif defined(MOZ_MEMORY)
 static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
 #else
 static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
@@ -567,31 +677,6 @@ static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
 
 #ifdef MALLOC_STATS
 
-/* Borrowed from malloc.h, as this is Linux specific. This has been
- * added to jemalloc so the existing memory profiling in Gnash will
- * continue to work. Most of these fields aren't used by the Gnash
- * memory profiling, but we leave them here for a semblance of
- * portability. The only fields Gnash uses are arena, uordblks. and
- * fordblks.
- *
- * This gets more interesting, as jemalloc maintains multiple
- * arenas, one for each CPU in a multiprocessor system. We cheat
- * by just accumulating the stats for all arenas, since our primary
- * purpose is to track memory leaks.
- */
-struct mallinfo {
-  int arena;    /* non-mmapped space allocated from system */
-  int ordblks;  /* number of free chunks UNUSED */
-  int smblks;   /* number of fastbin blocks UNUSED */
-  int hblks;    /* number of mmapped regions UNUSED */
-  int hblkhd;   /* space in mmapped regions UNUSED */
-  int usmblks;  /* maximum total allocated space UNUSED */
-  int fsmblks;  /* space available in freed fastbin blocks UNUSED */
-  int uordblks; /* total allocated space */
-  int fordblks; /* total free space */
-  int keepcost; /* top-most, releasable space UNUSED */
-};
-
 typedef struct malloc_bin_stats_s malloc_bin_stats_t;
 struct malloc_bin_stats_s {
        /*
@@ -637,6 +722,9 @@ struct arena_stats_s {
        uint64_t        ndecommit;
        uint64_t        ncommit;
        uint64_t        decommitted;
+
+       /* Current number of committed pages. */
+       size_t          committed;
 #endif
 
        /* Per-size-category statistics. */
@@ -681,10 +769,10 @@ struct chunk_stats_s {
 typedef struct extent_node_s extent_node_t;
 struct extent_node_s {
        /* Linkage for the size/address-ordered tree. */
-       RB_ENTRY(extent_node_s) link_szad;
+       rb_node(extent_node_t) link_szad;
 
        /* Linkage for the address-ordered tree. */
-       RB_ENTRY(extent_node_s) link_ad;
+       rb_node(extent_node_t) link_ad;
 
        /* Pointer to the extent that this tree node is responsible for. */
        void    *addr;
@@ -692,10 +780,32 @@ struct extent_node_s {
        /* Total region size. */
        size_t  size;
 };
-typedef struct extent_tree_szad_s extent_tree_szad_t;
-RB_HEAD(extent_tree_szad_s, extent_node_s);
-typedef struct extent_tree_ad_s extent_tree_ad_t;
-RB_HEAD(extent_tree_ad_s, extent_node_s);
+typedef rb_tree(extent_node_t) extent_tree_t;
+
+/******************************************************************************/
+/*
+ * Radix tree data structures.
+ */
+
+#ifdef MALLOC_VALIDATE
+   /*
+    * Size of each radix tree node (must be a power of 2).  This impacts tree
+    * depth.
+    */
+#  if (SIZEOF_PTR == 4)
+#    define MALLOC_RTREE_NODESIZE (1U << 14)
+#  else
+#    define MALLOC_RTREE_NODESIZE CACHELINE
+#  endif
+
+typedef struct malloc_rtree_s malloc_rtree_t;
+struct malloc_rtree_s {
+       malloc_spinlock_t       lock;
+       void                    **root;
+       unsigned                height;
+       unsigned                level2bits[1]; /* Dynamically sized. */
+};
+#endif
 
 /******************************************************************************/
 /*
@@ -705,20 +815,71 @@ RB_HEAD(extent_tree_ad_s, extent_node_s);
 typedef struct arena_s arena_t;
 typedef struct arena_bin_s arena_bin_t;
 
-/*
- * Each map element contains several flags, plus page position for runs that
- * service small allocations.
- */
-typedef uint8_t arena_chunk_map_t;
-#define        CHUNK_MAP_UNTOUCHED     0x80U
-#define        CHUNK_MAP_DIRTY         0x40U
-#define        CHUNK_MAP_LARGE         0x20U
+/* Each element of the chunk map corresponds to one page within the chunk. */
+typedef struct arena_chunk_map_s arena_chunk_map_t;
+struct arena_chunk_map_s {
+       /*
+        * Linkage for run trees.  There are two disjoint uses:
+        *
+        * 1) arena_t's runs_avail tree.
+        * 2) arena_run_t conceptually uses this linkage for in-use non-full
+        *    runs, rather than directly embedding linkage.
+        */
+       rb_node(arena_chunk_map_t)      link;
+
+       /*
+        * Run address (or size) and various flags are stored together.  The bit
+        * layout looks like (assuming 32-bit system):
+        *
+        *   ???????? ???????? ????---- --ckdzla
+        *
+        * ? : Unallocated: Run address for first/last pages, unset for internal
+        *                  pages.
+        *     Small: Run address.
+        *     Large: Run size for first page, unset for trailing pages.
+        * - : Unused.
+        * c : decommitted?
+        * k : key?
+        * d : dirty?
+        * z : zeroed?
+        * l : large?
+        * a : allocated?
+        *
+        * Following are example bit patterns for the three types of runs.
+        *
+        * r : run address
+        * s : run size
+        * x : don't care
+        * - : 0
+        * [cdzla] : bit set
+        *
+        *   Unallocated:
+        *     ssssssss ssssssss ssss---- --c-----
+        *     xxxxxxxx xxxxxxxx xxxx---- ----d---
+        *     ssssssss ssssssss ssss---- -----z--
+        *
+        *   Small:
+        *     rrrrrrrr rrrrrrrr rrrr---- -------a
+        *     rrrrrrrr rrrrrrrr rrrr---- -------a
+        *     rrrrrrrr rrrrrrrr rrrr---- -------a
+        *
+        *   Large:
+        *     ssssssss ssssssss ssss---- ------la
+        *     -------- -------- -------- ------la
+        *     -------- -------- -------- ------la
+        */
+       size_t                          bits;
 #ifdef MALLOC_DECOMMIT
-#define        CHUNK_MAP_DECOMMITTED   0x10U
-#define        CHUNK_MAP_POS_MASK      0x0fU
-#else
-#define        CHUNK_MAP_POS_MASK      0x1fU
+#define        CHUNK_MAP_DECOMMITTED   ((size_t)0x20U)
 #endif
+#define        CHUNK_MAP_KEY           ((size_t)0x10U)
+#define        CHUNK_MAP_DIRTY         ((size_t)0x08U)
+#define        CHUNK_MAP_ZEROED        ((size_t)0x04U)
+#define        CHUNK_MAP_LARGE         ((size_t)0x02U)
+#define        CHUNK_MAP_ALLOCATED     ((size_t)0x01U)
+};
+typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
+typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
 
 /* Arena chunk header. */
 typedef struct arena_chunk_s arena_chunk_t;
@@ -726,40 +887,19 @@ struct arena_chunk_s {
        /* Arena that owns the chunk. */
        arena_t         *arena;
 
-       /* Linkage for the arena's chunk tree. */
-       RB_ENTRY(arena_chunk_s) link;
-
-       /*
-        * Number of pages in use.  This is maintained in order to make
-        * detection of empty chunks fast.
-        */
-       size_t          pages_used;
+       /* Linkage for the arena's chunks_dirty tree. */
+       rb_node(arena_chunk_t) link_dirty;
 
        /* Number of dirty pages. */
        size_t          ndirty;
 
-       /*
-        * Tree of extent nodes that are embedded in the arena chunk header
-        * page(s).  These nodes are used by arena_chunk_node_alloc().
-        */
-       extent_tree_ad_t nodes;
-       extent_node_t   *nodes_past;
-
-       /*
-        * Map of pages within chunk that keeps track of free/large/small.  For
-        * free runs, only the map entries for the first and last pages are
-        * kept up to date, so that free runs can be quickly coalesced.
-        */
+       /* Map of pages within chunk that keeps track of free/large/small. */
        arena_chunk_map_t map[1]; /* Dynamically sized. */
 };
-typedef struct arena_chunk_tree_s arena_chunk_tree_t;
-RB_HEAD(arena_chunk_tree_s, arena_chunk_s);
+typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
 
 typedef struct arena_run_s arena_run_t;
 struct arena_run_s {
-       /* Linkage for run trees. */
-       RB_ENTRY(arena_run_s) link;
-
 #ifdef MALLOC_DEBUG
        uint32_t        magic;
 #  define ARENA_RUN_MAGIC 0x384adf93
@@ -777,8 +917,6 @@ struct arena_run_s {
        /* Bitmask of in-use regions (0: in use, 1: free). */
        unsigned        regs_mask[1]; /* Dynamically sized. */
 };
-typedef struct arena_run_tree_s arena_run_tree_t;
-RB_HEAD(arena_run_tree_s, arena_run_s);
 
 struct arena_bin_s {
        /*
@@ -824,21 +962,23 @@ struct arena_s {
 #endif
 
        /* All operations on this arena require that lock be locked. */
+#ifdef MOZ_MEMORY
        malloc_spinlock_t       lock;
+#else
+       pthread_mutex_t         lock;
+#endif
 
 #ifdef MALLOC_STATS
        arena_stats_t           stats;
 #endif
 
-       /*
-        * Tree of chunks this arena manages.
-        */
-       arena_chunk_tree_t      chunks;
+       /* Tree of dirty-page-containing chunks this arena manages. */
+       arena_chunk_tree_t      chunks_dirty;
 
        /*
         * In order to avoid rapid chunk allocation/deallocation when an arena
         * oscillates right on the cusp of needing a new chunk, cache the most
-        * recently freed chunk.  The spare is left in the arena's chunk tree
+        * recently freed chunk.  The spare is left in the arena's chunk trees
         * until it is deleted.
         *
         * There is one spare chunk per arena, rather than one spare total, in
@@ -856,15 +996,10 @@ struct arena_s {
        size_t                  ndirty;
 
        /*
-        * Trees of this arena's available runs.  Two trees are maintained
-        * using one set of nodes, since one is needed for first-best-fit run
-        * allocation, and the other is needed for coalescing.
+        * Size/address-ordered tree of this arena's available runs.  This tree
+        * is used for first-best-fit run allocation.
         */
-       extent_tree_szad_t      runs_avail_szad;
-       extent_tree_ad_t        runs_avail_ad;
-
-       /* Tree of this arena's allocated (in-use) runs. */
-       extent_tree_ad_t        runs_alloced_ad;
+       arena_avail_tree_t      runs_avail;
 
 #ifdef MALLOC_BALANCE
        /*
@@ -874,16 +1009,6 @@ struct arena_s {
        uint32_t                contention;
 #endif
 
-#ifdef MALLOC_LAZY_FREE
-       /*
-        * Deallocation of small objects can be lazy, in which case free_cache
-        * stores pointers to those objects that have not yet been deallocated.
-        * In order to avoid lock contention, slots are chosen randomly.  Empty
-        * slots contain NULL.
-        */
-       void                    **free_cache;
-#endif
-
        /*
         * bins is used to store rings of free regions of the following sizes,
         * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
@@ -915,8 +1040,10 @@ struct arena_s {
  * Data.
  */
 
+#ifndef MOZ_MEMORY_NARENAS_DEFAULT_ONE
 /* Number of CPUs. */
 static unsigned                ncpus;
+#endif
 
 /* VM page size. */
 static size_t          pagesize;
@@ -947,34 +1074,15 @@ static size_t            arena_maxclass; /* Max size class for arenas. */
  * Chunks.
  */
 
+#ifdef MALLOC_VALIDATE
+static malloc_rtree_t *chunk_rtree;
+#endif
+
 /* Protects chunk-related data structures. */
 static malloc_mutex_t  huge_mtx;
 
 /* Tree of chunks that are stand-alone huge allocations. */
-static extent_tree_ad_t        huge;
-
-#ifdef MALLOC_DSS
-/*
- * Protects sbrk() calls.  This avoids malloc races among threads, though it
- * does not protect against races with threads that call sbrk() directly.
- */
-static malloc_mutex_t  dss_mtx;
-/* Base address of the DSS. */
-static void            *dss_base;
-/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
-static void            *dss_prev;
-/* Current upper limit on DSS addresses. */
-static void            *dss_max;
-
-/*
- * Trees of chunks that were previously allocated (trees differ only in node
- * ordering).  These are used when allocating chunks, in an attempt to re-use
- * address space.  Depending on function, different tree orderings are needed,
- * which is why there are two trees with the same contents.
- */
-static extent_tree_szad_t dss_chunks_szad;
-static extent_tree_ad_t        dss_chunks_ad;
-#endif
+static extent_tree_t   huge;
 
 #ifdef MALLOC_STATS
 /* Huge allocation statistics. */
@@ -983,6 +1091,10 @@ static uint64_t           huge_ndalloc;
 static size_t          huge_allocated;
 #endif
 
+#ifdef MALLOC_PAGEFILE
+static char            pagefile_templ[PATH_MAX];
+#endif
+
 /****************************/
 /*
  * base (internal allocation).
@@ -995,11 +1107,17 @@ static size_t            huge_allocated;
  */
 static void            *base_pages;
 static void            *base_next_addr;
+#ifdef MALLOC_DECOMMIT
+static void            *base_next_decommitted;
+#endif
 static void            *base_past_addr; /* Addr immediately past base_pages. */
 static extent_node_t   *base_nodes;
 static malloc_mutex_t  base_mtx;
 #ifdef MALLOC_STATS
 static size_t          base_mapped;
+#  ifdef MALLOC_DECOMMIT
+static size_t          base_committed;
+#  endif
 #endif
 
 /********/
@@ -1020,14 +1138,18 @@ static unsigned         narenas_2pow;
 static unsigned                next_arena;
 #  endif
 #endif
+#ifdef MOZ_MEMORY
 static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
+#else
+static pthread_mutex_t arenas_lock; /* Protects arenas initialization. */
+#endif
 
 #ifndef NO_TLS
 /*
  * Map of pthread_self() --> arenas[???], used for selecting an arena to use
  * for allocations.
  */
-#ifdef HAVE_LOCAL_THREAD_STORAGE
+#ifndef MOZ_MEMORY_WINDOWS
 static __thread arena_t        *arenas_map;
 #endif
 #endif
@@ -1041,15 +1163,7 @@ static chunk_stats_t     stats_chunks;
 /*
  * Runtime configuration options.
  */
-const char     *_malloc_options
-#ifdef WIN32
-= "A10n2F"
-#elif (defined(DARWIN))
-= "AP10n"
-#elif (defined(LINUX_HOST))
-= "A10n2F"
-#endif
-;
+const char     *_malloc_options;
 
 #ifndef MALLOC_PRODUCTION
 static bool    opt_abort = true;
@@ -1062,24 +1176,17 @@ static bool     opt_abort = false;
 static bool    opt_junk = false;
 #endif
 #endif
-#ifdef MALLOC_DSS
-static bool    opt_dss = true;
-static bool    opt_mmap = true;
-#endif
 static size_t  opt_dirty_max = DIRTY_MAX_DEFAULT;
-#ifdef MALLOC_LAZY_FREE
-static int     opt_lazy_free_2pow = LAZY_FREE_2POW_DEFAULT;
-#endif
 #ifdef MALLOC_BALANCE
 static uint64_t        opt_balance_threshold = BALANCE_THRESHOLD_DEFAULT;
 #endif
-/*
- * this toggles the printing of statistics when the program exists.
- */
 static bool    opt_print_stats = false;
 static size_t  opt_quantum_2pow = QUANTUM_2POW_MIN;
 static size_t  opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
 static size_t  opt_chunk_2pow = CHUNK_2POW_DEFAULT;
+#ifdef MALLOC_PAGEFILE
+static bool    opt_pagefile = false;
+#endif
 #ifdef MALLOC_UTRACE
 static bool    opt_utrace = false;
 #endif
@@ -1118,21 +1225,18 @@ typedef struct {
  * Begin function prototypes for non-inline static functions.
  */
 
+static char    *umax2s(uintmax_t x, char *s);
 static bool    malloc_mutex_init(malloc_mutex_t *mutex);
 static bool    malloc_spin_init(malloc_spinlock_t *lock);
 static void    wrtmessage(const char *p1, const char *p2, const char *p3,
                const char *p4);
 #ifdef MALLOC_STATS
-#ifdef DARWIN
+#ifdef MOZ_MEMORY_DARWIN
 /* Avoid namespace collision with OS X's malloc APIs. */
-#define malloc_printf xmalloc_printf
+#define malloc_printf moz_malloc_printf
 #endif
 static void    malloc_printf(const char *format, ...);
 #endif
-static char    *umax2s(uintmax_t x, char *s);
-#ifdef MALLOC_DSS
-static bool    base_pages_alloc_dss(size_t minsize);
-#endif
 static bool    base_pages_alloc_mmap(size_t minsize);
 static bool    base_pages_alloc(size_t minsize);
 static void    *base_alloc(size_t size);
@@ -1142,39 +1246,31 @@ static void     base_node_dealloc(extent_node_t *node);
 #ifdef MALLOC_STATS
 static void    stats_print(arena_t *arena);
 #endif
-static void    *pages_map(void *addr, size_t size);
+static void    *pages_map(void *addr, size_t size, int pfd);
 static void    pages_unmap(void *addr, size_t size);
-#ifdef MALLOC_DSS
-static void    *chunk_alloc_dss(size_t size);
-static void    *chunk_recycle_dss(size_t size, bool zero);
-#endif
-static void    *chunk_alloc_mmap(size_t size);
-static void    *chunk_alloc(size_t size, bool zero);
-#ifdef MALLOC_DSS
-static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);
-static bool    chunk_dealloc_dss(void *chunk, size_t size);
+static void    *chunk_alloc_mmap(size_t size, bool pagefile);
+#ifdef MALLOC_PAGEFILE
+static int     pagefile_init(size_t size);
+static void    pagefile_close(int pfd);
 #endif
+static void    *chunk_alloc(size_t size, bool zero, bool pagefile);
 static void    chunk_dealloc_mmap(void *chunk, size_t size);
 static void    chunk_dealloc(void *chunk, size_t size);
 #ifndef NO_TLS
 static arena_t *choose_arena_hard(void);
 #endif
-static extent_node_t *arena_chunk_node_alloc(arena_chunk_t *chunk);
-static void    arena_chunk_node_dealloc(arena_chunk_t *chunk,
-    extent_node_t *node);
 static void    arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
-    bool small, bool zero);
-static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
+    bool large, bool zero);
+static void arena_chunk_init(arena_t *arena, arena_chunk_t *chunk);
 static void    arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
-static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool small,
-    bool zero);
+static arena_run_t *arena_run_alloc(arena_t *arena, arena_bin_t *bin,
+    size_t size, bool large, bool zero);
 static void    arena_purge(arena_t *arena);
 static void    arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
 static void    arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
-    extent_node_t *nodeB, arena_run_t *run, size_t oldsize, size_t newsize);
+    arena_run_t *run, size_t oldsize, size_t newsize);
 static void    arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
-    extent_node_t *nodeA, arena_run_t *run, size_t oldsize, size_t newsize,
-    bool dirty);
+    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
 static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
 static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
 static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
@@ -1185,10 +1281,6 @@ static void      *arena_malloc_large(arena_t *arena, size_t size, bool zero);
 static void    *arena_palloc(arena_t *arena, size_t alignment, size_t size,
     size_t alloc_size);
 static size_t  arena_salloc(const void *ptr);
-#ifdef MALLOC_LAZY_FREE
-static void    arena_dalloc_lazy_hard(arena_t *arena, arena_chunk_t *chunk,
-    void *ptr, size_t pageind, arena_chunk_map_t *mapelm);
-#endif
 static void    arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk,
     void *ptr);
 static void    arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
@@ -1204,15 +1296,86 @@ static void     *huge_palloc(size_t alignment, size_t size);
 static void    *huge_ralloc(void *ptr, size_t size, size_t oldsize);
 static void    huge_dalloc(void *ptr);
 static void    malloc_print_stats(void);
-#ifndef WIN32
+#ifndef MOZ_MEMORY_WINDOWS
 static
 #endif
 bool           malloc_init_hard(void);
 
+void           _malloc_prefork(void);
+void           _malloc_postfork(void);
+
 /*
  * End function prototypes.
  */
 /******************************************************************************/
+
+/*
+ * umax2s() provides minimal integer printing functionality, which is
+ * especially useful for situations where allocation in vsnprintf() calls would
+ * potentially cause deadlock.
+ */
+#define        UMAX2S_BUFSIZE  21
+static char *
+umax2s(uintmax_t x, char *s)
+{
+       unsigned i;
+
+       i = UMAX2S_BUFSIZE - 1;
+       s[i] = '\0';
+       do {
+               i--;
+               s[i] = "0123456789"[x % 10];
+               x /= 10;
+       } while (x > 0);
+
+       return (&s[i]);
+}
+
+static void
+wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
+{
+#ifdef MOZ_MEMORY_WINCE
+       wchar_t buf[1024];
+#define WRT_PRINT(s) \
+       MultiByteToWideChar(CP_ACP, 0, s, -1, buf, 1024); \
+       OutputDebugStringW(buf)
+
+       WRT_PRINT(p1);
+       WRT_PRINT(p2);
+       WRT_PRINT(p3);
+       WRT_PRINT(p4);
+#else
+#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_WINDOWS)
+#define        _write  write
+#endif
+       _write(STDERR_FILENO, p1, (unsigned int) strlen(p1));
+       _write(STDERR_FILENO, p2, (unsigned int) strlen(p2));
+       _write(STDERR_FILENO, p3, (unsigned int) strlen(p3));
+       _write(STDERR_FILENO, p4, (unsigned int) strlen(p4));
+#endif
+
+}
+
+#define _malloc_message malloc_message
+
+void   (*_malloc_message)(const char *p1, const char *p2, const char *p3,
+           const char *p4) = wrtmessage;
+
+#ifdef MALLOC_DEBUG
+#  define assert(e) do {                                               \
+       if (!(e)) {                                                     \
+               char line_buf[UMAX2S_BUFSIZE];                          \
+               _malloc_message(__FILE__, ":", umax2s(__LINE__,         \
+                   line_buf), ": Failed assertion: ");                 \
+               _malloc_message("\"", #e, "\"\n", "");                  \
+               abort();                                                \
+       }                                                               \
+} while (0)
+#else
+#define assert(e)
+#endif
+
+/******************************************************************************/
 /*
  * Begin mutex.  We can't use normal pthread mutexes in all places, because
  * they require malloc()ed memory, which causes bootstrapping issues in some
@@ -1222,13 +1385,15 @@ bool            malloc_init_hard(void);
 static bool
 malloc_mutex_init(malloc_mutex_t *mutex)
 {
-#if defined(WIN32)
-       if (g_isthreaded)
+#if defined(MOZ_MEMORY_WINCE)
+       InitializeCriticalSection(mutex);
+#elif defined(MOZ_MEMORY_WINDOWS)
+       if (__isthreaded)
                if (! __crtInitCritSecAndSpinCount(mutex, _CRT_SPINCOUNT))
                        return (true);
-#elif defined(DARWIN)
+#elif defined(MOZ_MEMORY_DARWIN)
        mutex->lock = OS_SPINLOCK_INIT;
-#elif defined(LINUX_HOST)
+#elif defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
        pthread_mutexattr_t attr;
        if (pthread_mutexattr_init(&attr) != 0)
                return (true);
@@ -1238,7 +1403,7 @@ malloc_mutex_init(malloc_mutex_t *mutex)
                return (true);
        }
        pthread_mutexattr_destroy(&attr);
-#elif defined(USE_JEMALLOC)
+#elif defined(MOZ_MEMORY)
        if (pthread_mutex_init(mutex, NULL) != 0)
                return (true);
 #else
@@ -1253,14 +1418,14 @@ static inline void
 malloc_mutex_lock(malloc_mutex_t *mutex)
 {
 
-#if defined(WIN32)
+#if defined(MOZ_MEMORY_WINDOWS)
        EnterCriticalSection(mutex);
-#elif defined(DARWIN)
+#elif defined(MOZ_MEMORY_DARWIN)
        OSSpinLockLock(&mutex->lock);
-#elif defined(USE_JEMALLOC)
+#elif defined(MOZ_MEMORY)
        pthread_mutex_lock(mutex);
 #else
-       if (g_isthreaded)
+       if (__isthreaded)
                _SPINLOCK(&mutex->lock);
 #endif
 }
@@ -1269,14 +1434,14 @@ static inline void
 malloc_mutex_unlock(malloc_mutex_t *mutex)
 {
 
-#if defined(WIN32)
+#if defined(MOZ_MEMORY_WINDOWS)
        LeaveCriticalSection(mutex);
-#elif defined(DARWIN)
+#elif defined(MOZ_MEMORY_DARWIN)
        OSSpinLockUnlock(&mutex->lock);
-#elif defined(USE_JEMALLOC)
+#elif defined(MOZ_MEMORY)
        pthread_mutex_unlock(mutex);
 #else
-       if (g_isthreaded)
+       if (__isthreaded)
                _SPINUNLOCK(&mutex->lock);
 #endif
 }
@@ -1284,13 +1449,15 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
 static bool
 malloc_spin_init(malloc_spinlock_t *lock)
 {
-#if defined(WIN32)
-       if (g_isthreaded)
+#if defined(MOZ_MEMORY_WINCE)
+       InitializeCriticalSection(lock);
+#elif defined(MOZ_MEMORY_WINDOWS)
+       if (__isthreaded)
                if (! __crtInitCritSecAndSpinCount(lock, _CRT_SPINCOUNT))
                        return (true);
-#elif defined(DARWIN)
+#elif defined(MOZ_MEMORY_DARWIN)
        lock->lock = OS_SPINLOCK_INIT;
-#elif defined(LINUX_HOST)
+#elif defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
        pthread_mutexattr_t attr;
        if (pthread_mutexattr_init(&attr) != 0)
                return (true);
@@ -1300,7 +1467,7 @@ malloc_spin_init(malloc_spinlock_t *lock)
                return (true);
        }
        pthread_mutexattr_destroy(&attr);
-#elif defined(USE_JEMALLOC)
+#elif defined(MOZ_MEMORY)
        if (pthread_mutex_init(lock, NULL) != 0)
                return (true);
 #else
@@ -1313,14 +1480,14 @@ static inline void
 malloc_spin_lock(malloc_spinlock_t *lock)
 {
 
-#if defined(WIN32)
+#if defined(MOZ_MEMORY_WINDOWS)
        EnterCriticalSection(lock);
-#elif defined(DARWIN)
+#elif defined(MOZ_MEMORY_DARWIN)
        OSSpinLockLock(&lock->lock);
-#elif defined(USE_JEMALLOC)
+#elif defined(MOZ_MEMORY)
        pthread_mutex_lock(lock);
 #else
-       if (g_isthreaded)
+       if (__isthreaded)
                _SPINLOCK(&lock->lock);
 #endif
 }
@@ -1328,14 +1495,14 @@ malloc_spin_lock(malloc_spinlock_t *lock)
 static inline void
 malloc_spin_unlock(malloc_spinlock_t *lock)
 {
-#if defined(WIN32)
+#if defined(MOZ_MEMORY_WINDOWS)
        LeaveCriticalSection(lock);
-#elif defined(DARWIN)
+#elif defined(MOZ_MEMORY_DARWIN)
        OSSpinLockUnlock(&lock->lock);
-#elif defined(USE_JEMALLOC)
+#elif defined(MOZ_MEMORY)
        pthread_mutex_unlock(lock);
 #else
-       if (g_isthreaded)
+       if (__isthreaded)
                _SPINUNLOCK(&lock->lock);
 #endif
 }
@@ -1350,28 +1517,99 @@ malloc_spin_unlock(malloc_spinlock_t *lock)
  * priority inversion.
  */
 
-#ifndef DARWIN
+#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_DARWIN)
 #  define      malloc_spin_init        malloc_mutex_init
 #  define      malloc_spin_lock        malloc_mutex_lock
 #  define      malloc_spin_unlock      malloc_mutex_unlock
 #endif
 
+#ifndef MOZ_MEMORY
 /*
- * End spin lock.
+ * We use an unpublished interface to initialize pthread mutexes with an
+ * allocation callback, in order to avoid infinite recursion.
  */
+int    _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+    void *(calloc_cb)(size_t, size_t));
 
-/******************************************************************************/
-/*
- * Begin Utility functions/macros.
- */
+__weak_reference(_pthread_mutex_init_calloc_cb_stub,
+    _pthread_mutex_init_calloc_cb);
 
-/* Return the chunk address for allocation address a. */
-#define        CHUNK_ADDR2BASE(a)                                              \
-       ((void *)((uintptr_t)(a) & ~chunksize_mask))
+int
+_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex,
+    void *(calloc_cb)(size_t, size_t))
+{
 
-/* Return the chunk offset of address a. */
-#define        CHUNK_ADDR2OFFSET(a)                                            \
-       ((size_t)((uintptr_t)(a) & chunksize_mask))
+       return (0);
+}
+
+static bool
+malloc_spin_init(pthread_mutex_t *lock)
+{
+
+       if (_pthread_mutex_init_calloc_cb(lock, base_calloc) != 0)
+               return (true);
+
+       return (false);
+}
+
+static inline unsigned
+malloc_spin_lock(pthread_mutex_t *lock)
+{
+       unsigned ret = 0;
+
+       if (__isthreaded) {
+               if (_pthread_mutex_trylock(lock) != 0) {
+                       unsigned i;
+                       volatile unsigned j;
+
+                       /* Exponentially back off. */
+                       for (i = 1; i <= SPIN_LIMIT_2POW; i++) {
+                               for (j = 0; j < (1U << i); j++)
+                                       ret++;
+
+                               CPU_SPINWAIT;
+                               if (_pthread_mutex_trylock(lock) == 0)
+                                       return (ret);
+                       }
+
+                       /*
+                        * Spinning failed.  Block until the lock becomes
+                        * available, in order to avoid indefinite priority
+                        * inversion.
+                        */
+                       _pthread_mutex_lock(lock);
+                       assert((ret << BLOCK_COST_2POW) != 0);
+                       return (ret << BLOCK_COST_2POW);
+               }
+       }
+
+       return (ret);
+}
+
+static inline void
+malloc_spin_unlock(pthread_mutex_t *lock)
+{
+
+       if (__isthreaded)
+               _pthread_mutex_unlock(lock);
+}
+#endif
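
For illustration, the return value of the fallback malloc_spin_lock() above
approximates the cost of acquiring the lock, so a caller can treat it as a
contention signal. A minimal sketch, assuming the conventional constants
SPIN_LIMIT_2POW == 11 and BLOCK_COST_2POW == 4:

	unsigned cost;

	/* Assumes 'lock' was set up with malloc_spin_init(). */
	cost = malloc_spin_lock(&lock);
	/* cost == 0: the lock was uncontended.                        */
	/* cost <= 2 + 4 + ... + 2^11 == 4094: spinning sufficed.      */
	/* cost == spins << 4: spinning failed and the thread slept;   */
	/* blocking is billed at 2^BLOCK_COST_2POW times a spin.       */
	malloc_spin_unlock(&lock);
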
+
+/*
+ * End spin lock.
+ */
+/******************************************************************************/
+/*
+ * Begin Utility functions/macros.
+ */
+
+/* Return the chunk address for allocation address a. */
+#define        CHUNK_ADDR2BASE(a)                                              \
+       ((void *)((uintptr_t)(a) & ~chunksize_mask))
+
+/* Return the chunk offset of address a. */
+#define        CHUNK_ADDR2OFFSET(a)                                            \
+       ((size_t)((uintptr_t)(a) & chunksize_mask))
 
 /* Return the smallest chunk multiple that is >= s. */
 #define        CHUNK_CEILING(s)                                                \
@@ -1407,7 +1645,7 @@ pow2_ceil(size_t x)
        return (x);
 }
 
-#if (defined(MALLOC_LAZY_FREE) || defined(MALLOC_BALANCE))
+#ifdef MALLOC_BALANCE
 /*
  * Use a simple linear congruential pseudo-random number generator:
  *
@@ -1451,17 +1689,6 @@ prn_##suffix(uint32_t lg_range)                                \
 #  define PRN(suffix, lg_range)        prn_##suffix(lg_range)
 #endif
 
-/*
- * Define PRNGs, one for each purpose, in order to avoid auto-correlation
- * problems.
- */
-
-#ifdef MALLOC_LAZY_FREE
-/* Define the per-thread PRNG used for lazy deallocation. */
-static __thread uint32_t lazy_free_x;
-PRN_DEFINE(lazy_free, lazy_free_x, 12345, 12347)
-#endif
-
 #ifdef MALLOC_BALANCE
 /* Define the PRNG used for arena assignment. */
 static __thread uint32_t balance_x;
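
For reference, each PRN_DEFINE(suffix, var, a, c) use expands (outside this
hunk) to a generator built on the linear congruential recurrence described
above. A hedged sketch of that step, borrowing the constants of the removed
lazy_free generator purely as examples:

	static __thread uint32_t x;	/* per-thread state, seeded via SPRN() */

	static inline uint32_t
	prn_sketch(uint32_t lg_range)
	{

		x = (x * 12345U) + 12347U;	/* x' = a*x + c (mod 2^32) */
		/* Return the high bits; an LCG's high bits have the longest period. */
		return (x >> (32 - lg_range));
	}
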
@@ -1498,23 +1725,6 @@ _getprogname(void)
        return ("<jemalloc>");
 }
 
-static void
-wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
-{
-#ifndef WIN32
-#define        _write  write
-#endif
-       size_t ret =_write(STDERR_FILENO, p1, (unsigned int) strlen(p1));
-       ret = _write(STDERR_FILENO, p2, (unsigned int) strlen(p2));
-       ret = _write(STDERR_FILENO, p3, (unsigned int) strlen(p3));
-       ret = _write(STDERR_FILENO, p4, (unsigned int) strlen(p4));
-}
-
-#define _malloc_message malloc_message
-
-void   (*_malloc_message)(const char *p1, const char *p2, const char *p3,
-           const char *p4) = wrtmessage;
-
 #ifdef MALLOC_STATS
 /*
  * Print to stderr in such a way as to (hopefully) avoid memory allocation.
@@ -1522,6 +1732,7 @@ void      (*_malloc_message)(const char *p1, const char *p2, const char *p3,
 static void
 malloc_printf(const char *format, ...)
 {
+#ifndef WINCE
        char buf[4096];
        va_list ap;
 
@@ -1529,103 +1740,90 @@ malloc_printf(const char *format, ...)
        vsnprintf(buf, sizeof(buf), format, ap);
        va_end(ap);
        _malloc_message(buf, "", "", "");
-}
 #endif
-
-/*
- * We don't want to depend on vsnprintf() for production builds, since that can
- * cause unnecessary bloat for static binaries.  umax2s() provides minimal
- * integer printing functionality, so that malloc_printf() use can be limited to
- * MALLOC_STATS code.
- */
-#define        UMAX2S_BUFSIZE  21
-static char *
-umax2s(uintmax_t x, char *s)
-{
-       unsigned i;
-
-       /* Make sure UMAX2S_BUFSIZE is large enough. */
-       assert(sizeof(uintmax_t) <= 8);
-
-       i = UMAX2S_BUFSIZE - 1;
-       s[i] = '\0';
-       do {
-               i--;
-               s[i] = "0123456789"[x % 10];
-               x /= 10;
-       } while (x > 0);
-
-       return (&s[i]);
 }
+#endif
 
 
 /******************************************************************************/
 
-#ifdef MALLOC_DSS
-static bool
-base_pages_alloc_dss(size_t minsize)
+#ifdef MALLOC_DECOMMIT
+static inline void
+pages_decommit(void *addr, size_t size)
 {
 
-       /*
-        * Do special DSS allocation here, since base allocations don't need to
-        * be chunk-aligned.
-        */
-       malloc_mutex_lock(&dss_mtx);
-       if (dss_prev != (void *)-1) {
-               intptr_t incr;
-               size_t csize = CHUNK_CEILING(minsize);
-
-               do {
-                       /* Get the current end of the DSS. */
-                       dss_max = sbrk(0);
-
-                       /*
-                        * Calculate how much padding is necessary to
-                        * chunk-align the end of the DSS.  Don't worry about
-                        * dss_max not being chunk-aligned though.
-                        */
-                       incr = (intptr_t)chunksize
-                           - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
-                       assert(incr >= 0);
-                       if ((size_t)incr < minsize)
-                               incr += csize;
-
-                       dss_prev = sbrk(incr);
-                       if (dss_prev == dss_max) {
-                               /* Success. */
-                               dss_max = (void *)((intptr_t)dss_prev + incr);
-                               base_pages = dss_prev;
-                               base_next_addr = base_pages;
-                               base_past_addr = dss_max;
-#ifdef MALLOC_STATS
-                               base_mapped += incr;
+#ifdef MOZ_MEMORY_WINDOWS
+       VirtualFree(addr, size, MEM_DECOMMIT);
+#else
+       if (mmap(addr, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1,
+           0) == MAP_FAILED)
+               abort();
 #endif
-                               malloc_mutex_unlock(&dss_mtx);
-                               return (false);
-                       }
-               } while (dss_prev != (void *)-1);
-       }
-       malloc_mutex_unlock(&dss_mtx);
+}
 
-       return (true);
+static inline void
+pages_commit(void *addr, size_t size)
+{
+
+#  ifdef MOZ_MEMORY_WINDOWS
+       VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE);
+#  else
+       if (mmap(addr, size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE |
+           MAP_ANON, -1, 0) == MAP_FAILED)
+               abort();
+#  endif
 }
 #endif
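
As a usage sketch of the pair above (hypothetical call site): on POSIX the
PROT_NONE overlay releases the backing pages while keeping the address range
reserved, and committing maps fresh zero-filled pages back in.

	void *chunk = pages_map(NULL, chunksize, -1);

	pages_decommit(chunk, chunksize);	/* RSS drops; range stays reserved */
	pages_commit(chunk, chunksize);		/* zero-filled pages come back */
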
 
 static bool
 base_pages_alloc_mmap(size_t minsize)
 {
+       bool ret;
        size_t csize;
+#ifdef MALLOC_DECOMMIT
+       size_t pminsize;
+#endif
+       int pfd;
 
        assert(minsize != 0);
-       csize = PAGE_CEILING(minsize);
-       base_pages = pages_map(NULL, csize);
-       if (base_pages == NULL)
-               return (true);
+       csize = CHUNK_CEILING(minsize);
+#ifdef MALLOC_PAGEFILE
+       if (opt_pagefile) {
+               pfd = pagefile_init(csize);
+               if (pfd == -1)
+                       return (true);
+       } else
+#endif
+               pfd = -1;
+       base_pages = pages_map(NULL, csize, pfd);
+       if (base_pages == NULL) {
+               ret = true;
+               goto RETURN;
+       }
        base_next_addr = base_pages;
        base_past_addr = (void *)((uintptr_t)base_pages + csize);
+#ifdef MALLOC_DECOMMIT
+       /*
+        * Leave enough pages for minsize committed, since otherwise they would
+        * have to be immediately recommitted.
+        */
+       pminsize = PAGE_CEILING(minsize);
+       base_next_decommitted = (void *)((uintptr_t)base_pages + pminsize);
+       if (pminsize < csize)
+               pages_decommit(base_next_decommitted, csize - pminsize);
+#endif
 #ifdef MALLOC_STATS
        base_mapped += csize;
+#  ifdef MALLOC_DECOMMIT
+       base_committed += pminsize;
+#  endif
 #endif
 
+       ret = false;
+RETURN:
+#ifdef MALLOC_PAGEFILE
+       if (pfd != -1)
+               pagefile_close(pfd);
+#endif
-       return (false);
+       return (ret);
 }
 
@@ -1633,18 +1831,8 @@ static bool
 base_pages_alloc(size_t minsize)
 {
 
-#ifdef MALLOC_DSS
-       if (opt_dss) {
-               if (base_pages_alloc_dss(minsize) == false)
-                       return (false);
-       }
-
-       if (opt_mmap && minsize != 0)
-#endif
-       {
-               if (base_pages_alloc_mmap(minsize) == false)
-                       return (false);
-       }
+       if (base_pages_alloc_mmap(minsize) == false)
+               return (false);
 
        return (true);
 }
@@ -1661,13 +1849,31 @@ base_alloc(size_t size)
        malloc_mutex_lock(&base_mtx);
        /* Make sure there's enough space for the allocation. */
        if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
-               if (base_pages_alloc(csize))
+               if (base_pages_alloc(csize)) {
+                       malloc_mutex_unlock(&base_mtx);
                        return (NULL);
+               }
        }
        /* Allocate. */
        ret = base_next_addr;
        base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
+#ifdef MALLOC_DECOMMIT
+       /* Make sure enough pages are committed for the new allocation. */
+       if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
+               void *pbase_next_addr =
+                   (void *)(PAGE_CEILING((uintptr_t)base_next_addr));
+
+               pages_commit(base_next_decommitted, (uintptr_t)pbase_next_addr -
+                   (uintptr_t)base_next_decommitted);
+#  ifdef MALLOC_STATS
+               base_committed += (uintptr_t)pbase_next_addr -
+                   (uintptr_t)base_next_decommitted;
+#  endif
+               base_next_decommitted = pbase_next_addr;
+       }
+#endif
        malloc_mutex_unlock(&base_mtx);
+       VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, false);
 
        return (ret);
 }
@@ -1678,6 +1884,12 @@ base_calloc(size_t number, size_t size)
        void *ret;
 
        ret = base_alloc(number * size);
+#ifdef MALLOC_VALGRIND
+       if (ret != NULL) {
+               VALGRIND_FREELIKE_BLOCK(ret, 0);
+               VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, true);
+       }
+#endif
        memset(ret, 0, number * size);
 
        return (ret);
@@ -1692,6 +1904,8 @@ base_node_alloc(void)
        if (base_nodes != NULL) {
                ret = base_nodes;
                base_nodes = *(extent_node_t **)ret;
+               VALGRIND_FREELIKE_BLOCK(ret, 0);
+               VALGRIND_MALLOCLIKE_BLOCK(ret, sizeof(extent_node_t), 0, false);
                malloc_mutex_unlock(&base_mtx);
        } else {
                malloc_mutex_unlock(&base_mtx);
@@ -1706,6 +1920,8 @@ base_node_dealloc(extent_node_t *node)
 {
 
        malloc_mutex_lock(&base_mtx);
+       VALGRIND_FREELIKE_BLOCK(node, 0);
+       VALGRIND_MALLOCLIKE_BLOCK(node, sizeof(extent_node_t *), 0, false);
        *(extent_node_t **)node = base_nodes;
        base_nodes = node;
        malloc_mutex_unlock(&base_mtx);
@@ -1719,7 +1935,7 @@ stats_print(arena_t *arena)
 {
        unsigned i, gap_start;
 
-#ifdef WIN32
+#ifdef MOZ_MEMORY_WINDOWS
        malloc_printf("dirty: %Iu page%s dirty, %I64u sweep%s,"
            " %I64u madvise%s, %I64u page%s purged\n",
            arena->ndirty, arena->ndirty == 1 ? "" : "s",
@@ -1795,7 +2011,7 @@ stats_print(arena_t *arena)
                                gap_start = UINT_MAX;
                        }
                        malloc_printf(
-#if defined(WIN32)
+#if defined(MOZ_MEMORY_WINDOWS)
                            "%13u %1s %4u %4u %3u %9I64u %9I64u"
                            " %9I64u %7u %7u\n",
 #else
@@ -1852,9 +2068,9 @@ extent_szad_comp(extent_node_t *a, extent_node_t *b)
        return (ret);
 }
 
-/* Generate red-black tree code for size/address-ordered extents. */
-RB_GENERATE_STATIC(extent_tree_szad_s, extent_node_s, link_szad,
-    extent_szad_comp)
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, extent_tree_szad_, extent_tree_t, extent_node_t,
+    link_szad, extent_szad_comp)
 
 static inline int
 extent_ad_comp(extent_node_t *a, extent_node_t *b)
@@ -1865,9 +2081,9 @@ extent_ad_comp(extent_node_t *a, extent_node_t *b)
        return ((a_addr > b_addr) - (a_addr < b_addr));
 }
 
-/* Generate red-black tree code for address-ordered extents. */
-RB_GENERATE_STATIC(extent_tree_ad_s, extent_node_s, link_ad, extent_ad_comp)
-
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
+    extent_ad_comp)
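
For context, each rb_wrap() call above presumably generates a family of
static functions over the named tree, element type, and link field; a hedged
sketch of the interface the code below relies on, following the usual rb.h
naming:

	static void	extent_tree_ad_insert(extent_tree_t *, extent_node_t *);
	static void	extent_tree_ad_remove(extent_tree_t *, extent_node_t *);
	static extent_node_t *extent_tree_ad_search(extent_tree_t *, extent_node_t *key);
	static extent_node_t *extent_tree_ad_nsearch(extent_tree_t *, extent_node_t *key);

nsearch() returns the node matching the key or, failing that, the next
greater node, which is what the best-fit lookups elsewhere depend on.
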
 
 /*
  * End extent tree code.
@@ -1877,32 +2093,89 @@ RB_GENERATE_STATIC(extent_tree_ad_s, extent_node_s, link_ad, extent_ad_comp)
  * Begin chunk management functions.
  */
 
-#ifdef WIN32
+#ifdef MOZ_MEMORY_WINDOWS
+#ifdef MOZ_MEMORY_WINCE
+#define ALIGN_ADDR2OFFSET(al, ad) \
+       ((uintptr_t)ad & (al - 1))
 static void *
-pages_map(void *addr, size_t size)
+pages_map_align(size_t size, int pfd, size_t alignment)
 {
-       void *ret;
+       
+       void *ret; 
+       int offset;
+       if (size % alignment)
+               size += (alignment - (size % alignment));
+       assert(size >= alignment);
+       ret = pages_map(NULL, size, pfd);
+       offset = ALIGN_ADDR2OFFSET(alignment, ret);
+       if (offset) {  
+               /* try to over-allocate by the amount we're offset */
+               void *tmp;
+               pages_unmap(ret, size);
+               tmp = VirtualAlloc(NULL, size + alignment - offset, 
+                                        MEM_RESERVE, PAGE_NOACCESS);
+               if (offset == ALIGN_ADDR2OFFSET(alignment, tmp))
+                       ret = VirtualAlloc((void*)((intptr_t)tmp + alignment 
+                                                  - offset), size, MEM_COMMIT,
+                                          PAGE_READWRITE);
+               else 
+                       VirtualFree(tmp, 0, MEM_RELEASE);
+               offset = ALIGN_ADDR2OFFSET(alignment, ret);
+               
+       
+               if (offset) {  
+                       /* over-allocate to ensure we have an aligned region */
+                       ret = VirtualAlloc(NULL, size + alignment, MEM_RESERVE, 
+                                          PAGE_NOACCESS);
+                       offset = ALIGN_ADDR2OFFSET(alignment, ret);
+                       ret = VirtualAlloc((void*)((intptr_t)ret + 
+                                                  alignment - offset),
+                                          size, MEM_COMMIT, PAGE_READWRITE);
+               }
+       }
+       return (ret);
+}
+#endif
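
A worked example of the offset arithmetic above, with hypothetical numbers:

	/*
	 * With a 1 MiB alignment, a mapping at 0x01234000 gives
	 * ALIGN_ADDR2OFFSET(0x100000, 0x01234000) == 0x34000, so the
	 * aligned region must start alignment - offset == 0xcc000
	 * bytes further along, at 0x01300000.
	 */
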
 
+static void *
+pages_map(void *addr, size_t size, int pfd)
+{
+       void *ret = NULL;
+#if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6)
+       void *va_ret;
+       assert(addr == NULL);
+       va_ret = VirtualAlloc(addr, size, MEM_RESERVE, PAGE_NOACCESS);
+       if (va_ret)
+               ret = VirtualAlloc(va_ret, size, MEM_COMMIT, PAGE_READWRITE);
+       assert(va_ret == ret);
+#else
        ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
            PAGE_READWRITE);
-
+#endif
        return (ret);
 }
 
 static void
 pages_unmap(void *addr, size_t size)
 {
-
        if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
+#if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6)
+               if (GetLastError() == ERROR_INVALID_PARAMETER) {
+                       MEMORY_BASIC_INFORMATION info;
+                       VirtualQuery(addr, &info, sizeof(info));
+                       if (VirtualFree(info.AllocationBase, 0, MEM_RELEASE))
+                               return;
+               }
+#endif
                _malloc_message(_getprogname(),
                    ": (malloc) Error in VirtualFree()\n", "", "");
                if (opt_abort)
                        abort();
        }
 }
-#elif (defined(DARWIN))
+#elif (defined(MOZ_MEMORY_DARWIN))
 static void *
-pages_map(void *addr, size_t size)
+pages_map(void *addr, size_t size, int pfd)
 {
        void *ret;
        kern_return_t err;
@@ -1952,9 +2225,37 @@ pages_copy(void *dest, const void *src, size_t n)
        vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
            (vm_address_t)dest);
 }
-#else /* DARWIN */
+#else /* MOZ_MEMORY_DARWIN */
+#ifdef JEMALLOC_USES_MAP_ALIGN
+static void *
+pages_map_align(size_t size, int pfd, size_t alignment)
+{
+       void *ret;
+
+       /*
+        * We don't use MAP_FIXED here, because it can cause the *replacement*
+        * of existing mappings, and we only want to create new mappings.
+        */
+#ifdef MALLOC_PAGEFILE
+       if (pfd != -1) {
+               ret = mmap((void *)alignment, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
+                   MAP_NOSYNC | MAP_ALIGN, pfd, 0);
+       } else
+#endif
+              {
+               ret = mmap((void *)alignment, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
+                   MAP_NOSYNC | MAP_ALIGN | MAP_ANON, -1, 0);
+       }
+       assert(ret != NULL);
+
+       if (ret == MAP_FAILED)
+               ret = NULL;
+       return (ret);
+}
+#endif
+
 static void *
-pages_map(void *addr, size_t size)
+pages_map(void *addr, size_t size, int pfd)
 {
        void *ret;
 
@@ -1962,8 +2263,16 @@ pages_map(void *addr, size_t size)
         * We don't use MAP_FIXED here, because it can cause the *replacement*
         * of existing mappings, and we only want to create new mappings.
         */
-       ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-           -1, 0);
+#ifdef MALLOC_PAGEFILE
+       if (pfd != -1) {
+               ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
+                   MAP_NOSYNC, pfd, 0);
+       } else
+#endif
+              {
+               ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
+                   MAP_ANON, -1, 0);
+       }
        assert(ret != NULL);
 
        if (ret == MAP_FAILED)
@@ -2005,152 +2314,161 @@ pages_unmap(void *addr, size_t size)
 }
 #endif
 
-#ifdef MALLOC_DECOMMIT
-static inline void
-pages_decommit(void *addr, size_t size)
-{
-
-#ifdef WIN32
-       VirtualFree(addr, size, MEM_DECOMMIT);
-#else
-       if (mmap(addr, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1,
-           0) == MAP_FAILED)
-               abort();
-#endif
-}
-
-static inline void
-pages_commit(void *addr, size_t size)
-{
-
-#  ifdef WIN32
-       VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE);
-#  else
-       if (mmap(addr, size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE |
-           MAP_ANON, -1, 0) == MAP_FAILED)
-               abort();
-#  endif
-}
-#endif
-
-#ifdef MALLOC_DSS
-static void *
-chunk_alloc_dss(size_t size)
+#ifdef MALLOC_VALIDATE
+static inline malloc_rtree_t *
+malloc_rtree_new(unsigned bits)
 {
+       malloc_rtree_t *ret;
+       unsigned bits_per_level, height, i;
+
+       bits_per_level = ffs(pow2_ceil((MALLOC_RTREE_NODESIZE /
+           sizeof(void *)))) - 1;
+       height = bits / bits_per_level;
+       if (height * bits_per_level != bits)
+               height++;
+       assert(height * bits_per_level >= bits);
+
+       ret = (malloc_rtree_t*)base_calloc(1, sizeof(malloc_rtree_t) + (sizeof(unsigned) *
+           (height - 1)));
+       if (ret == NULL)
+               return (NULL);
 
-       malloc_mutex_lock(&dss_mtx);
-       if (dss_prev != (void *)-1) {
-               intptr_t incr;
+       malloc_spin_init(&ret->lock);
+       ret->height = height;
+       if (bits_per_level * height > bits)
+               ret->level2bits[0] = bits % bits_per_level;
+       else
+               ret->level2bits[0] = bits_per_level;
+       for (i = 1; i < height; i++)
+               ret->level2bits[i] = bits_per_level;
 
+       ret->root = (void**)base_calloc(1, sizeof(void *) << ret->level2bits[0]);
+       if (ret->root == NULL) {
                /*
-                * The loop is necessary to recover from races with other
-                * threads that are using the DSS for something other than
-                * malloc.
+                * We leak the rtree here, since there's no generic base
+                * deallocation.
                 */
-               do {
-                       void *ret;
-
-                       /* Get the current end of the DSS. */
-                       dss_max = sbrk(0);
+               return (NULL);
+       }
 
-                       /*
-                        * Calculate how much padding is necessary to
-                        * chunk-align the end of the DSS.
-                        */
-                       incr = (intptr_t)size
-                           - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
-                       if (incr == (intptr_t)size)
-                               ret = dss_max;
-                       else {
-                               ret = (void *)((intptr_t)dss_max + incr);
-                               incr += size;
-                       }
+       return (ret);
+}
 
-                       dss_prev = sbrk(incr);
-                       if (dss_prev == dss_max) {
-                               /* Success. */
-                               dss_max = (void *)((intptr_t)dss_prev + incr);
-                               malloc_mutex_unlock(&dss_mtx);
-                               return (ret);
-                       }
-               } while (dss_prev != (void *)-1);
+/* The least significant bits of the key are ignored. */
+static inline void *
+malloc_rtree_get(malloc_rtree_t *rtree, uintptr_t key)
+{
+       void *ret;
+       uintptr_t subkey;
+       unsigned i, lshift, height, bits;
+       void **node, **child;
+
+       malloc_spin_lock(&rtree->lock);
+       for (i = lshift = 0, height = rtree->height, node = rtree->root;
+           i < height - 1;
+           i++, lshift += bits, node = child) {
+               bits = rtree->level2bits[i];
+               subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
+               child = (void**)node[subkey];
+               if (child == NULL) {
+                       malloc_spin_unlock(&rtree->lock);
+                       return (NULL);
+               }
        }
-       malloc_mutex_unlock(&dss_mtx);
 
-       return (NULL);
+       /* node is a leaf, so it contains values rather than node pointers. */
+       bits = rtree->level2bits[i];
+       subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
+       ret = node[subkey];
+       malloc_spin_unlock(&rtree->lock);
+
+       return (ret);
 }
 
-static void *
-chunk_recycle_dss(size_t size, bool zero)
+static inline bool
+malloc_rtree_set(malloc_rtree_t *rtree, uintptr_t key, void *val)
 {
-       extent_node_t *node, key;
-
-       key.addr = NULL;
-       key.size = size;
-       malloc_mutex_lock(&dss_mtx);
-       node = RB_NFIND(extent_tree_szad_s, &dss_chunks_szad, &key);
-       if (node != NULL) {
-               void *ret = node->addr;
-
-               /* Remove node from the tree. */
-               RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, node);
-               if (node->size == size) {
-                       RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad, node);
-                       base_node_dealloc(node);
-               } else {
-                       /*
-                        * Insert the remainder of node's address range as a
-                        * smaller chunk.  Its position within dss_chunks_ad
-                        * does not change.
-                        */
-                       assert(node->size > size);
-                       node->addr = (void *)((uintptr_t)node->addr + size);
-                       node->size -= size;
-                       RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
+       uintptr_t subkey;
+       unsigned i, lshift, height, bits;
+       void **node, **child;
+
+       malloc_spin_lock(&rtree->lock);
+       for (i = lshift = 0, height = rtree->height, node = rtree->root;
+           i < height - 1;
+           i++, lshift += bits, node = child) {
+               bits = rtree->level2bits[i];
+               subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
+               child = (void**)node[subkey];
+               if (child == NULL) {
+                       child = (void**)base_calloc(1, sizeof(void *) <<
+                           rtree->level2bits[i+1]);
+                       if (child == NULL) {
+                               malloc_spin_unlock(&rtree->lock);
+                               return (true);
+                       }
+                       node[subkey] = child;
                }
-               malloc_mutex_unlock(&dss_mtx);
-
-               if (zero)
-                       memset(ret, 0, size);
-               return (ret);
        }
-       malloc_mutex_unlock(&dss_mtx);
 
-       return (NULL);
+       /* node is a leaf, so it contains values rather than node pointers. */
+       bits = rtree->level2bits[i];
+       subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
+       node[subkey] = val;
+       malloc_spin_unlock(&rtree->lock);
+
+       return (false);
 }
 #endif
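
For illustration, the radix tree gives deallocation a cheap way to reject
pointers this allocator never issued; because the low chunk-offset bits of
the key are ignored, an interior pointer resolves to its chunk. A usage
sketch, assuming chunk_rtree is the instance created at bootstrap:

	/* Register a chunk when it is allocated. */
	if (malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, chunk))
		abort();	/* base allocation failed */

	/* Later, validate an arbitrary pointer before trusting it. */
	if (malloc_rtree_get(chunk_rtree, (uintptr_t)ptr) == NULL)
		return;		/* not one of ours; leave it alone */
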
 
-#ifdef WIN32
-static inline void *
-chunk_alloc_mmap(size_t size)
+static void *
+chunk_alloc_mmap(size_t size, bool pagefile)
 {
        void *ret;
+#ifndef JEMALLOC_USES_MAP_ALIGN
        size_t offset;
+#endif
+       int pfd;
+
+#ifdef MALLOC_PAGEFILE
+       if (opt_pagefile && pagefile) {
+               pfd = pagefile_init(size);
+               if (pfd == -1)
+                       return (NULL);
+       } else
+#endif
+               pfd = -1;
 
        /*
         * Windows requires that there be a 1:1 mapping between VM
         * allocation/deallocation operations.  Therefore, take care here to
         * acquire the final result via one mapping operation.  This means
         * unmapping any preliminary result that is not correctly aligned.
+        *
+        * The MALLOC_PAGEFILE code also benefits from this mapping algorithm,
+        * since it reduces the number of page files.
         */
 
-       ret = pages_map(NULL, size);
+#ifdef JEMALLOC_USES_MAP_ALIGN
+       ret = pages_map_align(size, pfd, chunksize);
+#else
+       ret = pages_map(NULL, size, pfd);
        if (ret == NULL)
-               return (NULL);
+               goto RETURN;
 
        offset = CHUNK_ADDR2OFFSET(ret);
        if (offset != 0) {
                /* Deallocate, then try to allocate at (ret + size - offset). */
                pages_unmap(ret, size);
-               ret = pages_map((void *)((uintptr_t)ret + size - offset), size);
+               ret = pages_map((void *)((uintptr_t)ret + size - offset), size,
+                   pfd);
                while (ret == NULL) {
                        /*
                         * Over-allocate in order to map a memory region that
                         * is definitely large enough.
                         */
-                       ret = pages_map(NULL, size + chunksize);
+                       ret = pages_map(NULL, size + chunksize, -1);
                        if (ret == NULL)
-                               return (NULL);
+                               goto RETURN;
                        /*
                         * Deallocate, then allocate the correct size, within
                         * the over-sized mapping.
@@ -2158,10 +2476,10 @@ chunk_alloc_mmap(size_t size)
                        offset = CHUNK_ADDR2OFFSET(ret);
                        pages_unmap(ret, size + chunksize);
                        if (offset == 0)
-                               ret = pages_map(ret, size);
+                               ret = pages_map(ret, size, pfd);
                        else {
                                ret = pages_map((void *)((uintptr_t)ret +
-                                   chunksize - offset), size);
+                                   chunksize - offset), size, pfd);
                        }
                        /*
                         * Failure here indicates a race with another thread, so
@@ -2169,247 +2487,124 @@ chunk_alloc_mmap(size_t size)
                         */
                }
        }
-
+RETURN:
+#endif
+#ifdef MALLOC_PAGEFILE
+       if (pfd != -1)
+               pagefile_close(pfd);
+#endif
+#ifdef MALLOC_STATS
+       if (ret != NULL)
+               stats_chunks.nchunks += (size / chunksize);
+#endif
        return (ret);
-}
-#else
-static inline void *
-chunk_alloc_mmap(size_t size)
-{
-       void *ret;
-       size_t offset;
-
-       /*
-        * Ideally, there would be a way to specify alignment to mmap() (like
-        * NetBSD has), but in the absence of such a feature, we have to work
-        * hard to efficiently create aligned mappings.  The reliable, but
-        * expensive method is to create a mapping that is over-sized, then
-        * trim the excess.  However, that always results in at least one call
-        * to pages_unmap().
-        *
-        * A more optimistic approach is to try mapping precisely the right
-        * amount, then try to append another mapping if alignment is off.  In
-        * practice, this works out well as long as the application is not
-        * interleaving mappings via direct mmap() calls.  If we do run into a
-        * situation where there is an interleaved mapping and we are unable to
-        * extend an unaligned mapping, our best option is to momentarily
-        * revert to the reliable-but-expensive method.  This will tend to
-        * leave a gap in the memory map that is too small to cause later
-        * problems for the optimistic method.
-        */
-
-       ret = pages_map(NULL, size);
-       if (ret == NULL)
-               return (NULL);
-
-       offset = CHUNK_ADDR2OFFSET(ret);
-       if (offset != 0) {
-               /* Try to extend chunk boundary. */
-               if (pages_map((void *)((uintptr_t)ret + size),
-                   chunksize - offset) == NULL) {
-                       /*
-                        * Extension failed.  Clean up, then revert to the
-                        * reliable-but-expensive method.
-                        */
-                       pages_unmap(ret, size);
+}
 
-                       /* Beware size_t wrap-around. */
-                       if (size + chunksize <= size)
-                               return NULL;
+#ifdef MALLOC_PAGEFILE
+static int
+pagefile_init(size_t size)
+{
+       int ret;
+       size_t i;
+       char pagefile_path[PATH_MAX];
+       char zbuf[MALLOC_PAGEFILE_WRITE_SIZE];
 
-                       ret = pages_map(NULL, size + chunksize);
-                       if (ret == NULL)
-                               return (NULL);
+       /*
+        * Create a temporary file, then immediately unlink it so that it will
+        * not persist.
+        */
+       strcpy(pagefile_path, pagefile_templ);
+       ret = mkstemp(pagefile_path);
+       if (ret == -1)
+               return (ret);
+       if (unlink(pagefile_path)) {
+               char buf[STRERROR_BUF];
 
-                       /* Clean up unneeded leading/trailing space. */
-                       offset = CHUNK_ADDR2OFFSET(ret);
-                       if (offset != 0) {
-                               /* Leading space. */
-                               pages_unmap(ret, chunksize - offset);
+               strerror_r(errno, buf, sizeof(buf));
+               _malloc_message(_getprogname(), ": (malloc) Error in unlink(\"",
+                   pagefile_path, "\"):");
+               _malloc_message(buf, "\n", "", "");
+               if (opt_abort)
+                       abort();
+       }
 
-                               ret = (void *)((uintptr_t)ret +
-                                   (chunksize - offset));
+       /*
+        * Write sequential zeroes to the file in order to assure that disk
+        * space is committed, with minimal fragmentation.  It would be
+        * sufficient to write one zero per disk block, but that potentially
+        * results in more system calls, for no real gain.
+        */
+       memset(zbuf, 0, sizeof(zbuf));
+       for (i = 0; i < size; i += sizeof(zbuf)) {
+               if (write(ret, zbuf, sizeof(zbuf)) != sizeof(zbuf)) {
+                       if (errno != ENOSPC) {
+                               char buf[STRERROR_BUF];
 
-                               /* Trailing space. */
-                               pages_unmap((void *)((uintptr_t)ret + size),
-                                   offset);
-                       } else {
-                               /* Trailing space only. */
-                               pages_unmap((void *)((uintptr_t)ret + size),
-                                   chunksize);
+                               strerror_r(errno, buf, sizeof(buf));
+                               _malloc_message(_getprogname(),
+                                   ": (malloc) Error in write(): ", buf, "\n");
+                               if (opt_abort)
+                                       abort();
                        }
-               } else {
-                       /* Clean up unneeded leading space. */
-                       pages_unmap(ret, chunksize - offset);
-                       ret = (void *)((uintptr_t)ret + (chunksize - offset));
+                       pagefile_close(ret);
+                       return (-1);
                }
        }
 
        return (ret);
 }
+
+static void
+pagefile_close(int pfd)
+{
+
+       if (close(pfd)) {
+               char buf[STRERROR_BUF];
+
+               strerror_r(errno, buf, sizeof(buf));
+               _malloc_message(_getprogname(),
+                   ": (malloc) Error in close(): ", buf, "\n");
+               if (opt_abort)
+                       abort();
+       }
+}
 #endif
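
Taken together, the page-file path works roughly like this sketch
(opt_pagefile and pagefile_templ are defined elsewhere in the file):

	/*
	 * Back a chunk with an unlinked temporary file rather than
	 * anonymous memory, so its disk space is committed up front.
	 */
	int pfd = pagefile_init(chunksize);	/* mkstemp + unlink + zero-fill */

	if (pfd != -1) {
		void *chunk = pages_map(NULL, chunksize, pfd);
		pagefile_close(pfd);	/* the mapping keeps the file alive */
	}
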
 
 static void *
-chunk_alloc(size_t size, bool zero)
+chunk_alloc(size_t size, bool zero, bool pagefile)
 {
        void *ret;
 
        assert(size != 0);
        assert((size & chunksize_mask) == 0);
 
-#ifdef MALLOC_DSS
-       if (opt_dss) {
-               ret = chunk_recycle_dss(size, zero);
-               if (ret != NULL) {
-                       goto RETURN;
-               }
-
-               ret = chunk_alloc_dss(size);
-               if (ret != NULL)
-                       goto RETURN;
-       }
-
-       if (opt_mmap)
-#endif
-       {
-               ret = chunk_alloc_mmap(size);
-               if (ret != NULL)
-                       goto RETURN;
+       ret = chunk_alloc_mmap(size, pagefile);
+       if (ret != NULL) {
+               goto RETURN;
        }
 
        /* All strategies for allocation failed. */
        ret = NULL;
 RETURN:
 #ifdef MALLOC_STATS
-       if (ret != NULL) {
-               stats_chunks.nchunks += (size / chunksize);
+       if (ret != NULL)
                stats_chunks.curchunks += (size / chunksize);
-       }
        if (stats_chunks.curchunks > stats_chunks.highchunks)
                stats_chunks.highchunks = stats_chunks.curchunks;
 #endif
 
-       assert(CHUNK_ADDR2BASE(ret) == ret);
-       return (ret);
-}
-
-#ifdef MALLOC_DSS
-static extent_node_t *
-chunk_dealloc_dss_record(void *chunk, size_t size)
-{
-       extent_node_t *node, *prev, key;
-
-       key.addr = (void *)((uintptr_t)chunk + size);
-       node = RB_NFIND(extent_tree_ad_s, &dss_chunks_ad, &key);
-       /* Try to coalesce forward. */
-       if (node != NULL && node->addr == key.addr) {
-               /*
-                * Coalesce chunk with the following address range.  This does
-                * not change the position within dss_chunks_ad, so only
-                * remove/insert from/into dss_chunks_szad.
-                */
-               RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, node);
-               node->addr = chunk;
-               node->size += size;
-               RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
-       } else {
-               /*
-                * Coalescing forward failed, so insert a new node.  Drop
-                * dss_mtx during node allocation, since it is possible that a
-                * new base chunk will be allocated.
-                */
-               malloc_mutex_unlock(&dss_mtx);
-               node = base_node_alloc();
-               malloc_mutex_lock(&dss_mtx);
-               if (node == NULL)
+#ifdef MALLOC_VALIDATE
+       if (ret != NULL) {
+               if (malloc_rtree_set(chunk_rtree, (uintptr_t)ret, ret)) {
+                       chunk_dealloc(ret, size);
                        return (NULL);
-               node->addr = chunk;
-               node->size = size;
-               RB_INSERT(extent_tree_ad_s, &dss_chunks_ad, node);
-               RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
-       }
-
-       /* Try to coalesce backward. */
-       prev = RB_PREV(extent_tree_ad_s, &dss_chunks_ad, node);
-       if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
-           chunk) {
-               /*
-                * Coalesce chunk with the previous address range.  This does
-                * not change the position within dss_chunks_ad, so only
-                * remove/insert node from/into dss_chunks_szad.
-                */
-               RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, prev);
-               RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad, prev);
-
-               RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, node);
-               node->addr = prev->addr;
-               node->size += prev->size;
-               RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
-
-               base_node_dealloc(prev);
-       }
-
-       return (node);
-}
-
-static bool
-chunk_dealloc_dss(void *chunk, size_t size)
-{
-
-       malloc_mutex_lock(&dss_mtx);
-       if ((uintptr_t)chunk >= (uintptr_t)dss_base
-           && (uintptr_t)chunk < (uintptr_t)dss_max) {
-               extent_node_t *node;
-
-               /* Try to coalesce with other unused chunks. */
-               node = chunk_dealloc_dss_record(chunk, size);
-               if (node != NULL) {
-                       chunk = node->addr;
-                       size = node->size;
-               }
-
-               /* Get the current end of the DSS. */
-               dss_max = sbrk(0);
-
-               /*
-                * Try to shrink the DSS if this chunk is at the end of the
-                * DSS.  The sbrk() call here is subject to a race condition
-                * with threads that use brk(2) or sbrk(2) directly, but the
-                * alternative would be to leak memory for the sake of poorly
-                * designed multi-threaded programs.
-                */
-               if ((void *)((uintptr_t)chunk + size) == dss_max
-                   && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
-                       /* Success. */
-                       dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);
-
-                       if (node != NULL) {
-                               RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad,
-                                   node);
-                               RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad,
-                                   node);
-                               base_node_dealloc(node);
-                       }
-                       malloc_mutex_unlock(&dss_mtx);
-               } else {
-                       malloc_mutex_unlock(&dss_mtx);
-#ifdef WIN32
-                       VirtualAlloc(chunk, size, MEM_RESET, PAGE_READWRITE);
-#elif (defined(DARWIN))
-                       mmap(chunk, size, PROT_READ | PROT_WRITE, MAP_PRIVATE
-                           | MAP_ANON | MAP_FIXED, -1, 0);
-#else
-                       madvise(chunk, size, MADV_FREE);
-#endif
                }
-
-               return (false);
        }
-       malloc_mutex_unlock(&dss_mtx);
+#endif
 
-       return (true);
+       assert(CHUNK_ADDR2BASE(ret) == ret);
+       return (ret);
 }
-#endif
 
 static void
 chunk_dealloc_mmap(void *chunk, size_t size)
@@ -2430,16 +2625,11 @@ chunk_dealloc(void *chunk, size_t size)
 #ifdef MALLOC_STATS
        stats_chunks.curchunks -= (size / chunksize);
 #endif
-
-#ifdef MALLOC_DSS
-       if (opt_dss) {
-               if (chunk_dealloc_dss(chunk, size) == false)
-                       return;
-       }
-
-       if (opt_mmap)
+#ifdef MALLOC_VALIDATE
+       malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, NULL);
 #endif
-               chunk_dealloc_mmap(chunk, size);
+
+       chunk_dealloc_mmap(chunk, size);
 }
 
 /*
@@ -2465,13 +2655,13 @@ choose_arena(void)
         * introduces a bootstrapping issue.
         */
 #ifndef NO_TLS
-       if (g_isthreaded == false) {
+       if (__isthreaded == false) {
            /* Avoid the overhead of TLS for single-threaded operation. */
            return (arenas[0]);
        }
 
-#  ifdef WIN32
-       ret = TlsGetValue(tlsIndex);
+#  ifdef MOZ_MEMORY_WINDOWS
+       ret = (arena_t*)TlsGetValue(tlsIndex);
 #  else
        ret = arenas_map;
 #  endif
@@ -2481,7 +2671,7 @@ choose_arena(void)
                assert(ret != NULL);
        }
 #else
-       if (g_isthreaded && narenas > 1) {
+       if (__isthreaded && narenas > 1) {
                unsigned long ind;
 
                /*
@@ -2538,29 +2728,10 @@ choose_arena_hard(void)
 {
        arena_t *ret;
 
-       assert(g_isthreaded);
-
-#ifdef MALLOC_LAZY_FREE
-       /*
-        * Seed the PRNG used for lazy deallocation.  Since seeding only occurs
-        * on the first allocation by a thread, it is possible for a thread to
-        * deallocate before seeding.  This is not a critical issue though,
-        * since it is extremely unusual for an application to to use threads
-        * that deallocate but *never* allocate, and because even if seeding
-        * never occurs for multiple threads, they will tend to drift apart
-        * unless some aspect of the application forces deallocation
-        * synchronization.
-        */
-       SPRN(lazy_free, (uint32_t)(uintptr_t)(_pthread_self()));
-#endif
+       assert(__isthreaded);
 
 #ifdef MALLOC_BALANCE
-       /*
-        * Seed the PRNG used for arena load balancing.  We can get away with
-        * using the same seed here as for the lazy_free PRNG without
-        * introducing autocorrelation because the PRNG parameters are
-        * distinct.
-        */
+       /* Seed the PRNG used for arena load balancing. */
        SPRN(balance, (uint32_t)(uintptr_t)(_pthread_self()));
 #endif
 
@@ -2585,7 +2756,7 @@ choose_arena_hard(void)
        } else
                ret = arenas[0];
 
-#ifdef WIN32
+#ifdef MOZ_MEMORY_WINDOWS
        TlsSetValue(tlsIndex, ret);
 #else
        arenas_map = ret;
@@ -2607,51 +2778,57 @@ arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
        return ((a_chunk > b_chunk) - (a_chunk < b_chunk));
 }
 
-/* Generate red-black tree code for arena chunks. */
-RB_GENERATE_STATIC(arena_chunk_tree_s, arena_chunk_s, link, arena_chunk_comp)
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, arena_chunk_tree_dirty_, arena_chunk_tree_t,
+    arena_chunk_t, link_dirty, arena_chunk_comp)
 
 static inline int
-arena_run_comp(arena_run_t *a, arena_run_t *b)
+arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
 {
-       uintptr_t a_run = (uintptr_t)a;
-       uintptr_t b_run = (uintptr_t)b;
+       uintptr_t a_mapelm = (uintptr_t)a;
+       uintptr_t b_mapelm = (uintptr_t)b;
 
        assert(a != NULL);
        assert(b != NULL);
 
-       return ((a_run > b_run) - (a_run < b_run));
+       return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
 }
 
-/* Generate red-black tree code for arena runs. */
-RB_GENERATE_STATIC(arena_run_tree_s, arena_run_s, link, arena_run_comp)
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, link,
+    arena_run_comp)
 
-static extent_node_t *
-arena_chunk_node_alloc(arena_chunk_t *chunk)
+static inline int
+arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
 {
-       extent_node_t *ret;
+       int ret;
+       size_t a_size = a->bits & ~pagesize_mask;
+       size_t b_size = b->bits & ~pagesize_mask;
 
-       ret = RB_MIN(extent_tree_ad_s, &chunk->nodes);
-       if (ret != NULL)
-               RB_REMOVE(extent_tree_ad_s, &chunk->nodes, ret);
-       else {
-               ret = chunk->nodes_past;
-               chunk->nodes_past = (extent_node_t *)
-                   ((uintptr_t)chunk->nodes_past + sizeof(extent_node_t));
-               assert((uintptr_t)ret + sizeof(extent_node_t) <=
-                   (uintptr_t)chunk + (arena_chunk_header_npages <<
-                   pagesize_2pow));
+       ret = (a_size > b_size) - (a_size < b_size);
+       if (ret == 0) {
+               uintptr_t a_mapelm, b_mapelm;
+
+               if ((a->bits & CHUNK_MAP_KEY) == 0)
+                       a_mapelm = (uintptr_t)a;
+               else {
+                       /*
+                        * Treat keys as though they are lower than anything
+                        * else.
+                        */
+                       a_mapelm = 0;
+               }
+               b_mapelm = (uintptr_t)b;
+
+               ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
        }
 
        return (ret);
 }
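
The CHUNK_MAP_KEY case above exists so that run allocation can probe this
tree with a synthetic key; a hedged sketch of the presumed best-fit idiom:

	arena_chunk_map_t *mapelm, key;

	/*
	 * A key sorts below every real element of equal size, so an
	 * nsearch() lands on the smallest run of at least 'size' bytes.
	 */
	key.bits = size | CHUNK_MAP_KEY;
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
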
 
-static void
-arena_chunk_node_dealloc(arena_chunk_t *chunk, extent_node_t *node)
-{
-
-       node->addr = (void *)node;
-       RB_INSERT(extent_tree_ad_s, &chunk->nodes, node);
-}
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, link,
+    arena_avail_comp)
 
 static inline void *
 arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
@@ -2817,33 +2994,37 @@ arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
 }
 
 static void
-arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool small,
+arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
     bool zero)
 {
        arena_chunk_t *chunk;
-       size_t run_ind, total_pages, need_pages, rem_pages, i;
-       extent_node_t *nodeA, *nodeB, key;
+       size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
 
-       /* Insert a node into runs_alloced_ad for the first part of the run. */
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-       nodeA = arena_chunk_node_alloc(chunk);
-       nodeA->addr = run;
-       nodeA->size = size;
-       RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeA);
-
-       key.addr = run;
-       nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
-       assert(nodeB != NULL);
-
+       old_ndirty = chunk->ndirty;
        run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
            >> pagesize_2pow);
-       total_pages = nodeB->size >> pagesize_2pow;
+       total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >>
+           pagesize_2pow;
        need_pages = (size >> pagesize_2pow);
        assert(need_pages > 0);
        assert(need_pages <= total_pages);
-       assert(need_pages <= CHUNK_MAP_POS_MASK || small == false);
        rem_pages = total_pages - need_pages;
 
+       arena_avail_tree_remove(&arena->runs_avail, &chunk->map[run_ind]);
+
+       /* Keep track of trailing unused pages for later use. */
+       if (rem_pages > 0) {
+               chunk->map[run_ind+need_pages].bits = (rem_pages <<
+                   pagesize_2pow) | (chunk->map[run_ind+need_pages].bits &
+                   pagesize_mask);
+               chunk->map[run_ind+total_pages-1].bits = (rem_pages <<
+                   pagesize_2pow) | (chunk->map[run_ind+total_pages-1].bits &
+                   pagesize_mask);
+               arena_avail_tree_insert(&arena->runs_avail,
+                   &chunk->map[run_ind+need_pages]);
+       }
+
        for (i = 0; i < need_pages; i++) {
 #ifdef MALLOC_DECOMMIT
                /*
@@ -2852,7 +3033,7 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool small,
                 * pages in one operation, in order to reduce system call
                 * overhead.
                 */
-               if (chunk->map[run_ind + i] & CHUNK_MAP_DECOMMITTED) {
+               if (chunk->map[run_ind + i].bits & CHUNK_MAP_DECOMMITTED) {
                        size_t j;
 
                        /*
@@ -2861,8 +3042,8 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool small,
                         * way.
                         */
                        for (j = 0; i + j < need_pages && (chunk->map[run_ind +
-                           i + j] & CHUNK_MAP_DECOMMITTED); j++) {
-                               chunk->map[run_ind + i + j] ^=
+                           i + j].bits & CHUNK_MAP_DECOMMITTED); j++) {
+                               chunk->map[run_ind + i + j].bits ^=
                                    CHUNK_MAP_DECOMMITTED;
                        }
 
@@ -2870,205 +3051,226 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool small,
                            << pagesize_2pow)), (j << pagesize_2pow));
 #  ifdef MALLOC_STATS
                        arena->stats.ncommit++;
+                       arena->stats.committed += j;
 #  endif
-               }
+               } else /* No need to zero since commit zeros. */
 #endif
 
                /* Zero if necessary. */
                if (zero) {
-                       if ((chunk->map[run_ind + i] & CHUNK_MAP_UNTOUCHED)
+                       if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED)
                            == 0) {
+                               VALGRIND_MALLOCLIKE_BLOCK((void *)((uintptr_t)
+                                   chunk + ((run_ind + i) << pagesize_2pow)),
+                                   pagesize, 0, false);
                                memset((void *)((uintptr_t)chunk + ((run_ind
                                    + i) << pagesize_2pow)), 0, pagesize);
-                               /* CHUNK_MAP_UNTOUCHED is cleared below. */
+                               VALGRIND_FREELIKE_BLOCK((void *)((uintptr_t)
+                                   chunk + ((run_ind + i) << pagesize_2pow)),
+                                   0);
+                               /* CHUNK_MAP_ZEROED is cleared below. */
                        }
                }
 
                /* Update dirty page accounting. */
-               if (chunk->map[run_ind + i] & CHUNK_MAP_DIRTY) {
+               if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
                        chunk->ndirty--;
                        arena->ndirty--;
+                       /* CHUNK_MAP_DIRTY is cleared below. */
                }
 
                /* Initialize the chunk map. */
-               if (small)
-                       chunk->map[run_ind + i] = (uint8_t)i;
-               else
-                       chunk->map[run_ind + i] = CHUNK_MAP_LARGE;
+               if (large) {
+                       chunk->map[run_ind + i].bits = CHUNK_MAP_LARGE
+                           | CHUNK_MAP_ALLOCATED;
+               } else {
+                       chunk->map[run_ind + i].bits = (size_t)run
+                           | CHUNK_MAP_ALLOCATED;
+               }
        }
 
-       /* Keep track of trailing unused pages for later use. */
-       RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
-       if (rem_pages > 0) {
-               /*
-                * Update nodeB in runs_avail_*.  Its position within
-                * runs_avail_ad does not change.
-                */
-               nodeB->addr = (void *)((uintptr_t)nodeB->addr + size);
-               nodeB->size -= size;
-               RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
-       } else {
-               /* Remove nodeB from runs_avail_*. */
-               RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
-               arena_chunk_node_dealloc(chunk, nodeB);
-       }
+       /*
+        * Set the run size only in the first element for large runs.  This is
+        * primarily a debugging aid, since the lack of size info for trailing
+        * pages only matters if the application tries to operate on an
+        * interior pointer.
+        */
+       if (large)
+               chunk->map[run_ind].bits |= size;
 
-       chunk->pages_used += need_pages;
+       if (chunk->ndirty == 0 && old_ndirty > 0)
+               arena_chunk_tree_dirty_remove(&arena->chunks_dirty, chunk);
 }
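
A note on the map encoding used above: runs are page-aligned, so a small-run
map entry packs the run pointer and the CHUNK_MAP_* flags into one word.
Illustratively, with 4 KiB pages the low 12 bits of the pointer are zero:

	/* Store: flags ride in the pointer's zero low bits. */
	chunk->map[i].bits = (size_t)run | CHUNK_MAP_ALLOCATED;
	/* Load: mask the flags off to recover the run pointer. */
	run = (arena_run_t *)(chunk->map[i].bits & ~pagesize_mask);
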
 
-static arena_chunk_t *
-arena_chunk_alloc(arena_t *arena)
+static void
+arena_chunk_init(arena_t *arena, arena_chunk_t *chunk)
 {
-       arena_chunk_t *chunk;
-       extent_node_t *node;
+       arena_run_t *run;
+       size_t i;
 
-       if (arena->spare != NULL) {
-               chunk = arena->spare;
-               arena->spare = NULL;
-       } else {
-               chunk = (arena_chunk_t *)chunk_alloc(chunksize, true);
-               if (chunk == NULL)
-                       return (NULL);
+       VALGRIND_MALLOCLIKE_BLOCK(chunk, (arena_chunk_header_npages <<
+           pagesize_2pow), 0, false);
 #ifdef MALLOC_STATS
-               arena->stats.mapped += chunksize;
+       arena->stats.mapped += chunksize;
 #endif
 
-               chunk->arena = arena;
-
-               RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
+       chunk->arena = arena;
 
-               /*
-                * Claim that no pages are in use, since the header is merely
-                * overhead.
-                */
-               chunk->pages_used = 0;
-               chunk->ndirty = 0;
+       /*
+        * Claim that no pages are in use, since the header is merely overhead.
+        */
+       chunk->ndirty = 0;
 
-               /*
-                * Initialize the map to contain one maximal free untouched
-                * run.
-                */
-               memset(chunk->map, (CHUNK_MAP_LARGE | CHUNK_MAP_POS_MASK),
-                   arena_chunk_header_npages);
-               memset(&chunk->map[arena_chunk_header_npages],
-                   (CHUNK_MAP_UNTOUCHED
+       /* Initialize the map to contain one maximal free untouched run. */
+       run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
+           pagesize_2pow));
+       for (i = 0; i < arena_chunk_header_npages; i++)
+               chunk->map[i].bits = 0;
+       chunk->map[i].bits = arena_maxclass
 #ifdef MALLOC_DECOMMIT
-                   | CHUNK_MAP_DECOMMITTED
+           | CHUNK_MAP_DECOMMITTED
 #endif
-                   ), (chunk_npages -
-                   arena_chunk_header_npages));
-
-               /* Initialize the tree of unused extent nodes. */
-               RB_INIT(&chunk->nodes);
-               chunk->nodes_past = (extent_node_t *)QUANTUM_CEILING(
-                   (uintptr_t)&chunk->map[chunk_npages]);
+           | CHUNK_MAP_ZEROED;
+       for (i++; i < chunk_npages-1; i++) {
+               chunk->map[i].bits =
+#ifdef MALLOC_DECOMMIT
+                   CHUNK_MAP_DECOMMITTED |
+#endif
+                   CHUNK_MAP_ZEROED;
+       }
+       chunk->map[chunk_npages-1].bits = arena_maxclass
+#ifdef MALLOC_DECOMMIT
+           | CHUNK_MAP_DECOMMITTED
+#endif
+           | CHUNK_MAP_ZEROED;
 
 #ifdef MALLOC_DECOMMIT
-               /*
-                * Start out decommitted, in order to force a closer
-                * correspondence between dirty pages and committed untouched
-                * pages.
-                */
-               pages_decommit((void *)((uintptr_t)chunk +
-                   (arena_chunk_header_npages << pagesize_2pow)),
-                   ((chunk_npages - arena_chunk_header_npages) <<
-                   pagesize_2pow));
+       /*
+        * Start out decommitted, in order to force a closer correspondence
+        * between dirty pages and committed untouched pages.
+        */
+       pages_decommit(run, arena_maxclass);
 #  ifdef MALLOC_STATS
-               arena->stats.ndecommit++;
-               arena->stats.decommitted += (chunk_npages -
-                   arena_chunk_header_npages);
+       arena->stats.ndecommit++;
+       arena->stats.decommitted += (chunk_npages - arena_chunk_header_npages);
+       arena->stats.committed += arena_chunk_header_npages;
 #  endif
 #endif
-       }
-
-       /* Insert the run into the runs_avail_* red-black trees. */
-       node = arena_chunk_node_alloc(chunk);
-       node->addr = (void *)((uintptr_t)chunk + (arena_chunk_header_npages <<
-           pagesize_2pow));
-       node->size = chunksize - (arena_chunk_header_npages << pagesize_2pow);
-       RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, node);
-       RB_INSERT(extent_tree_ad_s, &arena->runs_avail_ad, node);
 
-       return (chunk);
+       /* Insert the run into the runs_avail tree. */
+       arena_avail_tree_insert(&arena->runs_avail,
+           &chunk->map[arena_chunk_header_npages]);
 }
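
arena_chunk_init() lays the chunk out as a header (the arena_chunk_t fields
plus the page map) followed by one maximal free run.  A small sketch of the
address arithmetic, with the parameters passed in rather than taken from the
allocator's globals:

    #include <stdint.h>
    #include <stddef.h>

    /* First usable run: skip the header pages at the start of the chunk. */
    static void *
    chunk_first_run(void *chunk, size_t header_npages, unsigned pagesize_2pow)
    {
            return (void *)((uintptr_t)chunk +
                (header_npages << pagesize_2pow));
    }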
 
 static void
 arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 {
-       extent_node_t *node, key;
 
        if (arena->spare != NULL) {
-               RB_REMOVE(arena_chunk_tree_s, &chunk->arena->chunks,
-                   arena->spare);
-               arena->ndirty -= arena->spare->ndirty;
+               if (arena->spare->ndirty > 0) {
+                       arena_chunk_tree_dirty_remove(
+                           &chunk->arena->chunks_dirty, arena->spare);
+                       arena->ndirty -= arena->spare->ndirty;
+#if (defined(MALLOC_STATS) && defined(MALLOC_DECOMMIT))
+                       arena->stats.committed -= arena->spare->ndirty;
+#endif
+               }
+               VALGRIND_FREELIKE_BLOCK(arena->spare, 0);
                chunk_dealloc((void *)arena->spare, chunksize);
 #ifdef MALLOC_STATS
                arena->stats.mapped -= chunksize;
+#  ifdef MALLOC_DECOMMIT
+               arena->stats.committed -= arena_chunk_header_npages;
+#  endif
 #endif
        }
 
        /*
-        * Remove run from the runs trees, regardless of whether this chunk
-        * will be cached, so that the arena does not use it.  Dirty page
-        * flushing only uses the chunks tree, so leaving this chunk in that
-        * tree is sufficient for that purpose.
+        * Remove run from runs_avail, so that the arena does not use it.
+        * Dirty page flushing only uses the chunks_dirty tree, so leaving this
+        * chunk in the chunks_* trees is sufficient for that purpose.
         */
-       key.addr = (void *)((uintptr_t)chunk + (arena_chunk_header_npages <<
-           pagesize_2pow));
-       node = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
-       assert(node != NULL);
-       RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, node);
-       RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, node);
-       arena_chunk_node_dealloc(chunk, node);
+       arena_avail_tree_remove(&arena->runs_avail,
+           &chunk->map[arena_chunk_header_npages]);
 
        arena->spare = chunk;
 }
 
 static arena_run_t *
-arena_run_alloc(arena_t *arena, size_t size, bool small, bool zero)
+arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
+    bool zero)
 {
        arena_chunk_t *chunk;
        arena_run_t *run;
-       extent_node_t *node, key;
+       arena_chunk_map_t *mapelm, key;
 
-       assert(size <= (chunksize - (arena_chunk_header_npages <<
-           pagesize_2pow)));
+       assert(size <= arena_maxclass);
        assert((size & pagesize_mask) == 0);
 
-       /* Search the arena's chunks for the lowest best fit. */
-       key.addr = NULL;
-       key.size = size;
-       node = RB_NFIND(extent_tree_szad_s, &arena->runs_avail_szad, &key);
-       if (node != NULL) {
-               run = (arena_run_t *)node->addr;
-               arena_run_split(arena, run, size, small, zero);
+       chunk = NULL;
+       while (true) {
+               /* Search the arena's chunks for the lowest best fit. */
+               key.bits = size | CHUNK_MAP_KEY;
+               mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
+               if (mapelm != NULL) {
+                       arena_chunk_t *run_chunk =
+                           (arena_chunk_t*)CHUNK_ADDR2BASE(mapelm);
+                       size_t pageind = ((uintptr_t)mapelm -
+                           (uintptr_t)run_chunk->map) /
+                           sizeof(arena_chunk_map_t);
+
+                       if (chunk != NULL)
+                               chunk_dealloc(chunk, chunksize);
+                       run = (arena_run_t *)((uintptr_t)run_chunk + (pageind
+                           << pagesize_2pow));
+                       arena_run_split(arena, run, size, large, zero);
+                       return (run);
+               }
+
+               if (arena->spare != NULL) {
+                       /* Use the spare. */
+                       chunk = arena->spare;
+                       arena->spare = NULL;
+                       run = (arena_run_t *)((uintptr_t)chunk +
+                           (arena_chunk_header_npages << pagesize_2pow));
+                       /* Insert the run into the runs_avail tree. */
+                       arena_avail_tree_insert(&arena->runs_avail,
+                           &chunk->map[arena_chunk_header_npages]);
+                       arena_run_split(arena, run, size, large, zero);
+                       return (run);
+               }
+
+               /*
+                * No usable runs.  Create a new chunk from which to allocate
+                * the run.
+                */
+               if (chunk == NULL) {
+                       chunk = (arena_chunk_t *)chunk_alloc(chunksize, true,
+                           true);
+                       if (chunk == NULL)
+                               return (NULL);
+               }
+
+               arena_chunk_init(arena, chunk);
+               run = (arena_run_t *)((uintptr_t)chunk +
+                   (arena_chunk_header_npages << pagesize_2pow));
+               /* Update page map. */
+               arena_run_split(arena, run, size, large, zero);
                return (run);
        }
-
-       /*
-        * No usable runs.  Create a new chunk from which to allocate the run.
-        */
-       chunk = arena_chunk_alloc(arena);
-       if (chunk == NULL)
-               return (NULL);
-       run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
-           pagesize_2pow));
-       /* Update page map. */
-       arena_run_split(arena, run, size, small, zero);
-       return (run);
 }
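
The best-fit search now yields a pointer into the chunk's map array instead
of an extent node, so the run address is recovered by pointer arithmetic.  A
self-contained sketch of that conversion; map_t stands in for the patch's
arena_chunk_map_t, and element subtraction replaces the explicit division by
sizeof(arena_chunk_map_t):

    #include <stdint.h>
    #include <stddef.h>

    typedef struct { size_t bits; } map_t;

    static void *
    mapelm_to_run(void *chunk, const map_t *map, const map_t *mapelm,
        unsigned pagesize_2pow)
    {
            size_t pageind = (size_t)(mapelm - map); /* element offset */
            return (void *)((uintptr_t)chunk + (pageind << pagesize_2pow));
    }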
 
 static void
 arena_purge(arena_t *arena)
 {
        arena_chunk_t *chunk;
+       size_t i, npages;
 #ifdef MALLOC_DEBUG
-       size_t ndirty;
-
-       ndirty = 0;
-       RB_FOREACH(chunk, arena_chunk_tree_s, &arena->chunks) {
+       size_t ndirty = 0;
+       rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
+           chunk) {
                ndirty += chunk->ndirty;
-       }
+       } rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
        assert(ndirty == arena->ndirty);
 #endif
        assert(arena->ndirty > opt_dirty_max);
@@ -3079,59 +3281,72 @@ arena_purge(arena_t *arena)
 
        /*
         * Iterate downward through chunks until enough dirty memory has been
+        * purged.  Terminate as soon as possible in order to minimize the
+        * number of system calls, even if a chunk has only been partially
         * purged.
         */
-       RB_FOREACH_REVERSE(chunk, arena_chunk_tree_s, &arena->chunks) {
-               if (chunk->ndirty > 0) {
-                       size_t i;
+       while (arena->ndirty > (opt_dirty_max >> 1)) {
+               chunk = arena_chunk_tree_dirty_last(&arena->chunks_dirty);
+               assert(chunk != NULL);
 
-                       for (i = chunk_npages - 1; i >=
-                           arena_chunk_header_npages; i--) {
-                               if (chunk->map[i] & CHUNK_MAP_DIRTY) {
-                                       size_t npages;
+               for (i = chunk_npages - 1; chunk->ndirty > 0; i--) {
+                       assert(i >= arena_chunk_header_npages);
 
-                                       chunk->map[i] = (CHUNK_MAP_LARGE |
+                       if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
 #ifdef MALLOC_DECOMMIT
-                                           CHUNK_MAP_DECOMMITTED |
+                               assert((chunk->map[i].bits &
+                                   CHUNK_MAP_DECOMMITTED) == 0);
+#endif
+                               chunk->map[i].bits ^=
+#ifdef MALLOC_DECOMMIT
+                                   CHUNK_MAP_DECOMMITTED |
+#endif
+                                   CHUNK_MAP_DIRTY;
+                               /* Find adjacent dirty run(s). */
+                               for (npages = 1; i > arena_chunk_header_npages
+                                   && (chunk->map[i - 1].bits &
+                                   CHUNK_MAP_DIRTY); npages++) {
+                                       i--;
+#ifdef MALLOC_DECOMMIT
+                                       assert((chunk->map[i].bits &
+                                           CHUNK_MAP_DECOMMITTED) == 0);
 #endif
-                                           CHUNK_MAP_POS_MASK);
-                                       chunk->ndirty--;
-                                       arena->ndirty--;
-                                       /* Find adjacent dirty run(s). */
-                                       for (npages = 1; i >
-                                           arena_chunk_header_npages &&
-                                           (chunk->map[i - 1] &
-                                           CHUNK_MAP_DIRTY); npages++) {
-                                               i--;
-                                               chunk->map[i] = (CHUNK_MAP_LARGE
+                                       chunk->map[i].bits ^=
 #ifdef MALLOC_DECOMMIT
-                                                   | CHUNK_MAP_DECOMMITTED
+                                           CHUNK_MAP_DECOMMITTED |
 #endif
-                                                   | CHUNK_MAP_POS_MASK);
-                                               chunk->ndirty--;
-                                               arena->ndirty--;
-                                       }
+                                           CHUNK_MAP_DIRTY;
+                               }
+                               chunk->ndirty -= npages;
+                               arena->ndirty -= npages;
 
 #ifdef MALLOC_DECOMMIT
-                                       pages_decommit((void *)((uintptr_t)
-                                           chunk + (i << pagesize_2pow)),
-                                           (npages << pagesize_2pow));
+                               pages_decommit((void *)((uintptr_t)
+                                   chunk + (i << pagesize_2pow)),
+                                   (npages << pagesize_2pow));
 #  ifdef MALLOC_STATS
-                                       arena->stats.ndecommit++;
-                                       arena->stats.decommitted += npages;
+                               arena->stats.ndecommit++;
+                               arena->stats.decommitted += npages;
+                               arena->stats.committed -= npages;
 #  endif
 #else
-                                       madvise((void *)((uintptr_t)chunk + (i
-                                           << pagesize_2pow)), pagesize *
-                                           npages, MADV_FREE);
+                               madvise((void *)((uintptr_t)chunk + (i <<
+                                   pagesize_2pow)), (npages << pagesize_2pow),
+                                   MADV_FREE);
 #endif
 #ifdef MALLOC_STATS
-                                       arena->stats.nmadvise++;
-                                       arena->stats.purged += npages;
+                               arena->stats.nmadvise++;
+                               arena->stats.purged += npages;
 #endif
-                               }
+                               if (arena->ndirty <= (opt_dirty_max >> 1))
+                                       break;
                        }
                }
+
+               if (chunk->ndirty == 0) {
+                       arena_chunk_tree_dirty_remove(&arena->chunks_dirty,
+                           chunk);
+               }
        }
 }
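
arena_purge() takes the dirtiest chunk from the tail of chunks_dirty,
coalesces each maximal run of dirty pages, and releases the whole run with a
single decommit or madvise() call, stopping once ndirty drops to half of
opt_dirty_max.  A sketch of the inner step over a toy dirty bitmap, on
systems that provide MADV_FREE:

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>

    /* Clear one maximal dirty run ending at page i and release it in a
     * single syscall; returns the number of pages purged. */
    static size_t
    purge_run(void *chunk, bool *dirty, size_t i, size_t page_size)
    {
            size_t npages = 1;

            dirty[i] = false;
            while (i > 0 && dirty[i - 1]) { /* absorb adjacent dirty pages */
                    i--;
                    dirty[i] = false;
                    npages++;
            }
            madvise((char *)chunk + (i * page_size), npages * page_size,
                MADV_FREE);
            return npages;
    }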
 
@@ -3139,94 +3354,103 @@ static void
 arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 {
        arena_chunk_t *chunk;
-       extent_node_t *nodeA, *nodeB, *nodeC, key;
        size_t size, run_ind, run_pages;
 
-       /* Remove run from runs_alloced_ad. */
-       key.addr = run;
-       nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
-       assert(nodeB != NULL);
-       RB_REMOVE(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
-       size = nodeB->size;
-
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-       run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
+       run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
            >> pagesize_2pow);
        assert(run_ind >= arena_chunk_header_npages);
-       assert(run_ind < (chunksize >> pagesize_2pow));
+       assert(run_ind < chunk_npages);
+       if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0)
+               size = chunk->map[run_ind].bits & ~pagesize_mask;
+       else
+               size = run->bin->run_size;
        run_pages = (size >> pagesize_2pow);
 
-       /* Subtract pages from count of pages used in chunk. */
-       chunk->pages_used -= run_pages;
-
+       /* Mark pages as unallocated in the chunk map. */
        if (dirty) {
                size_t i;
 
                for (i = 0; i < run_pages; i++) {
-                       assert((chunk->map[run_ind + i] & CHUNK_MAP_DIRTY) ==
-                           0);
-                       chunk->map[run_ind + i] |= CHUNK_MAP_DIRTY;
-                       chunk->ndirty++;
-                       arena->ndirty++;
+                       assert((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
+                           == 0);
+                       chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
                }
-       }
-#ifdef MALLOC_DEBUG
-       /* Set map elements to a bogus value in order to aid error detection. */
-       {
+
+               if (chunk->ndirty == 0) {
+                       arena_chunk_tree_dirty_insert(&arena->chunks_dirty,
+                           chunk);
+               }
+               chunk->ndirty += run_pages;
+               arena->ndirty += run_pages;
+       } else {
                size_t i;
 
                for (i = 0; i < run_pages; i++) {
-                       chunk->map[run_ind + i] |= (CHUNK_MAP_LARGE |
-                           CHUNK_MAP_POS_MASK);
+                       chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE |
+                           CHUNK_MAP_ALLOCATED);
                }
        }
-#endif
+       chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+           pagesize_mask);
+       chunk->map[run_ind+run_pages-1].bits = size |
+           (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
 
        /* Try to coalesce forward. */
-       key.addr = (void *)((uintptr_t)run + size);
-       nodeC = RB_NFIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
-       if (nodeC != NULL && nodeC->addr == key.addr) {
-               /*
-                * Coalesce forward.  This does not change the position within
-                * runs_avail_ad, so only remove/insert from/into
-                * runs_avail_szad.
-                */
-               RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeC);
-               nodeC->addr = (void *)run;
-               nodeC->size += size;
-               RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeC);
-               arena_chunk_node_dealloc(chunk, nodeB);
-               nodeB = nodeC;
-       } else {
+       if (run_ind + run_pages < chunk_npages &&
+           (chunk->map[run_ind+run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
+               size_t nrun_size = chunk->map[run_ind+run_pages].bits &
+                   ~pagesize_mask;
+
                /*
-                * Coalescing forward failed, so insert nodeB into runs_avail_*.
+                * Remove successor from runs_avail; the coalesced run is
+                * inserted later.
                 */
-               RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
-               RB_INSERT(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
+               arena_avail_tree_remove(&arena->runs_avail,
+                   &chunk->map[run_ind+run_pages]);
+
+               size += nrun_size;
+               run_pages = size >> pagesize_2pow;
+
+               assert((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
+                   == nrun_size);
+               chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+                   pagesize_mask);
+               chunk->map[run_ind+run_pages-1].bits = size |
+                   (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
        }
 
        /* Try to coalesce backward. */
-       nodeA = RB_PREV(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
-       if (nodeA != NULL && (void *)((uintptr_t)nodeA->addr + nodeA->size) ==
-           (void *)run) {
-               /*
-                * Coalesce with previous run.  This does not change nodeB's
-                * position within runs_avail_ad, so only remove/insert
-                * from/into runs_avail_szad.
-                */
-               RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeA);
-               RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, nodeA);
+       if (run_ind > arena_chunk_header_npages && (chunk->map[run_ind-1].bits &
+           CHUNK_MAP_ALLOCATED) == 0) {
+               size_t prun_size = chunk->map[run_ind-1].bits & ~pagesize_mask;
 
-               RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
-               nodeB->addr = nodeA->addr;
-               nodeB->size += nodeA->size;
-               RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+               run_ind -= prun_size >> pagesize_2pow;
 
-               arena_chunk_node_dealloc(chunk, nodeA);
+               /*
+                * Remove predecessor from runs_avail; the coalesced run is
+                * inserted later.
+                */
+               arena_avail_tree_remove(&arena->runs_avail,
+                   &chunk->map[run_ind]);
+
+               size += prun_size;
+               run_pages = size >> pagesize_2pow;
+
+               assert((chunk->map[run_ind].bits & ~pagesize_mask) ==
+                   prun_size);
+               chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+                   pagesize_mask);
+               chunk->map[run_ind+run_pages-1].bits = size |
+                   (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
        }
 
+       /* Insert into runs_avail, now that coalescing is complete. */
+       arena_avail_tree_insert(&arena->runs_avail, &chunk->map[run_ind]);
+
        /* Deallocate chunk if it is now completely unused. */
-       if (chunk->pages_used == 0)
+       if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
+           CHUNK_MAP_ALLOCATED)) == arena_maxclass)
                arena_chunk_dealloc(arena, chunk);
 
        /* Enforce opt_dirty_max. */
@@ -3235,58 +3459,43 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 }
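
Because every free run's size is recorded in both its first and its last map
slot, arena_run_dalloc() can merge with either neighbour in O(1) rather than
consulting an address-ordered tree.  A toy sketch of that boundary-tag
coalescing; here map[i] == 0 means the page is allocated, and a free run
stores its length in pages at both ends:

    #include <stddef.h>

    /* Free run_pages pages starting at ind, merging with free neighbours;
     * returns the start index of the coalesced run. */
    static size_t
    coalesce_free(size_t *map, size_t npages, size_t ind, size_t run_pages)
    {
            /* Forward: the successor's head tag sits at ind + run_pages. */
            if (ind + run_pages < npages && map[ind + run_pages] != 0)
                    run_pages += map[ind + run_pages];

            /* Backward: the predecessor's tail tag sits at ind - 1. */
            if (ind > 0 && map[ind - 1] != 0) {
                    size_t prev = map[ind - 1];
                    ind -= prev;
                    run_pages += prev;
            }

            /* Retag both ends of the merged run. */
            map[ind] = run_pages;
            map[ind + run_pages - 1] = run_pages;
            return ind;
    }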
 
 static void
-arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, extent_node_t *nodeB,
-    arena_run_t *run, size_t oldsize, size_t newsize)
+arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+    size_t oldsize, size_t newsize)
 {
-       extent_node_t *nodeA;
+       size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
+       size_t head_npages = (oldsize - newsize) >> pagesize_2pow;
 
-       assert(nodeB->addr == run);
-       assert(nodeB->size == oldsize);
        assert(oldsize > newsize);
 
        /*
-        * Update the run's node in runs_alloced_ad.  Its position does not
-        * change.
-        */
-       nodeB->addr = (void *)((uintptr_t)run + (oldsize - newsize));
-       nodeB->size = newsize;
-
-       /*
-        * Insert a node into runs_alloced_ad so that arena_run_dalloc() can
-        * treat the leading run as separately allocated.
+        * Update the chunk map so that arena_run_dalloc() can treat the
+        * leading run as separately allocated.
         */
-       nodeA = arena_chunk_node_alloc(chunk);
-       nodeA->addr = (void *)run;
-       nodeA->size = oldsize - newsize;
-       RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeA);
+       chunk->map[pageind].bits = (oldsize - newsize) | CHUNK_MAP_LARGE |
+           CHUNK_MAP_ALLOCATED;
+       chunk->map[pageind+head_npages].bits = newsize | CHUNK_MAP_LARGE |
+           CHUNK_MAP_ALLOCATED;
 
-       arena_run_dalloc(arena, (arena_run_t *)run, false);
+       arena_run_dalloc(arena, run, false);
 }
 
 static void
-arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, extent_node_t *nodeA,
-    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
+arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+    size_t oldsize, size_t newsize, bool dirty)
 {
-       extent_node_t *nodeB;
+       size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
+       size_t npages = newsize >> pagesize_2pow;
 
-       assert(nodeA->addr == run);
-       assert(nodeA->size == oldsize);
        assert(oldsize > newsize);
 
        /*
-        * Update the run's node in runs_alloced_ad.  Its position does not
-        * change.
+        * Update the chunk map so that arena_run_dalloc() can treat the
+        * trailing run as separately allocated.
         */
-       nodeA->size = newsize;
-
-       /*
-        * Insert a node into runs_alloced_ad so that arena_run_dalloc() can
-        * treat the trailing run as separately allocated.
-        */
-       nodeB = arena_chunk_node_alloc(chunk);
-       nodeB->addr = (void *)((uintptr_t)run + newsize);
-       nodeB->size = oldsize - newsize;
-       RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
+       chunk->map[pageind].bits = newsize | CHUNK_MAP_LARGE |
+           CHUNK_MAP_ALLOCATED;
+       chunk->map[pageind+npages].bits = (oldsize - newsize) | CHUNK_MAP_LARGE
+           | CHUNK_MAP_ALLOCATED;
 
        arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
            dirty);
@@ -3295,13 +3504,16 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, extent_node_t *nodeA,
 static arena_run_t *
 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 {
+       arena_chunk_map_t *mapelm;
        arena_run_t *run;
        unsigned i, remainder;
 
        /* Look for a usable run. */
-       if ((run = RB_MIN(arena_run_tree_s, &bin->runs)) != NULL) {
+       mapelm = arena_run_tree_first(&bin->runs);
+       if (mapelm != NULL) {
                /* run is guaranteed to have available space. */
-               RB_REMOVE(arena_run_tree_s, &bin->runs, run);
+               arena_run_tree_remove(&bin->runs, mapelm);
+               run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
 #ifdef MALLOC_STATS
                bin->stats.reruns++;
 #endif
@@ -3310,17 +3522,28 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
        /* No existing runs have any space available. */
 
        /* Allocate a new run. */
-       run = arena_run_alloc(arena, bin->run_size, true, false);
+       run = arena_run_alloc(arena, bin, bin->run_size, false, false);
        if (run == NULL)
                return (NULL);
+       /*
+        * Don't initialize if a race in arena_run_alloc() allowed an existing
+        * run to become usable.
+        */
+       if (run == bin->runcur)
+               return (run);
+
+       VALGRIND_MALLOCLIKE_BLOCK(run, sizeof(arena_run_t) + (sizeof(unsigned) *
+           (bin->regs_mask_nelms - 1)), 0, false);
 
        /* Initialize run internals. */
        run->bin = bin;
 
-       for (i = 0; i < bin->regs_mask_nelms; i++)
+       for (i = 0; i < bin->regs_mask_nelms - 1; i++)
                run->regs_mask[i] = UINT_MAX;
        remainder = bin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
-       if (remainder != 0) {
+       if (remainder == 0)
+               run->regs_mask[i] = UINT_MAX;
+       else {
                /* The last element has spare bits that need to be unset. */
                run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
                    - remainder));
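
The region bitmap is an array of unsigned words, so when nregs is not a
multiple of the word size the last word must have its spare high bits
cleared, or the allocator would hand out regions past the end of the run.  A
sketch of that computation, assuming 32-bit unsigned (SIZEOF_INT_2POW == 2):

    #include <limits.h>

    /* Mask for the last regs_mask word: one bit per remaining region. */
    static unsigned
    last_word_mask(unsigned nregs)
    {
            unsigned remainder = nregs & 31; /* nregs mod bits-per-word */

            return (remainder == 0) ? UINT_MAX
                                    : (UINT_MAX >> (32 - remainder));
    }
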
@@ -3347,7 +3570,6 @@ static inline void *
 arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
 {
        void *ret;
-        arena = 0;              /* this is just to quiet a compiler warning */
 
        assert(run->magic == ARENA_RUN_MAGIC);
        assert(run->nfree > 0);
@@ -3488,7 +3710,7 @@ arena_lock_balance_hard(arena_t *arena)
 #endif
        ind = PRN(balance, narenas_2pow);
        if (arenas[ind] != NULL) {
-#ifdef WIN32
+#ifdef MOZ_MEMORY_WINDOWS
                TlsSetValue(tlsIndex, arenas[ind]);
 #else
                arenas_map = arenas[ind];
@@ -3496,13 +3718,13 @@ arena_lock_balance_hard(arena_t *arena)
        } else {
                malloc_spin_lock(&arenas_lock);
                if (arenas[ind] != NULL) {
-#ifdef WIN32
+#ifdef MOZ_MEMORY_WINDOWS
                        TlsSetValue(tlsIndex, arenas[ind]);
 #else
                        arenas_map = arenas[ind];
 #endif
                } else {
-#ifdef WIN32
+#ifdef MOZ_MEMORY_WINDOWS
                        TlsSetValue(tlsIndex, arenas_extend(ind));
 #else
                        arenas_map = arenas_extend(ind);
@@ -3569,6 +3791,7 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
 #endif
        malloc_spin_unlock(&arena->lock);
 
+       VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, zero);
        if (zero == false) {
 #ifdef MALLOC_FILL
                if (opt_junk)
@@ -3594,7 +3817,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
 #else
        malloc_spin_lock(&arena->lock);
 #endif
-       ret = (void *)arena_run_alloc(arena, size, false, zero);
+       ret = (void *)arena_run_alloc(arena, NULL, size, true, zero);
        if (ret == NULL) {
                malloc_spin_unlock(&arena->lock);
                return (NULL);
@@ -3605,6 +3828,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
 #endif
        malloc_spin_unlock(&arena->lock);
 
+       VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, zero);
        if (zero == false) {
 #ifdef MALLOC_FILL
                if (opt_junk)
@@ -3626,9 +3850,6 @@ arena_malloc(arena_t *arena, size_t size, bool zero)
        assert(size != 0);
        assert(QUANTUM_CEILING(size) <= arena_maxclass);
 
-/* #ifdef USE_STATS_MEMORY */
-/*     arena->mi.uordblks += size; */
-/* #endif */
        if (size <= bin_maxclass) {
                return (arena_malloc_small(arena, size, zero));
        } else
@@ -3640,6 +3861,7 @@ imalloc(size_t size)
 {
 
        assert(size != 0);
+
        if (size <= arena_maxclass)
                return (arena_malloc(choose_arena(), size, false));
        else
@@ -3649,6 +3871,7 @@ imalloc(size_t size)
 static inline void *
 icalloc(size_t size)
 {
+
        if (size <= arena_maxclass)
                return (arena_malloc(choose_arena(), size, true));
        else
@@ -3662,7 +3885,6 @@ arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
        void *ret;
        size_t offset;
        arena_chunk_t *chunk;
-       extent_node_t *node, key;
 
        assert((size & pagesize_mask) == 0);
        assert((alignment & pagesize_mask) == 0);
@@ -3672,7 +3894,7 @@ arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
 #else
        malloc_spin_lock(&arena->lock);
 #endif
-       ret = (void *)arena_run_alloc(arena, alloc_size, false, false);
+       ret = (void *)arena_run_alloc(arena, NULL, alloc_size, true, false);
        if (ret == NULL) {
                malloc_spin_unlock(&arena->lock);
                return (NULL);
@@ -3683,31 +3905,14 @@ arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
        offset = (uintptr_t)ret & (alignment - 1);
        assert((offset & pagesize_mask) == 0);
        assert(offset < alloc_size);
-       if (offset == 0) {
-               /*
-                * Update the run's node in runs_alloced_ad.  Its position
-                * does not change.
-                */
-               key.addr = ret;
-               node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
-               assert(node != NULL);
-
-               arena_run_trim_tail(arena, chunk, node, ret, alloc_size, size,
-                   false);
-       } else {
+       if (offset == 0)
+               arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, alloc_size, size, false);
+       else {
                size_t leadsize, trailsize;
 
-               /*
-                * Update the run's node in runs_alloced_ad.  Its position
-                * does not change.
-                */
-               key.addr = ret;
-               node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
-               assert(node != NULL);
-
                leadsize = alignment - offset;
                if (leadsize > 0) {
-                       arena_run_trim_head(arena, chunk, node, ret, alloc_size,
+                       arena_run_trim_head(arena, chunk, (arena_run_t*)ret, alloc_size,
                            alloc_size - leadsize);
                        ret = (void *)((uintptr_t)ret + leadsize);
                }
@@ -3716,8 +3921,8 @@ arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
                if (trailsize != 0) {
                        /* Trim trailing space. */
                        assert(trailsize < alloc_size);
-                       arena_run_trim_tail(arena, chunk, node, ret, size +
-                           trailsize, size, false);
+                       arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, size + trailsize,
+                           size, false);
                }
        }
 
@@ -3727,6 +3932,7 @@ arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
 #endif
        malloc_spin_unlock(&arena->lock);
 
+       VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, false);
 #ifdef MALLOC_FILL
        if (opt_junk)
                memset(ret, 0xa5, size);
@@ -3837,42 +4043,70 @@ arena_salloc(const void *ptr)
 {
        size_t ret;
        arena_chunk_t *chunk;
-       arena_chunk_map_t mapelm;
-       size_t pageind;
+       size_t pageind, mapbits;
 
        assert(ptr != NULL);
        assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
-       mapelm = chunk->map[pageind];
-       if ((mapelm & CHUNK_MAP_LARGE) == 0) {
-               arena_run_t *run;
-
-               /* Small allocation size is in the run header. */
-               pageind -= (mapelm & CHUNK_MAP_POS_MASK);
-               run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
-                   pagesize_2pow));
+       mapbits = chunk->map[pageind].bits;
+       assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+       if ((mapbits & CHUNK_MAP_LARGE) == 0) {
+               arena_run_t *run = (arena_run_t *)(mapbits & ~pagesize_mask);
                assert(run->magic == ARENA_RUN_MAGIC);
                ret = run->bin->reg_size;
        } else {
-               arena_t *arena = chunk->arena;
-               extent_node_t *node, key;
-
-               /* Large allocation size is in the extent tree. */
-               assert((mapelm & CHUNK_MAP_POS_MASK) == 0);
-               arena = chunk->arena;
-               malloc_spin_lock(&arena->lock);
-               key.addr = (void *)ptr;
-               node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
-               assert(node != NULL);
-               ret = node->size;
-               malloc_spin_unlock(&arena->lock);
+               ret = mapbits & ~pagesize_mask;
+               assert(ret != 0);
        }
 
        return (ret);
 }
 
+#if (defined(MALLOC_VALIDATE) || defined(MOZ_MEMORY_DARWIN))
+/*
+ * Validate ptr before assuming that it points to an allocation.  Currently,
+ * the following validation is performed:
+ *
+ * + Check that ptr is not NULL.
+ *
+ * + Check that ptr lies within a mapped chunk.
+ */
+static inline size_t
+isalloc_validate(const void *ptr)
+{
+       arena_chunk_t *chunk;
+
+       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+       if (chunk == NULL)
+               return (0);
+
+       if (malloc_rtree_get(chunk_rtree, (uintptr_t)chunk) == NULL)
+               return (0);
+
+       if (chunk != ptr) {
+               assert(chunk->arena->magic == ARENA_MAGIC);
+               return (arena_salloc(ptr));
+       } else {
+               size_t ret;
+               extent_node_t *node;
+               extent_node_t key;
+
+               /* Chunk. */
+               key.addr = (void *)chunk;
+               malloc_mutex_lock(&huge_mtx);
+               node = extent_tree_ad_search(&huge, &key);
+               if (node != NULL)
+                       ret = node->size;
+               else
+                       ret = 0;
+               malloc_mutex_unlock(&huge_mtx);
+               return (ret);
+       }
+}
+#endif
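
malloc_rtree_get() consults a radix tree keyed on the chunk's address, which
is what lets isalloc_validate() reject pointers this allocator never issued.
A toy two-level version over a 32-bit key (the real tree's depth and level
widths depend on configuration, and the key's low in-chunk bits are already
zero):

    #include <stdint.h>
    #include <stddef.h>

    #define LEVEL_BITS 8 /* toy: two levels of 8 key bits each */

    typedef struct {
            void **subtrees[1 << LEVEL_BITS];
    } rtree_t;

    static void *
    rtree_get(const rtree_t *rtree, uint32_t key)
    {
            uint32_t i = key >> (32 - LEVEL_BITS);
            uint32_t j = (key >> (32 - 2 * LEVEL_BITS)) &
                ((1u << LEVEL_BITS) - 1);
            void **subtree = rtree->subtrees[i];

            /* NULL at either level means the chunk was never mapped. */
            return (subtree == NULL) ? NULL : subtree[j];
    }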
+
 static inline size_t
 isalloc(const void *ptr)
 {
@@ -3896,7 +4130,7 @@ isalloc(const void *ptr)
 
                /* Extract from tree of huge allocations. */
                key.addr = __DECONST(void *, ptr);
-               node = RB_FIND(extent_tree_ad_s, &huge, &key);
+               node = extent_tree_ad_search(&huge, &key);
                assert(node != NULL);
 
                ret = node->size;
@@ -3909,22 +4143,17 @@ isalloc(const void *ptr)
 
 static inline void
 arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t pageind, arena_chunk_map_t mapelm)
+    arena_chunk_map_t *mapelm)
 {
        arena_run_t *run;
        arena_bin_t *bin;
        size_t size;
 
-       pageind -= (mapelm & CHUNK_MAP_POS_MASK);
-
-       run = (arena_run_t *)((uintptr_t)chunk + (pageind << pagesize_2pow));
+       run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
        assert(run->magic == ARENA_RUN_MAGIC);
        bin = run->bin;
        size = bin->reg_size;
 
-/* #ifdef USE_STATS_MEMORY */
-/*         arena->mi.fordblks += size; */
-/* #endif */
 #ifdef MALLOC_FILL
        if (opt_junk)
                memset(ptr, 0x5a, size);
@@ -3938,16 +4167,23 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
                if (run == bin->runcur)
                        bin->runcur = NULL;
                else if (bin->nregs != 1) {
+                       size_t run_pageind = (((uintptr_t)run -
+                           (uintptr_t)chunk)) >> pagesize_2pow;
+                       arena_chunk_map_t *run_mapelm =
+                           &chunk->map[run_pageind];
                        /*
                         * This block's conditional is necessary because if the
                         * run only contains one region, then it never gets
                         * inserted into the non-full runs tree.
                         */
-                       RB_REMOVE(arena_run_tree_s, &bin->runs, run);
+                       assert(arena_run_tree_search(&bin->runs, run_mapelm) ==
+                           run_mapelm);
+                       arena_run_tree_remove(&bin->runs, run_mapelm);
                }
 #ifdef MALLOC_DEBUG
                run->magic = 0;
 #endif
+               VALGRIND_FREELIKE_BLOCK(run, 0);
                arena_run_dalloc(arena, run, true);
 #ifdef MALLOC_STATS
                bin->stats.curruns--;
@@ -3962,13 +4198,31 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
                else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
                        /* Switch runcur. */
                        if (bin->runcur->nfree > 0) {
+                               arena_chunk_t *runcur_chunk =
+                                   (arena_chunk_t*)CHUNK_ADDR2BASE(bin->runcur);
+                               size_t runcur_pageind =
+                                   (((uintptr_t)bin->runcur -
+                                   (uintptr_t)runcur_chunk)) >> pagesize_2pow;
+                               arena_chunk_map_t *runcur_mapelm =
+                                   &runcur_chunk->map[runcur_pageind];
+
                                /* Insert runcur. */
-                               RB_INSERT(arena_run_tree_s, &bin->runs,
-                                   bin->runcur);
+                               assert(arena_run_tree_search(&bin->runs,
+                                   runcur_mapelm) == NULL);
+                               arena_run_tree_insert(&bin->runs,
+                                   runcur_mapelm);
                        }
                        bin->runcur = run;
-               } else
-                       RB_INSERT(arena_run_tree_s, &bin->runs, run);
+               } else {
+                       size_t run_pageind = (((uintptr_t)run -
+                           (uintptr_t)chunk)) >> pagesize_2pow;
+                       arena_chunk_map_t *run_mapelm =
+                           &chunk->map[run_pageind];
+
+                       assert(arena_run_tree_search(&bin->runs, run_mapelm) ==
+                           NULL);
+                       arena_run_tree_insert(&bin->runs, run_mapelm);
+               }
        }
 #ifdef MALLOC_STATS
        arena->stats.allocated_small -= size;
@@ -3976,120 +4230,28 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 #endif
 }
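
When a region is freed into a run at a lower address than bin->runcur, the
code above promotes that run to runcur and puts the displaced run (if it
still has free regions) back into the non-full tree.  Preferring the
lowest-addressed run is the usual address-ordered policy: it keeps
allocations dense at low addresses so higher runs drain and whole chunks can
eventually be released.  The comparison, as a sketch (reinsertion of the
displaced run is elided):

    #include <stdint.h>

    /* Prefer the lowest-addressed non-full run as the bin's current run. */
    static void
    maybe_switch_runcur(void **runcur, void *run)
    {
            if (*runcur == NULL || (uintptr_t)run < (uintptr_t)*runcur)
                    *runcur = run;
    }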
 
-#ifdef MALLOC_LAZY_FREE
-static inline void
-arena_dalloc_lazy(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t pageind, arena_chunk_map_t *mapelm)
-{
-       void **free_cache = arena->free_cache;
-       unsigned i, slot;
-
-       if (g_isthreaded == false || opt_lazy_free_2pow < 0) {
-               malloc_spin_lock(&arena->lock);
-               arena_dalloc_small(arena, chunk, ptr, pageind, *mapelm);
-               malloc_spin_unlock(&arena->lock);
-               return;
-       }
-
-       for (i = 0; i < LAZY_FREE_NPROBES; i++) {
-               slot = PRN(lazy_free, opt_lazy_free_2pow);
-               if (atomic_cmpset_ptr((uintptr_t *)&free_cache[slot],
-                   (uintptr_t)NULL, (uintptr_t)ptr)) {
-                       return;
-               }
-       }
-
-       arena_dalloc_lazy_hard(arena, chunk, ptr, pageind, mapelm);
-}
-
-static void
-arena_dalloc_lazy_hard(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t pageind, arena_chunk_map_t *mapelm)
-{
-       void **free_cache = arena->free_cache;
-       unsigned i, slot;
-
-       malloc_spin_lock(&arena->lock);
-       arena_dalloc_small(arena, chunk, ptr, pageind, *mapelm);
-
-       /*
-        * Check whether another thread already cleared the cache.  It is
-        * possible that another thread cleared the cache *and* this slot was
-        * already refilled, which could result in a mostly fruitless cache
-        * sweep, but such a sequence of events causes no correctness issues.
-        */
-       if ((ptr = (void *)atomic_readandclear_ptr(
-           (uintptr_t *)&free_cache[slot]))
-           != NULL) {
-               unsigned lazy_free_mask;
-               
-               /*
-                * Clear the cache, since we failed to find a slot.  It is
-                * possible that other threads will continue to insert objects
-                * into the cache while this one sweeps, but that is okay,
-                * since on average the cache is still swept with the same
-                * frequency.
-                */
-
-               /* Handle pointer at current slot. */
-               chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-               pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >>
-                   pagesize_2pow);
-               mapelm = &chunk->map[pageind];
-               arena_dalloc_small(arena, chunk, ptr, pageind, *mapelm);
-
-               /* Sweep remainder of slots. */
-               lazy_free_mask = (1U << opt_lazy_free_2pow) - 1;
-               for (i = (slot + 1) & lazy_free_mask;
-                    i != slot;
-                    i = (i + 1) & lazy_free_mask) {
-                       ptr = (void *)atomic_readandclear_ptr(
-                           (uintptr_t *)&free_cache[i]);
-                       if (ptr != NULL) {
-                               chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-                               pageind = (((uintptr_t)ptr - (uintptr_t)chunk)
-                                   >> pagesize_2pow);
-                               mapelm = &chunk->map[pageind];
-                               arena_dalloc_small(arena, chunk, ptr, pageind,
-                                   *mapelm);
-                       }
-               }
-       }
-
-       malloc_spin_unlock(&arena->lock);
-}
-#endif
-
 static void
 arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 {
        /* Large allocation. */
        malloc_spin_lock(&arena->lock);
 
-        chunk = 0;              /* this is just to quiet a compiler warning */
 #ifdef MALLOC_FILL
 #ifndef MALLOC_STATS
        if (opt_junk)
 #endif
 #endif
        {
-               extent_node_t *node, key;
-               size_t size;
+               size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
+                   pagesize_2pow;
+               size_t size = chunk->map[pageind].bits & ~pagesize_mask;
 
-               key.addr = ptr;
-               node = RB_FIND(extent_tree_ad_s,
-                   &arena->runs_alloced_ad, &key);
-               assert(node != NULL);
-               size = node->size;
 #ifdef MALLOC_FILL
 #ifdef MALLOC_STATS
                if (opt_junk)
 #endif
                        memset(ptr, 0x5a, size);
 #endif
-/* #ifdef USE_STATS_MEMORY */
-/*         arena->mi.fordblks += size; */
-/* #endif */
 #ifdef MALLOC_STATS
                arena->stats.allocated_large -= size;
 #endif
@@ -4103,44 +4265,45 @@ arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 }
 
 static inline void
-arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+arena_dalloc(void *ptr, size_t offset)
 {
+       arena_chunk_t *chunk;
+       arena_t *arena;
        size_t pageind;
        arena_chunk_map_t *mapelm;
 
+       assert(ptr != NULL);
+       assert(offset != 0);
+       assert(CHUNK_ADDR2OFFSET(ptr) == offset);
+
+       chunk = (arena_chunk_t *) ((uintptr_t)ptr - offset);
+       arena = chunk->arena;
        assert(arena != NULL);
        assert(arena->magic == ARENA_MAGIC);
-       assert(chunk->arena == arena);
-       assert(ptr != NULL);
-       assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
-       pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
+       pageind = offset >> pagesize_2pow;
        mapelm = &chunk->map[pageind];
-       if ((*mapelm & CHUNK_MAP_LARGE) == 0) {
+       assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
+       if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
                /* Small allocation. */
-#ifdef MALLOC_LAZY_FREE
-               arena_dalloc_lazy(arena, chunk, ptr, pageind, mapelm);
-#else
                malloc_spin_lock(&arena->lock);
-               arena_dalloc_small(arena, chunk, ptr, pageind, *mapelm);
+               arena_dalloc_small(arena, chunk, ptr, mapelm);
                malloc_spin_unlock(&arena->lock);
-#endif
-       } else {
-               assert((*mapelm & CHUNK_MAP_POS_MASK) == 0);
+       } else
                arena_dalloc_large(arena, chunk, ptr);
-       }
+       VALGRIND_FREELIKE_BLOCK(ptr, 0);
 }
 
 static inline void
 idalloc(void *ptr)
 {
-       arena_chunk_t *chunk;
+       size_t offset;
 
        assert(ptr != NULL);
 
-       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-       if (chunk != ptr)
-               arena_dalloc(chunk->arena, chunk, ptr);
+       offset = CHUNK_ADDR2OFFSET(ptr);
+       if (offset != 0)
+               arena_dalloc(ptr, offset);
        else
                huge_dalloc(ptr);
 }
@@ -4149,7 +4312,6 @@ static void
 arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t size, size_t oldsize)
 {
-       extent_node_t *node, key;
 
        assert(size < oldsize);
 
@@ -4157,16 +4319,13 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
         * Shrink the run, and make trailing pages available for other
         * allocations.
         */
-       key.addr = (void *)((uintptr_t)ptr);
 #ifdef MALLOC_BALANCE
        arena_lock_balance(arena);
 #else
        malloc_spin_lock(&arena->lock);
 #endif
-       node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
-       assert(node != NULL);
-       arena_run_trim_tail(arena, chunk, node, (arena_run_t *)ptr, oldsize,
-           size, true);
+       arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
+           true);
 #ifdef MALLOC_STATS
        arena->stats.allocated_large -= oldsize - size;
 #endif
@@ -4177,44 +4336,34 @@ static bool
 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t size, size_t oldsize)
 {
-       extent_node_t *nodeC, key;
+       size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow;
+       size_t npages = oldsize >> pagesize_2pow;
+
+       assert(oldsize == (chunk->map[pageind].bits & ~pagesize_mask));
 
        /* Try to extend the run. */
        assert(size > oldsize);
-       key.addr = (void *)((uintptr_t)ptr + oldsize);
 #ifdef MALLOC_BALANCE
        arena_lock_balance(arena);
 #else
        malloc_spin_lock(&arena->lock);
 #endif
-       nodeC = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
-       if (nodeC != NULL && oldsize + nodeC->size >= size) {
-               extent_node_t *nodeA, *nodeB;
-
+       if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits
+           & CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[pageind+npages].bits &
+           ~pagesize_mask) >= size - oldsize) {
                /*
                 * The next run is available and sufficiently large.  Split the
                 * following run, then merge the first part with the existing
-                * allocation.  This results in a bit more tree manipulation
-                * than absolutely necessary, but it substantially simplifies
-                * the code.
+                * allocation.
                 */
-               arena_run_split(arena, (arena_run_t *)nodeC->addr, size -
-                   oldsize, false, false);
-
-               key.addr = ptr;
-               nodeA = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
-                   &key);
-               assert(nodeA != NULL);
-
-               key.addr = (void *)((uintptr_t)ptr + oldsize);
-               nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
-                   &key);
-               assert(nodeB != NULL);
-
-               nodeA->size += nodeB->size;
+               arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
+                   ((pageind+npages) << pagesize_2pow)), size - oldsize, true,
+                   false);
 
-               RB_REMOVE(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
-               arena_chunk_node_dealloc(chunk, nodeB);
+               chunk->map[pageind].bits = size | CHUNK_MAP_LARGE |
+                   CHUNK_MAP_ALLOCATED;
+               chunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
+                   CHUNK_MAP_ALLOCATED;
 
 #ifdef MALLOC_STATS
                arena->stats.allocated_large += size - oldsize;
@@ -4345,10 +4494,28 @@ iralloc(void *ptr, size_t size)
 
        oldsize = isalloc(ptr);
 
+#ifndef MALLOC_VALGRIND
        if (size <= arena_maxclass)
                return (arena_ralloc(ptr, size, oldsize));
        else
                return (huge_ralloc(ptr, size, oldsize));
+#else
+       /*
+        * Valgrind does not provide a public interface for modifying an
+        * existing allocation, so use malloc/memcpy/free instead.
+        */
+       {
+               void *ret = imalloc(size);
+               if (ret != NULL) {
+                       if (oldsize < size)
+                           memcpy(ret, ptr, oldsize);
+                       else
+                           memcpy(ret, ptr, size);
+                       idalloc(ptr);
+               }
+               return (ret);
+       }
+#endif
 }
 
 static bool
@@ -4366,27 +4533,16 @@ arena_new(arena_t *arena)
 #endif
 
        /* Initialize chunks. */
-       RB_INIT(&arena->chunks);
+       arena_chunk_tree_dirty_new(&arena->chunks_dirty);
        arena->spare = NULL;
 
        arena->ndirty = 0;
 
-       RB_INIT(&arena->runs_avail_szad);
-       RB_INIT(&arena->runs_avail_ad);
-       RB_INIT(&arena->runs_alloced_ad);
+       arena_avail_tree_new(&arena->runs_avail);
 
 #ifdef MALLOC_BALANCE
        arena->contention = 0;
 #endif
-#ifdef MALLOC_LAZY_FREE
-       if (opt_lazy_free_2pow >= 0) {
-               arena->free_cache = (void **) base_calloc(1, sizeof(void *)
-                   * (1U << opt_lazy_free_2pow));
-               if (arena->free_cache == NULL)
-                       return (true);
-       } else
-               arena->free_cache = NULL;
-#endif
 
        /* Initialize bins. */
        prev_run_size = pagesize;
@@ -4395,7 +4551,7 @@ arena_new(arena_t *arena)
        for (i = 0; i < ntbins; i++) {
                bin = &arena->bins[i];
                bin->runcur = NULL;
-               RB_INIT(&bin->runs);
+               arena_run_tree_new(&bin->runs);
 
                bin->reg_size = (1U << (TINY_MIN_2POW + i));
 
@@ -4410,7 +4566,7 @@ arena_new(arena_t *arena)
        for (; i < ntbins + nqbins; i++) {
                bin = &arena->bins[i];
                bin->runcur = NULL;
-               RB_INIT(&bin->runs);
+               arena_run_tree_new(&bin->runs);
 
                bin->reg_size = quantum * (i - ntbins + 1);
 
@@ -4426,7 +4582,7 @@ arena_new(arena_t *arena)
        for (; i < ntbins + nqbins + nsbins; i++) {
                bin = &arena->bins[i];
                bin->runcur = NULL;
-               RB_INIT(&bin->runs);
+               arena_run_tree_new(&bin->runs);
 
                bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
 
@@ -4486,6 +4642,9 @@ huge_malloc(size_t size, bool zero)
 {
        void *ret;
        size_t csize;
+#ifdef MALLOC_DECOMMIT
+       size_t psize;
+#endif
        extent_node_t *node;
 
        /* Allocate one or more contiguous chunks for this request. */
@@ -4501,7 +4660,7 @@ huge_malloc(size_t size, bool zero)
        if (node == NULL)
                return (NULL);
 
-       ret = chunk_alloc(csize, zero);
+       ret = chunk_alloc(csize, zero, true);
        if (ret == NULL) {
                base_node_dealloc(node);
                return (NULL);
@@ -4509,22 +4668,50 @@ huge_malloc(size_t size, bool zero)
 
        /* Insert node into huge. */
        node->addr = ret;
+#ifdef MALLOC_DECOMMIT
+       psize = PAGE_CEILING(size);
+       node->size = psize;
+#else
        node->size = csize;
+#endif
 
        malloc_mutex_lock(&huge_mtx);
-       RB_INSERT(extent_tree_ad_s, &huge, node);
+       extent_tree_ad_insert(&huge, node);
 #ifdef MALLOC_STATS
        huge_nmalloc++;
+#  ifdef MALLOC_DECOMMIT
+       huge_allocated += psize;
+#  else
        huge_allocated += csize;
+#  endif
 #endif
        malloc_mutex_unlock(&huge_mtx);
 
+#ifdef MALLOC_DECOMMIT
+       if (csize - psize > 0)
+               pages_decommit((void *)((uintptr_t)ret + psize), csize - psize);
+#endif
+
+#ifdef MALLOC_DECOMMIT
+       VALGRIND_MALLOCLIKE_BLOCK(ret, psize, 0, zero);
+#else
+       VALGRIND_MALLOCLIKE_BLOCK(ret, csize, 0, zero);
+#endif
+
 #ifdef MALLOC_FILL
        if (zero == false) {
                if (opt_junk)
+#  ifdef MALLOC_DECOMMIT
+                       memset(ret, 0xa5, psize);
+#  else
                        memset(ret, 0xa5, csize);
+#  endif
                else if (opt_zero)
+#  ifdef MALLOC_DECOMMIT
+                       memset(ret, 0, psize);
+#  else
                        memset(ret, 0, csize);
+#  endif
        }
 #endif
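
Under MALLOC_DECOMMIT, huge_malloc() commits only PAGE_CEILING(size) bytes
and decommits the rest of the chunk, so the huge_allocated statistic tracks
committed pages rather than whole chunks.  A sketch of the page round-up,
assuming the page size is a power of two:

    #include <stddef.h>

    /* Round s up to the next multiple of page_size (a power of two). */
    static size_t
    page_ceiling(size_t s, size_t page_size)
    {
            return (s + page_size - 1) & ~(page_size - 1);
    }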
 
@@ -4537,7 +4724,11 @@ huge_palloc(size_t alignment, size_t size)
 {
        void *ret;
        size_t alloc_size, chunk_size, offset;
+#ifdef MALLOC_DECOMMIT
+       size_t psize;
+#endif
        extent_node_t *node;
+       int pfd;
 
        /*
         * This allocation requires alignment that is even larger than chunk
@@ -4561,55 +4752,101 @@ huge_palloc(size_t alignment, size_t size)
        if (node == NULL)
                return (NULL);
 
-       ret = chunk_alloc(alloc_size, false);
-       if (ret == NULL) {
-               base_node_dealloc(node);
-               return (NULL);
-       }
-
-       offset = (uintptr_t)ret & (alignment - 1);
-       assert((offset & chunksize_mask) == 0);
-       assert(offset < alloc_size);
-       if (offset == 0) {
-               /* Trim trailing space. */
-               chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
-                   - chunk_size);
-       } else {
-               size_t trailsize;
-
-               /* Trim leading space. */
-               chunk_dealloc(ret, alignment - offset);
-
-               ret = (void *)((uintptr_t)ret + (alignment - offset));
+       /*
+        * Windows requires that there be a 1:1 mapping between VM
+        * allocation/deallocation operations.  Therefore, take care here to
+        * acquire the final result via one mapping operation.
+        *
+        * The MALLOC_PAGEFILE code also benefits from this mapping algorithm,
+        * since it reduces the number of page files.
+        */
+#ifdef MALLOC_PAGEFILE
+       if (opt_pagefile) {
+               pfd = pagefile_init(size);
+               if (pfd == -1)
+                       return (NULL);
+       } else
+#endif
+               pfd = -1;
+#ifdef JEMALLOC_USES_MAP_ALIGN
+               ret = pages_map_align(chunk_size, pfd, alignment);
+#else
+       do {
+               void *over;
 
-               trailsize = alloc_size - (alignment - offset) - chunk_size;
-               if (trailsize != 0) {
-                   /* Trim trailing space. */
-                   assert(trailsize < alloc_size);
-                   chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
-                       trailsize);
+               over = chunk_alloc(alloc_size, false, false);
+               if (over == NULL) {
+                       base_node_dealloc(node);
+                       ret = NULL;
+                       goto RETURN;
                }
-       }
 
+               offset = (uintptr_t)over & (alignment - 1);
+               assert((offset & chunksize_mask) == 0);
+               assert(offset < alloc_size);
+               ret = (void *)((uintptr_t)over + offset);
+               chunk_dealloc(over, alloc_size);
+               ret = pages_map(ret, chunk_size, pfd);
+               /*
+                * Failure here indicates a race with another thread, so try
+                * again.
+                */
+       } while (ret == NULL);
+#endif
        /* Insert node into huge. */
        node->addr = ret;
+#ifdef MALLOC_DECOMMIT
+       psize = PAGE_CEILING(size);
+       node->size = psize;
+#else
        node->size = chunk_size;
+#endif
 
        malloc_mutex_lock(&huge_mtx);
-       RB_INSERT(extent_tree_ad_s, &huge, node);
+       extent_tree_ad_insert(&huge, node);
 #ifdef MALLOC_STATS
        huge_nmalloc++;
+#  ifdef MALLOC_DECOMMIT
+       huge_allocated += psize;
+#  else
        huge_allocated += chunk_size;
+#  endif
 #endif
        malloc_mutex_unlock(&huge_mtx);
 
+#ifdef MALLOC_DECOMMIT
+       if (chunk_size - psize > 0) {
+               pages_decommit((void *)((uintptr_t)ret + psize),
+                   chunk_size - psize);
+       }
+#endif
+
+#ifdef MALLOC_DECOMMIT
+       VALGRIND_MALLOCLIKE_BLOCK(ret, psize, 0, false);
+#else
+       VALGRIND_MALLOCLIKE_BLOCK(ret, chunk_size, 0, false);
+#endif
+
 #ifdef MALLOC_FILL
        if (opt_junk)
+#  ifdef MALLOC_DECOMMIT
+               memset(ret, 0xa5, psize);
+#  else
                memset(ret, 0xa5, chunk_size);
+#  endif
        else if (opt_zero)
+#  ifdef MALLOC_DECOMMIT
+               memset(ret, 0, psize);
+#  else
                memset(ret, 0, chunk_size);
+#  endif
 #endif
 
+RETURN:
+#ifdef MALLOC_PAGEFILE
+       if (pfd != -1)
+               pagefile_close(pfd);
+#endif
        return (ret);
 }
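
The retry loop above implements an over-reserve / release / re-map pattern: reserve enough extra address space that an aligned address must fall inside, compute that address, release the whole reservation, and then map exactly chunk_size at the chosen address, retrying when another thread wins the race for the hole. Below is a minimal sketch of the same pattern against plain POSIX mmap(2); the helper name and the align-up arithmetic are illustrative, while the real code goes through chunk_alloc()/chunk_dealloc()/pages_map() and the MALLOC_PAGEFILE descriptor.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Sketch: map `size` bytes at a power-of-two `alignment`. */
static void *
aligned_map_sketch(size_t size, size_t alignment)
{
        for (;;) {
                void *over, *hint, *ret;
                uintptr_t addr;

                /* Over-reserve so an aligned address must fall inside. */
                over = mmap(NULL, size + alignment, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANON, -1, 0);
                if (over == MAP_FAILED)
                        return (NULL);

                /* Round up to the next alignment boundary. */
                addr = ((uintptr_t)over + alignment - 1) &
                    ~(uintptr_t)(alignment - 1);
                hint = (void *)addr;

                /* Release everything, then re-map only the aligned span. */
                munmap(over, size + alignment);
                ret = mmap(hint, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANON, -1, 0);
                if (ret == hint)
                        return (ret);
                /* Lost a race with another thread; clean up and retry. */
                if (ret != MAP_FAILED)
                        munmap(ret, size);
        }
}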
 
@@ -4620,13 +4857,57 @@ huge_ralloc(void *ptr, size_t size, size_t oldsize)
        size_t copysize;
 
        /* Avoid moving the allocation if the size class would not change. */
+
        if (oldsize > arena_maxclass &&
            CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
+#ifdef MALLOC_DECOMMIT
+               size_t psize = PAGE_CEILING(size);
+#endif
 #ifdef MALLOC_FILL
                if (opt_junk && size < oldsize) {
                        memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
                            - size);
-               } else if (opt_zero && size > oldsize) {
+               }
+#endif
+#ifdef MALLOC_DECOMMIT
+               if (psize < oldsize) {
+                       extent_node_t *node, key;
+
+                       pages_decommit((void *)((uintptr_t)ptr + psize),
+                           oldsize - psize);
+
+                       /* Update recorded size. */
+                       malloc_mutex_lock(&huge_mtx);
+                       key.addr = __DECONST(void *, ptr);
+                       node = extent_tree_ad_search(&huge, &key);
+                       assert(node != NULL);
+                       assert(node->size == oldsize);
+#  ifdef MALLOC_STATS
+                       huge_allocated -= oldsize - psize;
+#  endif
+                       node->size = psize;
+                       malloc_mutex_unlock(&huge_mtx);
+               } else if (psize > oldsize) {
+                       extent_node_t *node, key;
+
+                       pages_commit((void *)((uintptr_t)ptr + oldsize),
+                           psize - oldsize);
+
+                       /* Update recorded size. */
+                       malloc_mutex_lock(&huge_mtx);
+                       key.addr = __DECONST(void *, ptr);
+                       node = extent_tree_ad_search(&huge, &key);
+                       assert(node != NULL);
+                       assert(node->size == oldsize);
+#  ifdef MALLOC_STATS
+                       huge_allocated += psize - oldsize;
+#  endif
+                       node->size = psize;
+                       malloc_mutex_unlock(&huge_mtx);
+               }
+#endif
+#ifdef MALLOC_FILL
+               if (opt_zero && size > oldsize) {
                        memset((void *)((uintptr_t)ptr + oldsize), 0, size
                            - oldsize);
                }
@@ -4663,10 +4944,10 @@ huge_dalloc(void *ptr)
 
        /* Extract from tree of huge allocations. */
        key.addr = ptr;
-       node = RB_FIND(extent_tree_ad_s, &huge, &key);
+       node = extent_tree_ad_search(&huge, &key);
        assert(node != NULL);
        assert(node->addr == ptr);
-       RB_REMOVE(extent_tree_ad_s, &huge, node);
+       extent_tree_ad_remove(&huge, node);
 
 #ifdef MALLOC_STATS
        huge_ndalloc++;
@@ -4676,18 +4957,22 @@ huge_dalloc(void *ptr)
        malloc_mutex_unlock(&huge_mtx);
 
        /* Unmap chunk. */
-#ifdef MALLOC_DSS
 #ifdef MALLOC_FILL
-       if (opt_dss && opt_junk)
+       if (opt_junk)
                memset(node->addr, 0x5a, node->size);
 #endif
-#endif
+#ifdef MALLOC_DECOMMIT
+       chunk_dealloc(node->addr, CHUNK_CEILING(node->size));
+#else
        chunk_dealloc(node->addr, node->size);
+#endif
+       VALGRIND_FREELIKE_BLOCK(node->addr, 0);
 
        base_node_dealloc(node);
 }
 
-#ifdef BSD
+#ifndef MOZ_MEMORY_NARENAS_DEFAULT_ONE
+#ifdef MOZ_MEMORY_BSD
 static inline unsigned
 malloc_ncpus(void)
 {
@@ -4705,7 +4990,7 @@ malloc_ncpus(void)
 
        return (ret);
 }
-#elif (defined(LINUX_HOST))
+#elif (defined(MOZ_MEMORY_LINUX))
 #include <fcntl.h>
 
 static inline unsigned
@@ -4713,8 +4998,9 @@ malloc_ncpus(void)
 {
        unsigned ret;
        int fd, nread, column;
-       char buf[1];
+       char buf[1024];
        static const char matchstr[] = "processor\t:";
+       int i;
 
        /*
         * sysconf(3) would be the preferred method for determining the number
@@ -4734,27 +5020,30 @@ malloc_ncpus(void)
                nread = read(fd, &buf, sizeof(buf));
                if (nread <= 0)
                        break; /* EOF or error. */
-
-               if (buf[0] == '\n')
-                       column = 0;
-               else if (column != -1) {
-                       if (buf[0] == matchstr[column]) {
-                               column++;
-                               if (column == sizeof(matchstr) - 1) {
+               for (i = 0; i < nread; i++) {
+                       char c = buf[i];
+                       if (c == '\n')
+                               column = 0;
+                       else if (column != -1) {
+                               if (c == matchstr[column]) {
+                                       column++;
+                                       if (column == sizeof(matchstr) - 1) {
+                                               column = -1;
+                                               ret++;
+                                       }
+                               } else
                                        column = -1;
-                                       ret++;
-                               }
-                       } else
-                               column = -1;
+                       }
                }
        }
+
        if (ret == 0)
                ret = 1; /* Something went wrong in the parser. */
        close(fd);
 
        return (ret);
 }
-#elif (defined(DARWIN))
+#elif (defined(MOZ_MEMORY_DARWIN))
 #include <mach/mach_init.h>
 #include <mach/mach_host.h>
 
@@ -4766,61 +5055,19 @@ malloc_ncpus(void)
        processor_info_array_t pinfo;
        mach_msg_type_number_t pinfocnt;
 
-       error = host_processor_info(mach_host_self(), PROCESSOR_BASIC_INFO,
-                                   &n, &pinfo, &pinfocnt);
-       if (error != KERN_SUCCESS)
-               return (1); /* Error. */
-       else
-               return (n);
-}
-#elif (defined(HAVE_KSTAT))
-#include <kstat.h>
-
-static inline unsigned
-malloc_ncpus(void)
-{
-       unsigned ret;
-       kstat_ctl_t *ctl;
-       kstat_t *kstat;
-       kstat_named_t *named;
-       unsigned i;
-
-       if ((ctl = kstat_open()) == NULL)
-               return (1); /* Error. */
-
-       if ((kstat = kstat_lookup(ctl, "unix", -1, "system_misc")) == NULL)
-               return (1); /* Error. */
-
-       if (kstat_read(ctl, kstat, NULL) == -1)
-               return (1); /* Error. */
-
-       named = KSTAT_NAMED_PTR(kstat);
-
-       for (i = 0; i < kstat->ks_ndata; i++) {
-               if (strcmp(named[i].name, "ncpus") == 0) {
-                       /* Figure out which one of these to actually use. */
-                       switch(named[i].data_type) {
-                       case KSTAT_DATA_INT32:
-                               ret = named[i].value.i32;
-                               break;
-                       case KSTAT_DATA_UINT32:
-                               ret = named[i].value.ui32;
-                               break;
-                       case KSTAT_DATA_INT64:
-                               ret = named[i].value.i64;
-                               break;
-                       case KSTAT_DATA_UINT64:
-                               ret = named[i].value.ui64;
-                               break;
-                       default:
-                               return (1); /* Error. */
-                       }
-               }
-       }
-
-       kstat_close(ctl); /* Don't bother checking for an error. */
+       error = host_processor_info(mach_host_self(), PROCESSOR_BASIC_INFO,
+                                   &n, &pinfo, &pinfocnt);
+       if (error != KERN_SUCCESS)
+               return (1); /* Error. */
+       else
+               return (n);
+}
+#elif (defined(MOZ_MEMORY_SOLARIS))
 
-       return (ret);
+static inline unsigned
+malloc_ncpus(void)
+{
+       return sysconf(_SC_NPROCESSORS_ONLN);
 }
 #else
 static inline unsigned
@@ -4834,6 +5081,7 @@ malloc_ncpus(void)
        return (1);
 }
 #endif
+#endif
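
The Linux branch above scans /proc/cpuinfo in 1024-byte chunks, carrying the match state for "processor\t:" across read(2) boundaries, because (as the comment in the code explains) sysconf(3) is not always usable there; the Solaris branch uses sysconf(3) directly. Where sysconf(3) is trustworthy, the whole job reduces to a few lines, as in this sketch:

#include <unistd.h>

/* Sketch: online CPU count via sysconf(3), clamped to at least 1. */
static unsigned
ncpus_sysconf(void)
{
        long n = sysconf(_SC_NPROCESSORS_ONLN);

        return ((n < 1) ? 1 : (unsigned)n);
}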
 
 static void
 malloc_print_stats(void)
@@ -4852,14 +5100,11 @@ malloc_print_stats(void)
                    "\n", "");
                _malloc_message("Boolean MALLOC_OPTIONS: ",
                    opt_abort ? "A" : "a", "", "");
-#ifdef MALLOC_DSS
-               _malloc_message(opt_dss ? "D" : "d", "", "", "");
-#endif
 #ifdef MALLOC_FILL
                _malloc_message(opt_junk ? "J" : "j", "", "", "");
 #endif
-#ifdef MALLOC_DSS
-               _malloc_message(opt_mmap ? "M" : "m", "", "", "");
+#ifdef MALLOC_PAGEFILE
+               _malloc_message(opt_pagefile ? "o" : "O", "", "", "");
 #endif
                _malloc_message("P", "", "", "");
 #ifdef MALLOC_UTRACE
@@ -4876,15 +5121,10 @@ malloc_print_stats(void)
 #endif
                _malloc_message("\n", "", "", "");
 
+#ifndef MOZ_MEMORY_NARENAS_DEFAULT_ONE
                _malloc_message("CPUs: ", umax2s(ncpus, s), "\n", "");
-               _malloc_message("Max arenas: ", umax2s(narenas, s), "\n", "");
-#ifdef MALLOC_LAZY_FREE
-               if (opt_lazy_free_2pow >= 0) {
-                       _malloc_message("Lazy free slots: ",
-                           umax2s(1U << opt_lazy_free_2pow, s), "\n", "");
-               } else
-                       _malloc_message("Lazy free slots: 0\n", "", "", "");
 #endif
+               _malloc_message("Max arenas: ", umax2s(narenas, s), "\n", "");
 #ifdef MALLOC_BALANCE
                _malloc_message("Arena balance threshold: ",
                    umax2s(opt_balance_threshold, s), "\n", "");
@@ -4936,7 +5176,7 @@ malloc_print_stats(void)
                        mapped += base_mapped;
                        malloc_mutex_unlock(&base_mtx);
 
-#ifdef WIN32
+#ifdef MOZ_MEMORY_WINDOWS
                        malloc_printf("Allocated: %lu, mapped: %lu\n",
                            allocated, mapped);
 #else
@@ -4968,7 +5208,7 @@ malloc_print_stats(void)
                        /* Print chunk stats. */
                        malloc_printf(
                            "huge: nmalloc      ndalloc    allocated\n");
-#ifdef WIN32
+#ifdef MOZ_MEMORY_WINDOWS
                        malloc_printf(" %12llu %12llu %12lu\n",
                            huge_nmalloc, huge_ndalloc, huge_allocated);
 #else
@@ -4997,12 +5237,13 @@ malloc_print_stats(void)
  * implementation has to take pains to avoid infinite recursion during
  * initialization.
  */
-#if (defined(WIN32) || defined(DARWIN))
+#if (defined(MOZ_MEMORY_WINDOWS) || defined(MOZ_MEMORY_DARWIN)) && !defined(MOZ_MEMORY_WINCE)
 #define        malloc_init() false
 #else
 static inline bool
 malloc_init(void)
 {
+
        if (malloc_initialized == false)
                return (malloc_init_hard());
 
@@ -5010,7 +5251,7 @@ malloc_init(void)
 }
 #endif
 
-#ifndef WIN32
+#if !defined(MOZ_MEMORY_WINDOWS) || defined(MOZ_MEMORY_WINCE) 
 static
 #endif
 bool
@@ -5020,11 +5261,11 @@ malloc_init_hard(void)
        char buf[PATH_MAX + 1];
        const char *opts;
        long result;
-#ifndef WIN32
+#ifndef MOZ_MEMORY_WINDOWS
        int linklen;
 #endif
 
-#ifndef WIN32
+#ifndef MOZ_MEMORY_WINDOWS
        malloc_mutex_lock(&init_lock);
 #endif
 
@@ -5033,19 +5274,19 @@ malloc_init_hard(void)
                 * Another thread initialized the allocator before this one
                 * acquired init_lock.
                 */
-#ifndef WIN32
+#ifndef MOZ_MEMORY_WINDOWS
                malloc_mutex_unlock(&init_lock);
 #endif
                return (false);
        }
 
-#ifdef WIN32
+#ifdef MOZ_MEMORY_WINDOWS
        /* get a thread local storage index */
        tlsIndex = TlsAlloc();
 #endif
 
        /* Get page size and number of CPUs */
-#ifdef WIN32
+#ifdef MOZ_MEMORY_WINDOWS
        {
                SYSTEM_INFO info;
 
@@ -5054,10 +5295,14 @@ malloc_init_hard(void)
 
                pagesize = (unsigned) result;
 
+#ifndef MOZ_MEMORY_NARENAS_DEFAULT_ONE
                ncpus = info.dwNumberOfProcessors;
+#endif
        }
 #else
+#ifndef MOZ_MEMORY_NARENAS_DEFAULT_ONE
        ncpus = malloc_ncpus();
+#endif
 
        result = sysconf(_SC_PAGESIZE);
        assert(result != -1);
@@ -5073,9 +5318,35 @@ malloc_init_hard(void)
        pagesize_mask = result - 1;
        pagesize_2pow = ffs((int)result) - 1;
 
-#ifdef MALLOC_LAZY_FREE
-               if (ncpus == 1)
-                       opt_lazy_free_2pow = -1;
+#ifdef MALLOC_PAGEFILE
+       /*
+        * Determine where to create page files.  It is insufficient to
+        * unconditionally use P_tmpdir (typically "/tmp"), since for some
+        * operating systems /tmp is a separate filesystem that is rather small.
+        * Therefore prefer, in order, the following locations:
+        *
+        * 1) MALLOC_TMPDIR
+        * 2) TMPDIR
+        * 3) P_tmpdir
+        */
+       {
+               char *s;
+               size_t slen;
+               static const char suffix[] = "/jemalloc.XXXXXX";
+
+               if ((s = getenv("MALLOC_TMPDIR")) == NULL && (s =
+                   getenv("TMPDIR")) == NULL)
+                       s = P_tmpdir;
+               slen = strlen(s);
+               if (slen + sizeof(suffix) > sizeof(pagefile_templ)) {
+                       _malloc_message(_getprogname(),
+                           ": (malloc) Page file path too long\n",
+                           "", "");
+                       abort();
+               }
+               memcpy(pagefile_templ, s, slen);
+               memcpy(&pagefile_templ[slen], suffix, sizeof(suffix));
+       }
 #endif
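
The initialization block above builds a mkstemp(3)-style template by preferring MALLOC_TMPDIR, then TMPDIR, then P_tmpdir, and aborts when the result would overflow the fixed pagefile_templ buffer. The same selection logic, sketched as a standalone helper (the error-return convention here is illustrative; the code above aborts instead):

#include <stdio.h>      /* P_tmpdir */
#include <stdlib.h>     /* getenv */
#include <string.h>

/* Sketch: fill `templ` with "<tmpdir>/jemalloc.XXXXXX"; -1 on overflow. */
static int
pagefile_templ_sketch(char *templ, size_t templ_size)
{
        static const char suffix[] = "/jemalloc.XXXXXX";
        const char *dir;
        size_t dirlen;

        if ((dir = getenv("MALLOC_TMPDIR")) == NULL &&
            (dir = getenv("TMPDIR")) == NULL)
                dir = P_tmpdir;
        dirlen = strlen(dir);
        if (dirlen + sizeof(suffix) > templ_size)
                return (-1);
        memcpy(templ, dir, dirlen);
        memcpy(templ + dirlen, suffix, sizeof(suffix)); /* copies the NUL */
        return (0);
}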
 
        for (i = 0; i < 3; i++) {
@@ -5084,7 +5355,7 @@ malloc_init_hard(void)
                /* Get runtime configuration. */
                switch (i) {
                case 0:
-#ifndef WIN32
+#ifndef MOZ_MEMORY_WINDOWS
                        if ((linklen = readlink("/etc/malloc.conf", buf,
                                                sizeof(buf) - 1)) != -1) {
                                /*
@@ -5102,18 +5373,18 @@ malloc_init_hard(void)
                        }
                        break;
                case 1:
-/*                     if (issetugid() == 0 && (opts = */
-/*                         getenv("MALLOC_OPTIONS")) != NULL) { */
-/*                             /\* */
-/*                              * Do nothing; opts is already initialized to */
-/*                              * the value of the MALLOC_OPTIONS environment */
-/*                              * variable. */
-/*                              *\/ */
-/*                     } else { */
+                       if (issetugid() == 0 && (opts =
+                           getenv("MALLOC_OPTIONS")) != NULL) {
+                               /*
+                                * Do nothing; opts is already initialized to
+                                * the value of the MALLOC_OPTIONS environment
+                                * variable.
+                                */
+                       } else {
                                /* No configuration specified. */
                                buf[0] = '\0';
                                opts = buf;
-/*                     } */
+                       }
                        break;
                case 2:
                        if (_malloc_options != NULL) {
@@ -5178,16 +5449,6 @@ MALLOC_OUT:
                                                opt_balance_threshold <<= 1;
 #endif
                                        break;
-                               case 'd':
-#ifdef MALLOC_DSS
-                                       opt_dss = false;
-#endif
-                                       break;
-                               case 'D':
-#ifdef MALLOC_DSS
-                                       opt_dss = true;
-#endif
-                                       break;
                                case 'f':
                                        opt_dirty_max >>= 1;
                                        break;
@@ -5219,34 +5480,22 @@ MALLOC_OUT:
                                            (sizeof(size_t) << 3))
                                                opt_chunk_2pow++;
                                        break;
-                               case 'l':
-#ifdef MALLOC_LAZY_FREE
-                                       if (opt_lazy_free_2pow >= 0)
-                                               opt_lazy_free_2pow--;
-#endif
-                                       break;
-                               case 'L':
-#ifdef MALLOC_LAZY_FREE
-                                       if (ncpus > 1)
-                                               opt_lazy_free_2pow++;
-#endif
-                                       break;
-                               case 'm':
-#ifdef MALLOC_DSS
-                                       opt_mmap = false;
-#endif
-                                       break;
-                               case 'M':
-#ifdef MALLOC_DSS
-                                       opt_mmap = true;
-#endif
-                                       break;
                                case 'n':
                                        opt_narenas_lshift--;
                                        break;
                                case 'N':
                                        opt_narenas_lshift++;
                                        break;
+#ifdef MALLOC_PAGEFILE
+                               case 'o':
+                                       /* Do not over-commit. */
+                                       opt_pagefile = true;
+                                       break;
+                               case 'O':
+                                       /* Allow over-commit. */
+                                       opt_pagefile = false;
+                                       break;
+#endif
                                case 'p':
                                        opt_print_stats = false;
                                        break;
@@ -5319,20 +5568,19 @@ MALLOC_OUT:
                }
        }
 
-#ifdef MALLOC_DSS
-       /* Make sure that there is some method for acquiring memory. */
-       if (opt_dss == false && opt_mmap == false)
-               opt_mmap = true;
-#endif
-
        /* Take care to call atexit() only once. */
        if (opt_print_stats) {
-#ifndef WIN32
+#ifndef MOZ_MEMORY_WINDOWS
                /* Print statistics at exit. */
                atexit(malloc_print_stats);
 #endif
        }
 
+#if (!defined(MOZ_MEMORY_WINDOWS) && !defined(MOZ_MEMORY_DARWIN) && !defined(MOZ_MEMORY_ANDROID))
+       /* Prevent potential deadlock on malloc locks after fork. */
+       pthread_atfork(_malloc_prefork, _malloc_postfork, _malloc_postfork);
+#endif
+
        /* Set variables according to the value of opt_small_max_2pow. */
        if (opt_small_max_2pow < opt_quantum_2pow)
                opt_small_max_2pow = opt_quantum_2pow;
@@ -5370,20 +5618,20 @@ MALLOC_OUT:
                 * than we will need.
                 */
                header_size = sizeof(arena_chunk_t) +
-                   (sizeof(arena_chunk_map_t) * (chunk_npages - 1)) +
-                   (sizeof(extent_node_t) * chunk_npages);
+                   (sizeof(arena_chunk_map_t) * (chunk_npages - 1));
                arena_chunk_header_npages = (header_size >> pagesize_2pow) +
                    ((header_size & pagesize_mask) != 0);
        }
        arena_maxclass = chunksize - (arena_chunk_header_npages <<
            pagesize_2pow);
-#ifdef MALLOC_LAZY_FREE
+
+#ifdef JEMALLOC_USES_MAP_ALIGN
        /*
-        * Make sure that allocating the free_cache does not exceed the limits
-        * of what base_alloc() can handle.
+        * When using MAP_ALIGN, the alignment parameter must be a power of two
+        * multiple of the system pagesize, or mmap will fail.
         */
-       while ((sizeof(void *) << opt_lazy_free_2pow) > chunksize)
-               opt_lazy_free_2pow--;
+       assert((chunksize % pagesize) == 0);
+       assert((1 << (ffs(chunksize / pagesize) - 1)) == (chunksize/pagesize));
 #endif
 
        UTRACE(0, 0, 0);
@@ -5400,15 +5648,7 @@ MALLOC_OUT:
 
        /* Initialize chunks data. */
        malloc_mutex_init(&huge_mtx);
-       RB_INIT(&huge);
-#ifdef MALLOC_DSS
-       malloc_mutex_init(&dss_mtx);
-       dss_base = sbrk(0);
-       dss_prev = dss_base;
-       dss_max = dss_base;
-       RB_INIT(&dss_chunks_szad);
-       RB_INIT(&dss_chunks_ad);
-#endif
+       extent_tree_ad_new(&huge);
 #ifdef MALLOC_STATS
        huge_nmalloc = 0;
        huge_ndalloc = 0;
@@ -5418,19 +5658,16 @@ MALLOC_OUT:
        /* Initialize base allocation data structures. */
 #ifdef MALLOC_STATS
        base_mapped = 0;
-#endif
-#ifdef MALLOC_DSS
-       /*
-        * Allocate a base chunk here, since it doesn't actually have to be
-        * chunk-aligned.  Doing this before allocating any other chunks allows
-        * the use of space that would otherwise be wasted.
-        */
-       if (opt_dss)
-               base_pages_alloc(0);
+#  ifdef MALLOC_DECOMMIT
+       base_committed = 0;
+#  endif
 #endif
        base_nodes = NULL;
        malloc_mutex_init(&base_mtx);
 
+#ifdef MOZ_MEMORY_NARENAS_DEFAULT_ONE
+       narenas = 1;
+#else
        if (ncpus > 1) {
                /*
                 * For SMP systems, create four times as many arenas as there
@@ -5441,6 +5678,7 @@ MALLOC_OUT:
 
        /* Determine how many arenas to use. */
        narenas = ncpus;
+#endif
        if (opt_narenas_lshift > 0) {
                if ((narenas << opt_narenas_lshift) > narenas)
                        narenas <<= opt_narenas_lshift;
@@ -5500,7 +5738,7 @@ MALLOC_OUT:
        /* Allocate and initialize arenas. */
        arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
        if (arenas == NULL) {
-#ifndef WIN32
+#ifndef MOZ_MEMORY_WINDOWS
                malloc_mutex_unlock(&init_lock);
 #endif
                return (true);
@@ -5517,7 +5755,7 @@ MALLOC_OUT:
         */
        arenas_extend(0);
        if (arenas[0] == NULL) {
-#ifndef WIN32
+#ifndef MOZ_MEMORY_WINDOWS
                malloc_mutex_unlock(&init_lock);
 #endif
                return (true);
@@ -5528,7 +5766,7 @@ MALLOC_OUT:
         * spurious creation of an extra arena if the application switches to
         * threaded mode.
         */
-#ifdef WIN32
+#ifdef MOZ_MEMORY_WINDOWS
        TlsSetValue(tlsIndex, arenas[0]);
 #else
        arenas_map = arenas[0];
@@ -5537,26 +5775,29 @@ MALLOC_OUT:
 
        /*
         * Seed here for the initial thread, since choose_arena_hard() is only
-        * called for other threads.  The seed values don't really matter.
+        * called for other threads.  The seed value doesn't really matter.
         */
-#ifdef MALLOC_LAZY_FREE
-       SPRN(lazy_free, 42);
-#endif
 #ifdef MALLOC_BALANCE
        SPRN(balance, 42);
 #endif
 
        malloc_spin_init(&arenas_lock);
 
+#ifdef MALLOC_VALIDATE
+       chunk_rtree = malloc_rtree_new((SIZEOF_PTR << 3) - opt_chunk_2pow);
+       if (chunk_rtree == NULL)
+               return (true);
+#endif
+
        malloc_initialized = true;
-#ifndef WIN32
+#ifndef MOZ_MEMORY_WINDOWS
        malloc_mutex_unlock(&init_lock);
 #endif
        return (false);
 }
 
 /* XXX Why not just expose malloc_print_stats()? */
-#ifdef WIN32
+#ifdef MOZ_MEMORY_WINDOWS
 void
 malloc_shutdown()
 {
@@ -5573,14 +5814,50 @@ malloc_shutdown()
  * Begin malloc(3)-compatible functions.
  */
 
-DSOEXPORT
-#ifdef DARWIN
-inline void *
-moz_malloc(size_t size)
+/*
+ * Inline the standard malloc functions if they are being subsumed by Darwin's
+ * zone infrastructure.
+ */
+#ifdef MOZ_MEMORY_DARWIN
+#  define ZONE_INLINE  inline
 #else
+#  define ZONE_INLINE
+#endif
+
+/* Mangle standard interfaces on Darwin and Android, 
+   in order to avoid linking problems. */
+#if defined(MOZ_MEMORY_DARWIN)
+#define        malloc(a)       moz_malloc(a)
+#define        valloc(a)       moz_valloc(a)
+#define        calloc(a, b)    moz_calloc(a, b)
+#define        realloc(a, b)   moz_realloc(a, b)
+#define        free(a)         moz_free(a)
+#endif
+
+#if defined(MOZ_MEMORY_ANDROID) || defined(WRAP_MALLOC)
+inline void sys_free(void* ptr) {return free(ptr);}
+#define        malloc(a)       je_malloc(a)
+#define        valloc(a)       je_valloc(a)
+#define        calloc(a, b)    je_calloc(a, b)
+#define        realloc(a, b)   je_realloc(a, b)
+#define        free(a)         je_free(a)
+#define posix_memalign(a, b, c)  je_posix_memalign(a, b, c)
+
+char    *je_strndup(const char *src, size_t len) {
+  char* dst = (char*)je_malloc(len + 1);
+  if(dst) {
+    strncpy(dst, src, len);
+    dst[len] = '\0'; /* strncpy() does not terminate when strlen(src) >= len */
+  }
+  return dst;
+}
+char    *je_strdup(const char *src) {
+  size_t len = strlen(src);
+  return je_strndup(src, len);
+}
+#endif
+
+ZONE_INLINE
 void *
 malloc(size_t size)
-#endif
 {
        void *ret;
 
@@ -5621,106 +5898,104 @@ RETURN:
        return (ret);
 }
 
-DSOEXPORT
-#ifdef DARWIN
-inline int
-moz_posix_memalign(void **memptr, size_t alignment, size_t size)
+#ifdef MOZ_MEMORY_SOLARIS
+#  ifdef __SUNPRO_C
+void *
+memalign(size_t alignment, size_t size);
+#pragma no_inline(memalign)
+#  elif (defined(__GNU_C__))
+__attribute__((noinline))
+#  endif
 #else
-int
-posix_memalign(void **memptr, size_t alignment, size_t size)
+inline
 #endif
+void *
+memalign(size_t alignment, size_t size)
 {
-       int ret;
-       void *result;
+       void *ret;
 
-       if (malloc_init())
-               result = NULL;
-       else {
-               /* Make sure that alignment is a large enough power of 2. */
-               if (((alignment - 1) & alignment) != 0
-                   || alignment < sizeof(void *)) {
-#ifdef MALLOC_XMALLOC
-                       if (opt_xmalloc) {
-                               _malloc_message(_getprogname(),
-                                   ": (malloc) Error in posix_memalign(): "
-                                   "invalid alignment\n", "", "");
-                               abort();
-                       }
+       assert(((alignment - 1) & alignment) == 0);
+
+       if (malloc_init()) {
+               ret = NULL;
+               goto RETURN;
+       }
+
+       if (size == 0) {
+#ifdef MALLOC_SYSV
+               if (opt_sysv == false)
 #endif
-                       result = NULL;
-                       ret = EINVAL;
+                       size = 1;
+#ifdef MALLOC_SYSV
+               else {
+                       ret = NULL;
                        goto RETURN;
                }
+#endif
+       }
+
+       alignment = alignment < sizeof(void*) ? sizeof(void*) : alignment;
+       ret = ipalloc(alignment, size);
 
-               result = ipalloc(alignment, size);
+RETURN:
+#ifdef MALLOC_XMALLOC
+       if (opt_xmalloc && ret == NULL) {
+               _malloc_message(_getprogname(),
+               ": (malloc) Error in memalign(): out of memory\n", "", "");
+               abort();
        }
+#endif
+       UTRACE(0, size, ret);
+       return (ret);
+}
+
+ZONE_INLINE
+int
+posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+       void *result;
 
-       if (result == NULL) {
+       /* Make sure that alignment is a large enough power of 2. */
+       if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *)) {
 #ifdef MALLOC_XMALLOC
                if (opt_xmalloc) {
                        _malloc_message(_getprogname(),
-                       ": (malloc) Error in posix_memalign(): out of memory\n",
-                       "", "");
+                           ": (malloc) Error in posix_memalign(): "
+                           "invalid alignment\n", "", "");
                        abort();
                }
 #endif
-               ret = ENOMEM;
-               goto RETURN;
+               return (EINVAL);
        }
 
-       *memptr = result;
-       ret = 0;
-
-RETURN:
-       UTRACE(0, size, result);
-       return (ret);
-}
-
-DSOEXPORT
-#ifdef DARWIN
-inline void *
-moz_memalign(size_t alignment, size_t size)
-#else
-void *
-memalign(size_t alignment, size_t size)
-#endif
-{
-       void *ret;
+       /* The 0-->1 size promotion is done in the memalign() call below */
 
-#ifdef DARWIN
-       if (moz_posix_memalign(&ret, alignment, size) != 0)
+#ifdef MOZ_MEMORY_DARWIN
+       result = moz_memalign(alignment, size);
 #else
-       if (posix_memalign(&ret, alignment, size) != 0)
+       result = memalign(alignment, size);
 #endif
-               return (NULL);
+       if (result == NULL)
+               return (ENOMEM);
 
-       return ret;
+       *memptr = result;
+       return (0);
 }
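
The rewritten posix_memalign() above validates the alignment with the classic ((alignment - 1) & alignment) != 0 test: a power of two has exactly one bit set, so ANDing it with itself minus one must give zero. A tiny sketch of the predicate it applies:

#include <stddef.h>

/* Sketch of the alignment check in posix_memalign() above. */
static int
alignment_ok(size_t alignment)
{
        /* Power of two, and at least the size of a pointer. */
        return (((alignment - 1) & alignment) == 0 &&
            alignment >= sizeof(void *));
}

/* e.g. 8, 16 and 4096 pass; 6 and 24 (not powers of two) fail, as does 0. */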
 
-DSOEXPORT
-#ifdef DARWIN
-inline void *
-moz_valloc(size_t size)
-#else
+ZONE_INLINE
 void *
 valloc(size_t size)
-#endif
 {
-#ifdef DARWIN
+#ifdef MOZ_MEMORY_DARWIN
        return (moz_memalign(pagesize, size));
 #else
        return (memalign(pagesize, size));
 #endif
 }
 
-DSOEXPORT
-#ifdef DARWIN
-inline void *
-moz_calloc(size_t num, size_t size)
-#else
+ZONE_INLINE
 void *
 calloc(size_t num, size_t size)
-#endif
 {
        void *ret;
        size_t num_size;
@@ -5774,14 +6049,9 @@ RETURN:
        return (ret);
 }
 
-DSOEXPORT
-#ifdef DARWIN
-inline void *
-moz_realloc(void *ptr, size_t size)
-#else
+ZONE_INLINE
 void *
 realloc(void *ptr, size_t size)
-#endif
 {
        void *ret;
 
@@ -5842,72 +6112,25 @@ RETURN:
        return (ret);
 }
 
-DSOEXPORT
-#ifdef DARWIN
-inline void
-moz_free(void *ptr)
-#else
+ZONE_INLINE
 void
 free(void *ptr)
-#endif
 {
-
+       size_t offset;
+
        UTRACE(ptr, 0, 0);
-       if (ptr != NULL) {
-               assert(malloc_initialized);
-
-               idalloc(ptr);
-       }
-}
-
-/*
- * This is a work in progress, which doesn't even get used.
- */
-#ifdef USE_STATS_MEMORY
-/* Utility to update mallinfo for malloc_stats() and mallinfo() */
-
-static void
-malloc_update_mallinfo(arena_t *arena, struct mallinfo *mi)
-{
-  int i, navail;
-  size_t avail;
-  avail = chunksize * narenas;
-
-#if 0
-  malloc_printf("allocated:   %u %u %u\n",
-                arena->stats.allocated_small + arena->stats.allocated_large,
-                arena->stats.nmalloc_small + arena->stats.nmalloc_large,
-                arena->stats.ndalloc_small + arena->stats.ndalloc_large);
-  
-  malloc_printf("available:   %u\n", avail);
-#endif
-  
-/*   stats_print(arena); */
-  malloc_spin_lock(&arena->lock);
-
-  /* clear unused fields */
-  mi->arena = mi->ordblks = mi->smblks = mi->usmblks = mi->fsmblks = 0;
-  mi->uordblks = arena->stats.allocated_small + arena->stats.allocated_large;
-  mi->fordblks = arena->stats.mapped - mi->uordblks;
-  
-  mi->hblks = mi->hblkhd = 0;
-  mi->keepcost = arena->stats.mapped;
-  malloc_spin_unlock(&arena->lock);
-}
 
-#ifndef DARWIN
-DSOEXPORT
-struct mallinfo
-mallinfo()
-{
-    struct mallinfo mi;
-
-    /* Calculate and print allocated/mapped stats. */
-    malloc_update_mallinfo(choose_arena(), &mi);
-    return mi;
+       /*
+        * A version of idalloc that checks for a NULL pointer, but only for
+        * huge allocations, assuming that CHUNK_ADDR2OFFSET(NULL) == 0.
+        */
+       assert(CHUNK_ADDR2OFFSET(NULL) == 0);
+       offset = CHUNK_ADDR2OFFSET(ptr);
+       if (offset != 0)
+               arena_dalloc(ptr, offset);
+       else if (ptr != NULL)
+               huge_dalloc(ptr);
 }
-# endif
-#endif
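
The new free() above replaces the old unconditional idalloc() call with a dispatch on the pointer's offset within its chunk. CHUNK_ADDR2OFFSET() is not part of this patch; the sketch below assumes it masks a pointer down to its offset inside a chunk, so that chunk-aligned pointers (offset == 0) can only be huge allocations and NULL also yields 0:

#include <stddef.h>
#include <stdint.h>

/* Stubs standing in for the real deallocation paths. */
static void arena_dalloc_stub(void *ptr, size_t offset) { (void)ptr; (void)offset; }
static void huge_dalloc_stub(void *ptr) { (void)ptr; }

/* Sketch of the free() dispatch, with chunksize_mask == chunksize - 1. */
static void
free_sketch(void *ptr, uintptr_t chunksize_mask)
{
        size_t offset = (size_t)((uintptr_t)ptr & chunksize_mask);

        if (offset != 0)
                arena_dalloc_stub(ptr, offset); /* interior: arena-managed */
        else if (ptr != NULL)
                huge_dalloc_stub(ptr);          /* chunk-aligned: huge */
}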
 
 /*
  * End malloc(3)-compatible functions.
@@ -5916,23 +6139,126 @@ mallinfo()
 /*
  * Begin non-standard functions.
  */
-
-DSOEXPORT
-#ifdef DARWIN
-inline size_t
-moz_malloc_usable_size(const void *ptr)
+#ifdef MOZ_MEMORY_ANDROID
+size_t
+malloc_usable_size(void *ptr)
 #else
 size_t
 malloc_usable_size(const void *ptr)
 #endif
 {
 
+#ifdef MALLOC_VALIDATE
+       return (isalloc_validate(ptr));
+#else
        assert(ptr != NULL);
 
        return (isalloc(ptr));
+#endif
+}
+
+void
+jemalloc_stats(jemalloc_stats_t *stats)
+{
+       size_t i;
+
+       assert(stats != NULL);
+
+       /*
+        * Gather runtime settings.
+        */
+       stats->opt_abort = opt_abort;
+       stats->opt_junk =
+#ifdef MALLOC_FILL
+           opt_junk ? true :
+#endif
+           false;
+       stats->opt_utrace =
+#ifdef MALLOC_UTRACE
+           opt_utrace ? true :
+#endif
+           false;
+       stats->opt_sysv =
+#ifdef MALLOC_SYSV
+           opt_sysv ? true :
+#endif
+           false;
+       stats->opt_xmalloc =
+#ifdef MALLOC_XMALLOC
+           opt_xmalloc ? true :
+#endif
+           false;
+       stats->opt_zero =
+#ifdef MALLOC_FILL
+           opt_zero ? true :
+#endif
+           false;
+       stats->narenas = narenas;
+       stats->balance_threshold =
+#ifdef MALLOC_BALANCE
+           opt_balance_threshold
+#else
+           SIZE_T_MAX
+#endif
+           ;
+       stats->quantum = quantum;
+       stats->small_max = small_max;
+       stats->large_max = arena_maxclass;
+       stats->chunksize = chunksize;
+       stats->dirty_max = opt_dirty_max;
+
+       /*
+        * Gather current memory usage statistics.
+        */
+       stats->mapped = 0;
+       stats->committed = 0;
+       stats->allocated = 0;
+       stats->dirty = 0;
+
+       /* Get huge mapped/allocated. */
+       malloc_mutex_lock(&huge_mtx);
+       stats->mapped += stats_chunks.curchunks * chunksize;
+#ifdef MALLOC_DECOMMIT
+       stats->committed += huge_allocated;
+#endif
+       stats->allocated += huge_allocated;
+       malloc_mutex_unlock(&huge_mtx);
+
+       /* Get base mapped. */
+       malloc_mutex_lock(&base_mtx);
+       stats->mapped += base_mapped;
+#ifdef MALLOC_DECOMMIT
+       assert(base_committed <= base_mapped);
+       stats->committed += base_committed;
+#endif
+       malloc_mutex_unlock(&base_mtx);
+
+       /* Iterate over arenas and their chunks. */
+       for (i = 0; i < narenas; i++) {
+               arena_t *arena = arenas[i];
+               if (arena != NULL) {
+                       arena_chunk_t *chunk;
+
+                       malloc_spin_lock(&arena->lock);
+                       stats->allocated += arena->stats.allocated_small;
+                       stats->allocated += arena->stats.allocated_large;
+#ifdef MALLOC_DECOMMIT
+                       stats->committed += (arena->stats.committed <<
+                           pagesize_2pow);
+#endif
+                       stats->dirty += (arena->ndirty << pagesize_2pow);
+                       malloc_spin_unlock(&arena->lock);
+               }
+       }
+
+#ifndef MALLOC_DECOMMIT
+       stats->committed = stats->mapped;
+#endif
+       assert(stats->mapped >= stats->committed);
+       assert(stats->committed >= stats->allocated);
 }
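
jemalloc_stats() fills a caller-supplied jemalloc_stats_t (declared in jemalloc_types.h, which is not part of this excerpt) and guarantees mapped >= committed >= allocated on return. A usage sketch, assuming the counters are size_t as the arithmetic above implies:

#include <stdio.h>
#include "jemalloc.h"

/* Sketch: query the allocator and print the headline counters. */
static void
print_jemalloc_stats(void)
{
        jemalloc_stats_t stats;

        jemalloc_stats(&stats);
        printf("mapped %zu, committed %zu, allocated %zu, dirty %zu\n",
            stats.mapped, stats.committed, stats.allocated, stats.dirty);
}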
 
-#ifdef WIN32
+#ifdef MOZ_MEMORY_WINDOWS
 void*
 _recalloc(void *ptr, size_t count, size_t size)
 {
@@ -5977,7 +6303,6 @@ _msize(const void *ptr)
 }
 #endif
 
-
 /*
  * End non-standard functions.
  */
@@ -6001,15 +6326,10 @@ _malloc_prefork(void)
                if (arenas[i] != NULL)
                        malloc_spin_lock(&arenas[i]->lock);
        }
-       malloc_spin_unlock(&arenas_lock);
 
        malloc_mutex_lock(&base_mtx);
 
        malloc_mutex_lock(&huge_mtx);
-
-#ifdef MALLOC_DSS
-       malloc_mutex_lock(&dss_mtx);
-#endif
 }
 
 void
@@ -6019,15 +6339,10 @@ _malloc_postfork(void)
 
        /* Release all mutexes, now that fork() has completed. */
 
-#ifdef MALLOC_DSS
-       malloc_mutex_unlock(&dss_mtx);
-#endif
-
        malloc_mutex_unlock(&huge_mtx);
 
        malloc_mutex_unlock(&base_mtx);
 
-       malloc_spin_lock(&arenas_lock);
        for (i = 0; i < narenas; i++) {
                if (arenas[i] != NULL)
                        malloc_spin_unlock(&arenas[i]->lock);
@@ -6040,16 +6355,17 @@ _malloc_postfork(void)
  */
 
/******************************************************************************/
 
+#ifdef HAVE_DLOPEN
+#  include <dlfcn.h>
+#endif
 
-#ifdef DARWIN
+#ifdef MOZ_MEMORY_DARWIN
 static malloc_zone_t zone;
 static struct malloc_introspection_t zone_introspect;
 
 static size_t
 zone_size(malloc_zone_t *zone, void *ptr)
 {
-       size_t ret = 0;
-       arena_chunk_t *chunk;
 
        /*
         * There appear to be places within Darwin (such as setenv(3)) that
@@ -6060,76 +6376,21 @@ zone_size(malloc_zone_t *zone, void *ptr)
         * not work in practice, we must check all pointers to assure that they
         * reside within a mapped chunk before determining size.
         */
-
-       chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-       if (chunk != ptr) {
-               arena_t *arena;
-               unsigned i;
-               arena_t *arenas_snapshot[narenas];
-
-               /*
-                * Make a copy of the arenas vector while holding arenas_lock in
-                * order to assure that all elements are up to date in this
-                * processor's cache.  Do this outside the following loop in
-                * order to reduce lock acquisitions.
-                */
-               malloc_spin_lock(&arenas_lock);
-               memcpy(&arenas_snapshot, arenas, sizeof(arena_t *) * narenas);
-               malloc_spin_unlock(&arenas_lock);
-
-               /* Region. */
-               for (i = 0; i < narenas; i++) {
-                       arena = arenas_snapshot[i];
-
-                       if (arena != NULL) {
-                               bool own;
-
-                               /* Make sure ptr is within a chunk. */
-                               malloc_spin_lock(&arena->lock);
-                               if (RB_FIND(arena_chunk_tree_s, &arena->chunks,
-                                   chunk) == chunk)
-                                       own = true;
-                               else
-                                       own = false;
-                               malloc_spin_unlock(&arena->lock);
-
-                               if (own) {
-                                       ret = arena_salloc(ptr);
-                                       goto RETURN;
-                               }
-                       }
-               }
-       } else {
-               extent_node_t *node;
-               extent_node_t key;
-
-               /* Chunk. */
-               key.addr = (void *)chunk;
-               malloc_mutex_lock(&huge_mtx);
-               node = RB_FIND(extent_tree_ad_s, &huge, &key);
-               if (node != NULL)
-                       ret = node->size;
-               else
-                       ret = 0;
-               malloc_mutex_unlock(&huge_mtx);
-       }
-
-RETURN:
-       return (ret);
+       return (isalloc_validate(ptr));
 }
 
 static void *
 zone_malloc(malloc_zone_t *zone, size_t size)
 {
 
-       return (moz_malloc(size));
+       return (malloc(size));
 }
 
 static void *
 zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
 {
 
-       return (moz_calloc(num, size));
+       return (calloc(num, size));
 }
 
 static void *
@@ -6137,7 +6398,7 @@ zone_valloc(malloc_zone_t *zone, size_t size)
 {
        void *ret = NULL; /* Assignment avoids useless compiler warning. */
 
-       moz_posix_memalign(&ret, pagesize, size);
+       posix_memalign(&ret, pagesize, size);
 
        return (ret);
 }
@@ -6146,14 +6407,14 @@ static void
 zone_free(malloc_zone_t *zone, void *ptr)
 {
 
-       moz_free(ptr);
+       free(ptr);
 }
 
 static void *
 zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
 {
 
-       return (moz_realloc(ptr, size));
+       return (realloc(ptr, size));
 }
 
 static void *
@@ -6176,10 +6437,10 @@ zone_good_size(malloc_zone_t *zone, size_t size)
         * how large it could have been without moving up to the next size
         * class.
         */
-       p = moz_malloc(size);
+       p = malloc(size);
        if (p != NULL) {
                ret = isalloc(p);
-               moz_free(p);
+               free(p);
        } else
                ret = size;
 
@@ -6258,4 +6519,29 @@ jemalloc_darwin_init(void)
                sizeof(malloc_zone_t *) * (malloc_num_zones - 1));
        malloc_zones[0] = &zone;
 }
+
+#elif defined(__GLIBC__) && !defined(__UCLIBC__)
+/*
+ * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
+ * to inconsistently reference libc's malloc(3)-compatible functions
+ * (bug 493541).
+ *
+ * These definitions interpose hooks in glibc.  The functions are actually
+ * passed an extra argument for the caller return address, which will be
+ * ignored.
+ */
+#ifndef WRAP_MALLOC
+void (*__free_hook)(void *ptr) = free;
+void *(*__malloc_hook)(size_t size) = malloc;
+void *(*__realloc_hook)(void *ptr, size_t size) = realloc;
+void *(*__memalign_hook)(size_t alignment, size_t size) = memalign;
+#endif
+
+#elif defined(RTLD_DEEPBIND)
+/*
+ * XXX On systems that support RTLD_GROUP or DF_1_GROUP, do their
+ * implementations permit similar inconsistencies?  Should STV_SINGLETON
+ * visibility be used for interposition where available?
+ */
+#  error "Interposing malloc is unsafe on this system without libc malloc 
hooks."
 #endif
diff --git a/libbase/jemalloc.h b/libbase/jemalloc.h
new file mode 100644
index 0000000..03e0559
--- /dev/null
+++ b/libbase/jemalloc.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C; tab-width: 8; c-basic-offset: 8 -*- */
+/* vim:set softtabstop=8 shiftwidth=8: */
+/*-
+ * Copyright (C) 2006-2008 Jason Evans <address@hidden>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice(s), this list of conditions and the following disclaimer as
+ *    the first lines of this file unmodified other than the possible
+ *    addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice(s), this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _JEMALLOC_H_
+#define _JEMALLOC_H_
+
+#include "jemalloc_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern const char *_malloc_options;
+
+/* Darwin and Linux already have memory allocation functions */
+#if (!defined(MOZ_MEMORY_DARWIN) && !defined(MOZ_MEMORY_LINUX))
+void   *malloc(size_t size);
+void   *valloc(size_t size);
+void   *calloc(size_t num, size_t size);
+void   *realloc(void *ptr, size_t size);
+void   free(void *ptr);
+int    posix_memalign(void **memptr, size_t alignment, size_t size);
+#endif /* MOZ_MEMORY_DARWIN, MOZ_MEMORY_LINUX */
+
+#if defined(MOZ_MEMORY_ANDROID) || defined(WRAP_MALLOC)
+void   *je_malloc(size_t size);
+void   *je_valloc(size_t size);
+void   *je_calloc(size_t num, size_t size);
+void   *je_realloc(void *ptr, size_t size);
+void   je_free(void *ptr);
+int    je_posix_memalign(void **memptr, size_t alignment, size_t size);
+char    *je_strndup(const char *src, size_t len);
+char    *je_strdup(const char *src);
+#endif
+
+/* Linux has memalign and malloc_usable_size */
+#if !defined(MOZ_MEMORY_LINUX)
+void   *memalign(size_t alignment, size_t size);
+size_t malloc_usable_size(const void *ptr);
+#endif /* MOZ_MEMORY_LINUX */
+
+void   jemalloc_stats(jemalloc_stats_t *stats);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* _JEMALLOC_H_ */
diff --git a/libbase/jemalloc_rb.h b/libbase/jemalloc_rb.h
new file mode 100644
index 0000000..43d8569
--- /dev/null
+++ b/libbase/jemalloc_rb.h
@@ -0,0 +1,982 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2008 Jason Evans <address@hidden>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice(s), this list of conditions and the following disclaimer
+ *    unmodified other than the allowable addition of one or more
+ *    copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice(s), this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************
+ *
+ * cpp macro implementation of left-leaning red-black trees.
+ *
+ * Usage:
+ *
+ *   (Optional.)
+ *   #define SIZEOF_PTR ...
+ *   #define SIZEOF_PTR_2POW ...
+ *   #define RB_NO_C99_VARARRAYS
+ *
+ *   (Optional, see assert(3).)
+ *   #define NDEBUG
+ *
+ *   (Required.)
+ *   #include <assert.h>
+ *   #include <rb.h>
+ *   ...
+ *
+ * All operations are done non-recursively.  Parent pointers are not used, and
+ * color bits are stored in the least significant bit of right-child pointers,
+ * thus making node linkage as compact as is possible for red-black trees.
+ *
+ * Some macros use a comparison function pointer, which is expected to have the
+ * following prototype:
+ *
+ *   int (a_cmp *)(a_type *a_node, a_type *a_other);
+ *                         ^^^^^^
+ *                      or a_key
+ *
+ * Interpretation of comparison function return values:
+ *
+ *   -1 : a_node <  a_other
+ *    0 : a_node == a_other
+ *    1 : a_node >  a_other
+ *
+ * In all cases, the a_node or a_key macro argument is the first argument to 
the
+ * comparison function, which makes it possible to write comparison functions
+ * that treat the first argument specially.
+ *
+ ******************************************************************************/
+
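To make the usage comment above concrete, here is a minimal, hedged instantiation of the macros that appear in this header (rb_node, rb_tree, rb_new, rb_search) for an int-keyed node type; the type and function names are illustrative:

#include <assert.h>
#include <stddef.h>
#include "jemalloc_rb.h"

typedef struct node_s node_t;
struct node_s {
        int key;
        rb_node(node_t) link;   /* embedded left/right-plus-color linkage */
};
typedef rb_tree(node_t) tree_t;

/* Comparison callback: <0, 0, >0, with the key node as first argument. */
static int
node_cmp(node_t *a, node_t *b)
{
        return ((a->key > b->key) - (a->key < b->key));
}

/* Look up `key`; returns NULL when absent.  Initialize the tree first
 * with:  tree_t t; rb_new(node_t, link, &t);  */
static node_t *
tree_lookup(tree_t *tree, int key)
{
        node_t key_node, *ret;

        key_node.key = key;
        rb_search(node_t, link, node_cmp, tree, &key_node, ret);
        return (ret);
}
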
+#ifndef RB_H_
+#define        RB_H_
+
+#if 0
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/lib/libc/stdlib/rb.h 178995 2008-05-14 18:33:13Z jasone $");
+#endif
+
+/* Node structure. */
+#define        rb_node(a_type)                                                 \
+struct {                                                               \
+    a_type *rbn_left;                                                  \
+    a_type *rbn_right_red;                                             \
+}
+
+/* Root structure. */
+#define        rb_tree(a_type)                                                 \
+struct {                                                               \
+    a_type *rbt_root;                                                  \
+    a_type rbt_nil;                                                    \
+}
+
+/* Left accessors. */
+#define        rbp_left_get(a_type, a_field, a_node)                           \
+    ((a_node)->a_field.rbn_left)
+#define        rbp_left_set(a_type, a_field, a_node, a_left) do {              \
+    (a_node)->a_field.rbn_left = a_left;                               \
+} while (0)
+
+/* Right accessors. */
+#define        rbp_right_get(a_type, a_field, a_node)                          \
+    ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red)          \
+      & ((ssize_t)-2)))
+#define        rbp_right_set(a_type, a_field, a_node, a_right) do {            \
+    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right)        \
+      | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1)));        \
+} while (0)
+
+/* Color accessors. */
+#define        rbp_red_get(a_type, a_field, a_node)                            \
+    ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red)             \
+      & ((size_t)1)))
+#define        rbp_color_set(a_type, a_field, a_node, a_red) do {              \
+    (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t)         \
+      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2))                        \
+      | ((ssize_t)a_red));                                             \
+} while (0)
+#define        rbp_red_set(a_type, a_field, a_node) do {                       \
+    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t)         \
+      (a_node)->a_field.rbn_right_red) | ((size_t)1));                 \
+} while (0)
+#define        rbp_black_set(a_type, a_field, a_node) do {                     \
+    (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t)          \
+      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2));               \
+} while (0)
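
All of the accessors above pack the node color into bit 0 of rbn_right_red, relying on pointer alignment to keep that bit otherwise zero. Stripped of the macro plumbing, the tagging trick is just:

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the low-bit color tagging used by the accessors above. */
static inline void *red_set(void *p)   { return (void *)((uintptr_t)p | 1); }
static inline void *black_set(void *p) { return (void *)((uintptr_t)p & ~(uintptr_t)1); }
static inline bool  red_get(void *p)   { return (((uintptr_t)p & 1) != 0); }
static inline void *ptr_get(void *p)   { return (void *)((uintptr_t)p & ~(uintptr_t)1); }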
+
+/* Node initializer. */
+#define        rbp_node_new(a_type, a_field, a_tree, a_node) do {              \
+    rbp_left_set(a_type, a_field, (a_node), &(a_tree)->rbt_nil);       \
+    rbp_right_set(a_type, a_field, (a_node), &(a_tree)->rbt_nil);      \
+    rbp_red_set(a_type, a_field, (a_node));                            \
+} while (0)
+
+/* Tree initializer. */
+#define        rb_new(a_type, a_field, a_tree) do {                            \
+    (a_tree)->rbt_root = &(a_tree)->rbt_nil;                           \
+    rbp_node_new(a_type, a_field, a_tree, &(a_tree)->rbt_nil);         \
+    rbp_black_set(a_type, a_field, &(a_tree)->rbt_nil);                        \
+} while (0)
+
+/* Tree operations. */
+#define        rbp_black_height(a_type, a_field, a_tree, r_height) do {        \
+    a_type *rbp_bh_t;                                                  \
+    for (rbp_bh_t = (a_tree)->rbt_root, (r_height) = 0;                        \
+      rbp_bh_t != &(a_tree)->rbt_nil;                                  \
+      rbp_bh_t = rbp_left_get(a_type, a_field, rbp_bh_t)) {            \
+       if (rbp_red_get(a_type, a_field, rbp_bh_t) == false) {          \
+           (r_height)++;                                               \
+       }                                                               \
+    }                                                                  \
+} while (0)
+
+#define        rbp_first(a_type, a_field, a_tree, a_root, r_node) do {         \
+    for ((r_node) = (a_root);                                          \
+      rbp_left_get(a_type, a_field, (r_node)) != &(a_tree)->rbt_nil;   \
+      (r_node) = rbp_left_get(a_type, a_field, (r_node))) {            \
+    }                                                                  \
+} while (0)
+
+#define        rbp_last(a_type, a_field, a_tree, a_root, r_node) do {          \
+    for ((r_node) = (a_root);                                          \
+      rbp_right_get(a_type, a_field, (r_node)) != &(a_tree)->rbt_nil;  \
+      (r_node) = rbp_right_get(a_type, a_field, (r_node))) {           \
+    }                                                                  \
+} while (0)
+
+#define        rbp_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do {   \
+    if (rbp_right_get(a_type, a_field, (a_node))                       \
+      != &(a_tree)->rbt_nil) {                                         \
+       rbp_first(a_type, a_field, a_tree, rbp_right_get(a_type,        \
+         a_field, (a_node)), (r_node));                                \
+    } else {                                                           \
+       a_type *rbp_n_t = (a_tree)->rbt_root;                           \
+       assert(rbp_n_t != &(a_tree)->rbt_nil);                          \
+       (r_node) = &(a_tree)->rbt_nil;                                  \
+       while (true) {                                                  \
+           int rbp_n_cmp = (a_cmp)((a_node), rbp_n_t);                 \
+           if (rbp_n_cmp < 0) {                                        \
+               (r_node) = rbp_n_t;                                     \
+               rbp_n_t = rbp_left_get(a_type, a_field, rbp_n_t);       \
+           } else if (rbp_n_cmp > 0) {                                 \
+               rbp_n_t = rbp_right_get(a_type, a_field, rbp_n_t);      \
+           } else {                                                    \
+               break;                                                  \
+           }                                                           \
+           assert(rbp_n_t != &(a_tree)->rbt_nil);                      \
+       }                                                               \
+    }                                                                  \
+} while (0)
+
+#define        rbp_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do {   \
+    if (rbp_left_get(a_type, a_field, (a_node)) != &(a_tree)->rbt_nil) {\
+       rbp_last(a_type, a_field, a_tree, rbp_left_get(a_type,          \
+         a_field, (a_node)), (r_node));                                \
+    } else {                                                           \
+       a_type *rbp_p_t = (a_tree)->rbt_root;                           \
+       assert(rbp_p_t != &(a_tree)->rbt_nil);                          \
+       (r_node) = &(a_tree)->rbt_nil;                                  \
+       while (true) {                                                  \
+           int rbp_p_cmp = (a_cmp)((a_node), rbp_p_t);                 \
+           if (rbp_p_cmp < 0) {                                        \
+               rbp_p_t = rbp_left_get(a_type, a_field, rbp_p_t);       \
+           } else if (rbp_p_cmp > 0) {                                 \
+               (r_node) = rbp_p_t;                                     \
+               rbp_p_t = rbp_right_get(a_type, a_field, rbp_p_t);      \
+           } else {                                                    \
+               break;                                                  \
+           }                                                           \
+           assert(rbp_p_t != &(a_tree)->rbt_nil);                      \
+       }                                                               \
+    }                                                                  \
+} while (0)
+
+#define        rb_first(a_type, a_field, a_tree, r_node) do {                  \
+    rbp_first(a_type, a_field, a_tree, (a_tree)->rbt_root, (r_node));  \
+    if ((r_node) == &(a_tree)->rbt_nil) {                              \
+       (r_node) = NULL;                                                \
+    }                                                                  \
+} while (0)
+
+#define        rb_last(a_type, a_field, a_tree, r_node) do {                   \
+    rbp_last(a_type, a_field, a_tree, (a_tree)->rbt_root, r_node);     \
+    if ((r_node) == &(a_tree)->rbt_nil) {                              \
+       (r_node) = NULL;                                                \
+    }                                                                  \
+} while (0)
+
+#define        rb_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do {   \
+    rbp_next(a_type, a_field, a_cmp, a_tree, (a_node), (r_node));      \
+    if ((r_node) == &(a_tree)->rbt_nil) {                              \
+       (r_node) = NULL;                                                \
+    }                                                                  \
+} while (0)
+
+#define        rb_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do {   \
+    rbp_prev(a_type, a_field, a_cmp, a_tree, (a_node), (r_node));      \
+    if ((r_node) == &(a_tree)->rbt_nil) {                              \
+       (r_node) = NULL;                                                \
+    }                                                                  \
+} while (0)
+
+#define        rb_search(a_type, a_field, a_cmp, a_tree, a_key, r_node) do {  \
+    int rbp_se_cmp;                                                    \
+    (r_node) = (a_tree)->rbt_root;                                     \
+    while ((r_node) != &(a_tree)->rbt_nil                              \
+      && (rbp_se_cmp = (a_cmp)((a_key), (r_node))) != 0) {             \
+       if (rbp_se_cmp < 0) {                                           \
+           (r_node) = rbp_left_get(a_type, a_field, (r_node));         \
+       } else {                                                        \
+           (r_node) = rbp_right_get(a_type, a_field, (r_node));        \
+       }                                                               \
+    }                                                                  \
+    if ((r_node) == &(a_tree)->rbt_nil) {                              \
+       (r_node) = NULL;                                                \
+    }                                                                  \
+} while (0)
+
+/*
+ * Find a match if it exists.  Otherwise, find the next greater node, if one
+ * exists.
+ */
+#define        rb_nsearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
+    a_type *rbp_ns_t = (a_tree)->rbt_root;                             \
+    (r_node) = NULL;                                                   \
+    while (rbp_ns_t != &(a_tree)->rbt_nil) {                           \
+       int rbp_ns_cmp = (a_cmp)((a_key), rbp_ns_t);                    \
+       if (rbp_ns_cmp < 0) {                                           \
+           (r_node) = rbp_ns_t;                                        \
+           rbp_ns_t = rbp_left_get(a_type, a_field, rbp_ns_t);         \
+       } else if (rbp_ns_cmp > 0) {                                    \
+           rbp_ns_t = rbp_right_get(a_type, a_field, rbp_ns_t);        \
+       } else {                                                        \
+           (r_node) = rbp_ns_t;                                        \
+           break;                                                      \
+       }                                                               \
+    }                                                                  \
+} while (0)
+
+/*
+ * Find a match if it exists.  Otherwise, find the previous lesser node, if one
+ * exists.
+ */
+#define        rb_psearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
+    a_type *rbp_ps_t = (a_tree)->rbt_root;                             \
+    (r_node) = NULL;                                                   \
+    while (rbp_ps_t != &(a_tree)->rbt_nil) {                           \
+       int rbp_ps_cmp = (a_cmp)((a_key), rbp_ps_t);                    \
+       if (rbp_ps_cmp < 0) {                                           \
+           rbp_ps_t = rbp_left_get(a_type, a_field, rbp_ps_t);         \
+       } else if (rbp_ps_cmp > 0) {                                    \
+           (r_node) = rbp_ps_t;                                        \
+           rbp_ps_t = rbp_right_get(a_type, a_field, rbp_ps_t);        \
+       } else {                                                        \
+           (r_node) = rbp_ps_t;                                        \
+           break;                                                      \
+       }                                                               \
+    }                                                                  \
+} while (0)
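+
+/*
+ * Worked example of the three search flavors: given a tree whose nodes
+ * hold the keys {2, 4, 6} and a search key of 5, rb_search() yields NULL
+ * (no exact match), rb_nsearch() yields the node with key 6 (the next
+ * greater node), and rb_psearch() yields the node with key 4 (the
+ * previous lesser node).  With a search key of 4, all three yield the
+ * node with key 4.
+ */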
+
+#define        rbp_rotate_left(a_type, a_field, a_node, r_node) do {          \
+    (r_node) = rbp_right_get(a_type, a_field, (a_node));               \
+    rbp_right_set(a_type, a_field, (a_node),                           \
+      rbp_left_get(a_type, a_field, (r_node)));                        \
+    rbp_left_set(a_type, a_field, (r_node), (a_node));                 \
+} while (0)
+
+#define        rbp_rotate_right(a_type, a_field, a_node, r_node) do {         \
+    (r_node) = rbp_left_get(a_type, a_field, (a_node));                \
+    rbp_left_set(a_type, a_field, (a_node),                            \
+      rbp_right_get(a_type, a_field, (r_node)));                      \
+    rbp_right_set(a_type, a_field, (r_node), (a_node));                \
+} while (0)
+
+#define        rbp_lean_left(a_type, a_field, a_node, r_node) do {            \
+    bool rbp_ll_red;                                                   \
+    rbp_rotate_left(a_type, a_field, (a_node), (r_node));              \
+    rbp_ll_red = rbp_red_get(a_type, a_field, (a_node));               \
+    rbp_color_set(a_type, a_field, (r_node), rbp_ll_red);              \
+    rbp_red_set(a_type, a_field, (a_node));                            \
+} while (0)
+
+#define        rbp_lean_right(a_type, a_field, a_node, r_node) do {           \
+    bool rbp_lr_red;                                                   \
+    rbp_rotate_right(a_type, a_field, (a_node), (r_node));             \
+    rbp_lr_red = rbp_red_get(a_type, a_field, (a_node));               \
+    rbp_color_set(a_type, a_field, (r_node), rbp_lr_red);              \
+    rbp_red_set(a_type, a_field, (a_node));                            \
+} while (0)
+
+#define        rbp_move_red_left(a_type, a_field, a_node, r_node) do {        \
+    a_type *rbp_mrl_t, *rbp_mrl_u;                                     \
+    rbp_mrl_t = rbp_left_get(a_type, a_field, (a_node));               \
+    rbp_red_set(a_type, a_field, rbp_mrl_t);                           \
+    rbp_mrl_t = rbp_right_get(a_type, a_field, (a_node));              \
+    rbp_mrl_u = rbp_left_get(a_type, a_field, rbp_mrl_t);              \
+    if (rbp_red_get(a_type, a_field, rbp_mrl_u)) {                     \
+       rbp_rotate_right(a_type, a_field, rbp_mrl_t, rbp_mrl_u);        \
+       rbp_right_set(a_type, a_field, (a_node), rbp_mrl_u);            \
+       rbp_rotate_left(a_type, a_field, (a_node), (r_node));           \
+       rbp_mrl_t = rbp_right_get(a_type, a_field, (a_node));           \
+       if (rbp_red_get(a_type, a_field, rbp_mrl_t)) {                  \
+           rbp_black_set(a_type, a_field, rbp_mrl_t);                  \
+           rbp_red_set(a_type, a_field, (a_node));                     \
+           rbp_rotate_left(a_type, a_field, (a_node), rbp_mrl_t);      \
+           rbp_left_set(a_type, a_field, (r_node), rbp_mrl_t);         \
+       } else {                                                        \
+           rbp_black_set(a_type, a_field, (a_node));                   \
+       }                                                               \
+    } else {                                                           \
+       rbp_red_set(a_type, a_field, (a_node));                         \
+       rbp_rotate_left(a_type, a_field, (a_node), (r_node));           \
+    }                                                                  \
+} while (0)
+
+#define        rbp_move_red_right(a_type, a_field, a_node, r_node) do {       \
+    a_type *rbp_mrr_t;                                                 \
+    rbp_mrr_t = rbp_left_get(a_type, a_field, (a_node));               \
+    if (rbp_red_get(a_type, a_field, rbp_mrr_t)) {                     \
+       a_type *rbp_mrr_u, *rbp_mrr_v;                                  \
+       rbp_mrr_u = rbp_right_get(a_type, a_field, rbp_mrr_t);          \
+       rbp_mrr_v = rbp_left_get(a_type, a_field, rbp_mrr_u);           \
+       if (rbp_red_get(a_type, a_field, rbp_mrr_v)) {                  \
+           rbp_color_set(a_type, a_field, rbp_mrr_u,                   \
+             rbp_red_get(a_type, a_field, (a_node)));                  \
+           rbp_black_set(a_type, a_field, rbp_mrr_v);                  \
+           rbp_rotate_left(a_type, a_field, rbp_mrr_t, rbp_mrr_u);     \
+           rbp_left_set(a_type, a_field, (a_node), rbp_mrr_u);         \
+           rbp_rotate_right(a_type, a_field, (a_node), (r_node));      \
+           rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t);      \
+           rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t);        \
+       } else {                                                        \
+           rbp_color_set(a_type, a_field, rbp_mrr_t,                   \
+             rbp_red_get(a_type, a_field, (a_node)));                  \
+           rbp_red_set(a_type, a_field, rbp_mrr_u);                    \
+           rbp_rotate_right(a_type, a_field, (a_node), (r_node));      \
+           rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t);      \
+           rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t);        \
+       }                                                               \
+       rbp_red_set(a_type, a_field, (a_node));                         \
+    } else {                                                           \
+       rbp_red_set(a_type, a_field, rbp_mrr_t);                        \
+       rbp_mrr_t = rbp_left_get(a_type, a_field, rbp_mrr_t);           \
+       if (rbp_red_get(a_type, a_field, rbp_mrr_t)) {                  \
+           rbp_black_set(a_type, a_field, rbp_mrr_t);                  \
+           rbp_rotate_right(a_type, a_field, (a_node), (r_node));      \
+           rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t);      \
+           rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t);        \
+       } else {                                                        \
+           rbp_rotate_left(a_type, a_field, (a_node), (r_node));       \
+       }                                                               \
+    }                                                                  \
+} while (0)
+
+#define        rb_insert(a_type, a_field, a_cmp, a_tree, a_node) do {         \
+    a_type rbp_i_s;                                                    \
+    a_type *rbp_i_g, *rbp_i_p, *rbp_i_c, *rbp_i_t, *rbp_i_u;           \
+    int rbp_i_cmp = 0;                                                 \
+    rbp_i_g = &(a_tree)->rbt_nil;                                      \
+    rbp_left_set(a_type, a_field, &rbp_i_s, (a_tree)->rbt_root);       \
+    rbp_right_set(a_type, a_field, &rbp_i_s, &(a_tree)->rbt_nil);      \
+    rbp_black_set(a_type, a_field, &rbp_i_s);                          \
+    rbp_i_p = &rbp_i_s;                                                \
+    rbp_i_c = (a_tree)->rbt_root;                                      \
+    /* Iteratively search down the tree for the insertion point,      */\
+    /* splitting 4-nodes as they are encountered.  At the end of each */\
+    /* iteration, rbp_i_g->rbp_i_p->rbp_i_c is a 3-level path down    */\
+    /* the tree, assuming a sufficiently deep tree.                   */\
+    while (rbp_i_c != &(a_tree)->rbt_nil) {                            \
+       rbp_i_t = rbp_left_get(a_type, a_field, rbp_i_c);               \
+       rbp_i_u = rbp_left_get(a_type, a_field, rbp_i_t);               \
+       if (rbp_red_get(a_type, a_field, rbp_i_t)                       \
+         && rbp_red_get(a_type, a_field, rbp_i_u)) {                   \
+           /* rbp_i_c is the top of a logical 4-node, so split it.   */\
+           /* This iteration does not move down the tree, due to the */\
+           /* disruptiveness of node splitting.                      */\
+           /*                                                        */\
+           /* Rotate right.                                          */\
+           rbp_rotate_right(a_type, a_field, rbp_i_c, rbp_i_t);        \
+           /* Pass red links up one level.                           */\
+           rbp_i_u = rbp_left_get(a_type, a_field, rbp_i_t);           \
+           rbp_black_set(a_type, a_field, rbp_i_u);                    \
+           if (rbp_left_get(a_type, a_field, rbp_i_p) == rbp_i_c) {    \
+               rbp_left_set(a_type, a_field, rbp_i_p, rbp_i_t);        \
+               rbp_i_c = rbp_i_t;                                      \
+           } else {                                                    \
+               /* rbp_i_c was the right child of rbp_i_p, so rotate  */\
+               /* left in order to maintain the left-leaning         */\
+               /* invariant.                                         */\
+               assert(rbp_right_get(a_type, a_field, rbp_i_p)          \
+                 == rbp_i_c);                                          \
+               rbp_right_set(a_type, a_field, rbp_i_p, rbp_i_t);       \
+               rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_u);       \
+               if (rbp_left_get(a_type, a_field, rbp_i_g) == rbp_i_p) {\
+                   rbp_left_set(a_type, a_field, rbp_i_g, rbp_i_u);    \
+               } else {                                                \
+                   assert(rbp_right_get(a_type, a_field, rbp_i_g)      \
+                     == rbp_i_p);                                      \
+                   rbp_right_set(a_type, a_field, rbp_i_g, rbp_i_u);   \
+               }                                                       \
+               rbp_i_p = rbp_i_u;                                      \
+               rbp_i_cmp = (a_cmp)((a_node), rbp_i_p);                 \
+               if (rbp_i_cmp < 0) {                                    \
+                   rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_p);   \
+               } else {                                                \
+                   assert(rbp_i_cmp > 0);                              \
+                   rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_p);  \
+               }                                                       \
+               continue;                                               \
+           }                                                           \
+       }                                                               \
+       rbp_i_g = rbp_i_p;                                              \
+       rbp_i_p = rbp_i_c;                                              \
+       rbp_i_cmp = (a_cmp)((a_node), rbp_i_c);                         \
+       if (rbp_i_cmp < 0) {                                            \
+           rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_c);           \
+       } else {                                                        \
+           assert(rbp_i_cmp > 0);                                      \
+           rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_c);          \
+       }                                                               \
+    }                                                                  \
+    /* rbp_i_p now refers to the node under which to insert.          */\
+    rbp_node_new(a_type, a_field, a_tree, (a_node));                   \
+    if (rbp_i_cmp > 0) {                                               \
+       rbp_right_set(a_type, a_field, rbp_i_p, (a_node));              \
+       rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_t);               \
+       if (rbp_left_get(a_type, a_field, rbp_i_g) == rbp_i_p) {        \
+           rbp_left_set(a_type, a_field, rbp_i_g, rbp_i_t);            \
+       } else if (rbp_right_get(a_type, a_field, rbp_i_g) == rbp_i_p) {\
+           rbp_right_set(a_type, a_field, rbp_i_g, rbp_i_t);           \
+       }                                                               \
+    } else {                                                           \
+       rbp_left_set(a_type, a_field, rbp_i_p, (a_node));               \
+    }                                                                  \
+    /* Update the root and make sure that it is black.                */\
+    (a_tree)->rbt_root = rbp_left_get(a_type, a_field, &rbp_i_s);      \
+    rbp_black_set(a_type, a_field, (a_tree)->rbt_root);                \
+} while (0)
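+
+/*
+ * The 4-node split performed inside rb_insert() above, sketched
+ * informally: when rbp_i_c's left child (rbp_i_t) and left-left
+ * grandchild (rbp_i_u) are both red,
+ *
+ *          c                     t(r)
+ *         /                     /    \
+ *       t(r)         -->     u(b)     c
+ *       /
+ *     u(r)
+ *
+ * a right rotation at c followed by blackening u passes the red link up
+ * one level; c keeps its previous color and all subtrees are preserved.
+ */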
+
+#define        rb_remove(a_type, a_field, a_cmp, a_tree, a_node) do {         \
+    a_type rbp_r_s;                                                    \
+    a_type *rbp_r_p, *rbp_r_c, *rbp_r_xp, *rbp_r_t, *rbp_r_u;          \
+    int rbp_r_cmp;                                                     \
+    rbp_left_set(a_type, a_field, &rbp_r_s, (a_tree)->rbt_root);       \
+    rbp_right_set(a_type, a_field, &rbp_r_s, &(a_tree)->rbt_nil);      \
+    rbp_black_set(a_type, a_field, &rbp_r_s);                          \
+    rbp_r_p = &rbp_r_s;                                                \
+    rbp_r_c = (a_tree)->rbt_root;                                      \
+    rbp_r_xp = &(a_tree)->rbt_nil;                                     \
+    /* Iterate down the tree, but always transform 2-nodes to 3- or   */\
+    /* 4-nodes in order to maintain the invariant that the current    */\
+    /* node is not a 2-node.  This allows simple deletion once a leaf */\
+    /* is reached.  Handle the root specially though, since there may */\
+    /* be no way to convert it from a 2-node to a 3-node.             */\
+    rbp_r_cmp = (a_cmp)((a_node), rbp_r_c);                            \
+    if (rbp_r_cmp < 0) {                                               \
+       rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c);               \
+       rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t);               \
+       if (rbp_red_get(a_type, a_field, rbp_r_t) == false              \
+         && rbp_red_get(a_type, a_field, rbp_r_u) == false) {          \
+           /* Apply standard transform to prepare for left move.     */\
+           rbp_move_red_left(a_type, a_field, rbp_r_c, rbp_r_t);       \
+           rbp_black_set(a_type, a_field, rbp_r_t);                    \
+           rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);            \
+           rbp_r_c = rbp_r_t;                                          \
+       } else {                                                        \
+           /* Move left.                                             */\
+           rbp_r_p = rbp_r_c;                                          \
+           rbp_r_c = rbp_left_get(a_type, a_field, rbp_r_c);           \
+       }                                                               \
+    } else {                                                           \
+       if (rbp_r_cmp == 0) {                                           \
+           assert((a_node) == rbp_r_c);                                \
+           if (rbp_right_get(a_type, a_field, rbp_r_c)                 \
+             == &(a_tree)->rbt_nil) {                                  \
+               /* Delete root node (which is also a leaf node).      */\
+               if (rbp_left_get(a_type, a_field, rbp_r_c)              \
+                 != &(a_tree)->rbt_nil) {                              \
+                   rbp_lean_right(a_type, a_field, rbp_r_c, rbp_r_t);  \
+                   rbp_right_set(a_type, a_field, rbp_r_t,             \
+                     &(a_tree)->rbt_nil);                              \
+               } else {                                                \
+                   rbp_r_t = &(a_tree)->rbt_nil;                       \
+               }                                                       \
+               rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);        \
+           } else {                                                    \
+               /* This is the node we want to delete, but we will    */\
+               /* instead swap it with its successor and delete the  */\
+               /* successor.  Record enough information to do the    */\
+               /* swap later.  rbp_r_xp is the a_node's parent.      */\
+               rbp_r_xp = rbp_r_p;                                     \
+               rbp_r_cmp = 1; /* Note that deletion is incomplete.   */\
+           }                                                           \
+       }                                                               \
+       if (rbp_r_cmp == 1) {                                           \
+           if (rbp_red_get(a_type, a_field, rbp_left_get(a_type,       \
+             a_field, rbp_right_get(a_type, a_field, rbp_r_c)))        \
+             == false) {                                               \
+               rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c);       \
+               if (rbp_red_get(a_type, a_field, rbp_r_t)) {            \
+                   /* Standard transform.                            */\
+                   rbp_move_red_right(a_type, a_field, rbp_r_c,        \
+                     rbp_r_t);                                         \
+               } else {                                                \
+                   /* Root-specific transform.                       */\
+                   rbp_red_set(a_type, a_field, rbp_r_c);              \
+                   rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t);   \
+                   if (rbp_red_get(a_type, a_field, rbp_r_u)) {        \
+                       rbp_black_set(a_type, a_field, rbp_r_u);        \
+                       rbp_rotate_right(a_type, a_field, rbp_r_c,      \
+                         rbp_r_t);                                     \
+                       rbp_rotate_left(a_type, a_field, rbp_r_c,       \
+                         rbp_r_u);                                     \
+                       rbp_right_set(a_type, a_field, rbp_r_t,         \
+                         rbp_r_u);                                     \
+                   } else {                                            \
+                       rbp_red_set(a_type, a_field, rbp_r_t);          \
+                       rbp_rotate_left(a_type, a_field, rbp_r_c,       \
+                         rbp_r_t);                                     \
+                   }                                                   \
+               }                                                       \
+               rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);        \
+               rbp_r_c = rbp_r_t;                                      \
+           } else {                                                    \
+               /* Move right.                                        */\
+               rbp_r_p = rbp_r_c;                                      \
+               rbp_r_c = rbp_right_get(a_type, a_field, rbp_r_c);      \
+           }                                                           \
+       }                                                               \
+    }                                                                  \
+    if (rbp_r_cmp != 0) {                                              \
+       while (true) {                                                  \
+           assert(rbp_r_p != &(a_tree)->rbt_nil);                      \
+           rbp_r_cmp = (a_cmp)((a_node), rbp_r_c);                     \
+           if (rbp_r_cmp < 0) {                                        \
+               rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c);       \
+               if (rbp_r_t == &(a_tree)->rbt_nil) {                    \
+                   /* rbp_r_c now refers to the successor node to    */\
+                   /* relocate, and rbp_r_xp/a_node refer to the     */\
+                   /* context for the relocation.                    */\
+                   if (rbp_left_get(a_type, a_field, rbp_r_xp)         \
+                     == (a_node)) {                                    \
+                       rbp_left_set(a_type, a_field, rbp_r_xp,         \
+                         rbp_r_c);                                     \
+                   } else {                                            \
+                       assert(rbp_right_get(a_type, a_field,           \
+                         rbp_r_xp) == (a_node));                       \
+                       rbp_right_set(a_type, a_field, rbp_r_xp,        \
+                         rbp_r_c);                                     \
+                   }                                                   \
+                   rbp_left_set(a_type, a_field, rbp_r_c,              \
+                     rbp_left_get(a_type, a_field, (a_node)));         \
+                   rbp_right_set(a_type, a_field, rbp_r_c,             \
+                     rbp_right_get(a_type, a_field, (a_node)));        \
+                   rbp_color_set(a_type, a_field, rbp_r_c,             \
+                     rbp_red_get(a_type, a_field, (a_node)));          \
+                   if (rbp_left_get(a_type, a_field, rbp_r_p)          \
+                     == rbp_r_c) {                                     \
+                       rbp_left_set(a_type, a_field, rbp_r_p,          \
+                         &(a_tree)->rbt_nil);                          \
+                   } else {                                            \
+                       assert(rbp_right_get(a_type, a_field, rbp_r_p)  \
+                         == rbp_r_c);                                  \
+                       rbp_right_set(a_type, a_field, rbp_r_p,         \
+                         &(a_tree)->rbt_nil);                          \
+                   }                                                   \
+                   break;                                              \
+               }                                                       \
+               rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t);       \
+               if (rbp_red_get(a_type, a_field, rbp_r_t) == false      \
+                 && rbp_red_get(a_type, a_field, rbp_r_u) == false) {  \
+                   rbp_move_red_left(a_type, a_field, rbp_r_c,         \
+                     rbp_r_t);                                         \
+                   if (rbp_left_get(a_type, a_field, rbp_r_p)          \
+                     == rbp_r_c) {                                     \
+                       rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);\
+                   } else {                                            \
+                       rbp_right_set(a_type, a_field, rbp_r_p,         \
+                         rbp_r_t);                                     \
+                   }                                                   \
+                   rbp_r_c = rbp_r_t;                                  \
+               } else {                                                \
+                   rbp_r_p = rbp_r_c;                                  \
+                   rbp_r_c = rbp_left_get(a_type, a_field, rbp_r_c);   \
+               }                                                       \
+           } else {                                                    \
+               /* Check whether to delete this node (it has to be    */\
+               /* the correct node and a leaf node).                 */\
+               if (rbp_r_cmp == 0) {                                   \
+                   assert((a_node) == rbp_r_c);                        \
+                   if (rbp_right_get(a_type, a_field, rbp_r_c)         \
+                     == &(a_tree)->rbt_nil) {                          \
+                       /* Delete leaf node.                          */\
+                       if (rbp_left_get(a_type, a_field, rbp_r_c)      \
+                         != &(a_tree)->rbt_nil) {                      \
+                           rbp_lean_right(a_type, a_field, rbp_r_c,    \
+                             rbp_r_t);                                 \
+                           rbp_right_set(a_type, a_field, rbp_r_t,     \
+                             &(a_tree)->rbt_nil);                      \
+                       } else {                                        \
+                           rbp_r_t = &(a_tree)->rbt_nil;               \
+                       }                                               \
+                       if (rbp_left_get(a_type, a_field, rbp_r_p)      \
+                         == rbp_r_c) {                                 \
+                           rbp_left_set(a_type, a_field, rbp_r_p,      \
+                             rbp_r_t);                                 \
+                       } else {                                        \
+                           rbp_right_set(a_type, a_field, rbp_r_p,     \
+                             rbp_r_t);                                 \
+                       }                                               \
+                       break;                                          \
+                   } else {                                            \
+                       /* This is the node we want to delete, but we */\
+                       /* will instead swap it with its successor    */\
+                       /* and delete the successor.  Record enough   */\
+                       /* information to do the swap later.          */\
+                       /* rbp_r_xp is a_node's parent.               */\
+                       rbp_r_xp = rbp_r_p;                             \
+                   }                                                   \
+               }                                                       \
+               rbp_r_t = rbp_right_get(a_type, a_field, rbp_r_c);      \
+               rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t);       \
+               if (rbp_red_get(a_type, a_field, rbp_r_u) == false) {   \
+                   rbp_move_red_right(a_type, a_field, rbp_r_c,        \
+                     rbp_r_t);                                         \
+                   if (rbp_left_get(a_type, a_field, rbp_r_p)          \
+                     == rbp_r_c) {                                     \
+                       rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);\
+                   } else {                                            \
+                       rbp_right_set(a_type, a_field, rbp_r_p,         \
+                         rbp_r_t);                                     \
+                   }                                                   \
+                   rbp_r_c = rbp_r_t;                                  \
+               } else {                                                \
+                   rbp_r_p = rbp_r_c;                                  \
+                   rbp_r_c = rbp_right_get(a_type, a_field, rbp_r_c);  \
+               }                                                       \
+           }                                                           \
+       }                                                               \
+    }                                                                  \
+    /* Update root.                                                   */\
+    (a_tree)->rbt_root = rbp_left_get(a_type, a_field, &rbp_r_s);      \
+} while (0)
+
+/*
+ * The rb_wrap() macro provides a convenient way to wrap functions around the
+ * cpp macros.  The main benefits of wrapping are that 1) repeated macro
+ * expansion can cause code bloat, especially for rb_{insert,remove}(), and
+ * 2) type, linkage, comparison functions, etc. need not be specified at every
+ * call point.
+ */
+
+#define        rb_wrap(a_attr, a_prefix, a_tree_type, a_type, a_field, a_cmp) \
+a_attr void                                                            \
+a_prefix##new(a_tree_type *tree) {                                     \
+    rb_new(a_type, a_field, tree);                                     \
+}                                                                      \
+a_attr a_type *                                                        \
+a_prefix##first(a_tree_type *tree) {                                   \
+    a_type *ret;                                                       \
+    rb_first(a_type, a_field, tree, ret);                              \
+    return (ret);                                                      \
+}                                                                      \
+a_attr a_type *                                                        \
+a_prefix##last(a_tree_type *tree) {                                    \
+    a_type *ret;                                                       \
+    rb_last(a_type, a_field, tree, ret);                               \
+    return (ret);                                                      \
+}                                                                      \
+a_attr a_type *                                                        \
+a_prefix##next(a_tree_type *tree, a_type *node) {                      \
+    a_type *ret;                                                       \
+    rb_next(a_type, a_field, a_cmp, tree, node, ret);                  \
+    return (ret);                                                      \
+}                                                                      \
+a_attr a_type *                                                        \
+a_prefix##prev(a_tree_type *tree, a_type *node) {                      \
+    a_type *ret;                                                       \
+    rb_prev(a_type, a_field, a_cmp, tree, node, ret);                  \
+    return (ret);                                                      \
+}                                                                      \
+a_attr a_type *                                                        \
+a_prefix##search(a_tree_type *tree, a_type *key) {                     \
+    a_type *ret;                                                       \
+    rb_search(a_type, a_field, a_cmp, tree, key, ret);                 \
+    return (ret);                                                      \
+}                                                                      \
+a_attr a_type *                                                        \
+a_prefix##nsearch(a_tree_type *tree, a_type *key) {                    \
+    a_type *ret;                                                       \
+    rb_nsearch(a_type, a_field, a_cmp, tree, key, ret);                \
+    return (ret);                                                      \
+}                                                                      \
+a_attr a_type *                                                        \
+a_prefix##psearch(a_tree_type *tree, a_type *key) {                    \
+    a_type *ret;                                                       \
+    rb_psearch(a_type, a_field, a_cmp, tree, key, ret);                \
+    return (ret);                                                      \
+}                                                                      \
+a_attr void                                                            \
+a_prefix##insert(a_tree_type *tree, a_type *node) {                    \
+    rb_insert(a_type, a_field, a_cmp, tree, node);                     \
+}                                                                      \
+a_attr void                                                            \
+a_prefix##remove(a_tree_type *tree, a_type *node) {                    \
+    rb_remove(a_type, a_field, a_cmp, tree, node);                     \
+}
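+
+/*
+ * A hypothetical instantiation (a sketch for illustration only; the ex_*
+ * names are invented here, and rb_node()/rb_tree() are the declaration
+ * macros defined earlier in this header):
+ *
+ *   typedef struct ex_node_s ex_node_t;
+ *   struct ex_node_s {
+ *       rb_node(ex_node_t) ex_link;
+ *       int key;
+ *   };
+ *   typedef rb_tree(ex_node_t) ex_tree_t;
+ *
+ *   static int
+ *   ex_cmp(ex_node_t *a, ex_node_t *b)
+ *   {
+ *       return ((a->key > b->key) - (a->key < b->key));
+ *   }
+ *
+ *   rb_wrap(static, ex_tree_, ex_tree_t, ex_node_t, ex_link, ex_cmp)
+ *
+ * after which ex_tree_new(), ex_tree_insert(), ex_tree_search(), etc. can
+ * be called without respecifying types or the comparison function.
+ */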
+
+/*
+ * The iterators simulate recursion via an array of pointers that store the
+ * current path.  This is critical to performance, since a series of calls to
+ * rb_{next,prev}() would require time proportional to (n lg n), whereas this
+ * implementation only requires time proportional to (n).
+ *
+ * Since the iterators cache a path down the tree, any tree modification may
+ * cause the cached path to become invalid.  In order to continue iteration,
+ * use something like the following sequence:
+ *
+ *   {
+ *       a_type *node, *tnode;
+ *
+ *       rb_foreach_begin(a_type, a_field, a_tree, node) {
+ *           ...
+ *           rb_next(a_type, a_field, a_cmp, a_tree, node, tnode);
+ *           rb_remove(a_type, a_field, a_cmp, a_tree, node);
+ *           rb_foreach_next(a_type, a_field, a_cmp, a_tree, tnode);
+ *           ...
+ *       } rb_foreach_end(a_type, a_field, a_tree, node)
+ *   }
+ *
+ * Note that this idiom is not advised if every iteration modifies the tree,
+ * since in that case there is no algorithmic complexity improvement over a
+ * series of rb_{next,prev}() calls, thus making the setup overhead wasted
+ * effort.
+ */
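+
+/*
+ * Using the hypothetical ex_* types sketched after rb_wrap() above,
+ * removing every node with an even key during a single traversal could
+ * look like (tree being an ex_tree_t):
+ *
+ *   ex_node_t *node, *tnode;
+ *
+ *   rb_foreach_begin(ex_node_t, ex_link, &tree, node) {
+ *       if ((node->key & 1) == 0) {
+ *           rb_next(ex_node_t, ex_link, ex_cmp, &tree, node, tnode);
+ *           rb_remove(ex_node_t, ex_link, ex_cmp, &tree, node);
+ *           rb_foreach_next(ex_node_t, ex_link, ex_cmp, &tree, tnode);
+ *       }
+ *   } rb_foreach_end(ex_node_t, ex_link, &tree, node)
+ *
+ * The rb_foreach_next() call re-synchronizes the cached path after the
+ * removal invalidates it; iterations that do not modify the tree fall
+ * through to the normal successor logic in rb_foreach_end().
+ */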
+
+#ifdef RB_NO_C99_VARARRAYS
+   /*
+    * Avoid using variable-length arrays, at the cost of using more stack space.
+    * Size the path arrays such that they are always large enough, even if a
+    * tree consumes all of memory.  Since each node must contain a minimum of
+    * two pointers, there can never be more nodes than:
+    *
+    *   1 << ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1))
+    *
+    * Since the depth of a tree is limited to 3*lg(#nodes), the maximum depth
+    * is:
+    *
+    *   (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
+    *
+    * This works out to a maximum depth of 87 and 180 for 32- and 64-bit
+    * systems, respectively (approximately 348 and 1440 bytes, respectively).
+    */
+#  define rbp_compute_f_height(a_type, a_field, a_tree)
+#  define rbp_f_height (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
+#  define rbp_compute_fr_height(a_type, a_field, a_tree)
+#  define rbp_fr_height        (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
+#else
+#  define rbp_compute_f_height(a_type, a_field, a_tree)                \
+    /* Compute the maximum possible tree depth (3X the black height). */\
+    unsigned rbp_f_height;                                             \
+    rbp_black_height(a_type, a_field, a_tree, rbp_f_height);           \
+    rbp_f_height *= 3;
+#  define rbp_compute_fr_height(a_type, a_field, a_tree)               \
+    /* Compute the maximum possible tree depth (3X the black height). */\
+    unsigned rbp_fr_height;                                            \
+    rbp_black_height(a_type, a_field, a_tree, rbp_fr_height);          \
+    rbp_fr_height *= 3;
+#endif
+
+#define        rb_foreach_begin(a_type, a_field, a_tree, a_var) {             \
+    rbp_compute_f_height(a_type, a_field, a_tree)                      \
+    {                                                                  \
+       /* Initialize the path to contain the left spine.             */\
+       a_type *rbp_f_path[rbp_f_height];                               \
+       a_type *rbp_f_node;                                             \
+       bool rbp_f_synced = false;                                      \
+       unsigned rbp_f_depth = 0;                                       \
+       if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) {                 \
+           rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root;               \
+           rbp_f_depth++;                                              \
+           while ((rbp_f_node = rbp_left_get(a_type, a_field,          \
+             rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) {      \
+               rbp_f_path[rbp_f_depth] = rbp_f_node;                   \
+               rbp_f_depth++;                                          \
+           }                                                           \
+       }                                                               \
+       /* While the path is non-empty, iterate.                      */\
+       while (rbp_f_depth > 0) {                                       \
+           (a_var) = rbp_f_path[rbp_f_depth-1];
+
+/* Only use if modifying the tree during iteration. */
+#define        rb_foreach_next(a_type, a_field, a_cmp, a_tree, a_node)        \
+           /* Re-initialize the path to contain the path to a_node.  */\
+           rbp_f_depth = 0;                                            \
+           if (a_node != NULL) {                                       \
+               if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) {         \
+                   rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root;       \
+                   rbp_f_depth++;                                      \
+                   rbp_f_node = rbp_f_path[0];                         \
+                   while (true) {                                      \
+                       int rbp_f_cmp = (a_cmp)((a_node),               \
+                         rbp_f_path[rbp_f_depth-1]);                   \
+                       if (rbp_f_cmp < 0) {                            \
+                           rbp_f_node = rbp_left_get(a_type, a_field,  \
+                             rbp_f_path[rbp_f_depth-1]);               \
+                       } else if (rbp_f_cmp > 0) {                     \
+                           rbp_f_node = rbp_right_get(a_type, a_field, \
+                             rbp_f_path[rbp_f_depth-1]);               \
+                       } else {                                        \
+                           break;                                      \
+                       }                                               \
+                       assert(rbp_f_node != &(a_tree)->rbt_nil);       \
+                       rbp_f_path[rbp_f_depth] = rbp_f_node;           \
+                       rbp_f_depth++;                                  \
+                   }                                                   \
+               }                                                       \
+           }                                                           \
+           rbp_f_synced = true;
+
+#define        rb_foreach_end(a_type, a_field, a_tree, a_var)                 \
+           if (rbp_f_synced) {                                         \
+               rbp_f_synced = false;                                   \
+               continue;                                               \
+           }                                                           \
+           /* Find the successor.                                    */\
+           if ((rbp_f_node = rbp_right_get(a_type, a_field,            \
+             rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) {      \
+               /* The successor is the left-most node in the right   */\
+               /* subtree.                                           */\
+               rbp_f_path[rbp_f_depth] = rbp_f_node;                   \
+               rbp_f_depth++;                                          \
+               while ((rbp_f_node = rbp_left_get(a_type, a_field,      \
+                 rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) {  \
+                   rbp_f_path[rbp_f_depth] = rbp_f_node;               \
+                   rbp_f_depth++;                                      \
+               }                                                       \
+           } else {                                                    \
+               /* The successor is above the current node.  Unwind   */\
+               /* until a left-leaning edge is removed from the      */\
+               /* path, or the path is empty.                        */\
+               for (rbp_f_depth--; rbp_f_depth > 0; rbp_f_depth--) {   \
+                   if (rbp_left_get(a_type, a_field,                   \
+                     rbp_f_path[rbp_f_depth-1])                        \
+                     == rbp_f_path[rbp_f_depth]) {                     \
+                       break;                                          \
+                   }                                                   \
+               }                                                       \
+           }                                                           \
+       }                                                               \
+    }                                                                  \
+}
+
+#define        rb_foreach_reverse_begin(a_type, a_field, a_tree, a_var) {     \
+    rbp_compute_fr_height(a_type, a_field, a_tree)                     \
+    {                                                                  \
+       /* Initialize the path to contain the right spine.            */\
+       a_type *rbp_fr_path[rbp_fr_height];                             \
+       a_type *rbp_fr_node;                                            \
+       bool rbp_fr_synced = false;                                     \
+       unsigned rbp_fr_depth = 0;                                      \
+       if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) {                 \
+           rbp_fr_path[rbp_fr_depth] = (a_tree)->rbt_root;             \
+           rbp_fr_depth++;                                             \
+           while ((rbp_fr_node = rbp_right_get(a_type, a_field,        \
+             rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) {    \
+               rbp_fr_path[rbp_fr_depth] = rbp_fr_node;                \
+               rbp_fr_depth++;                                         \
+           }                                                           \
+       }                                                               \
+       /* While the path is non-empty, iterate.                      */\
+       while (rbp_fr_depth > 0) {                                      \
+           (a_var) = rbp_fr_path[rbp_fr_depth-1];
+
+/* Only use if modifying the tree during iteration. */
+#define        rb_foreach_reverse_prev(a_type, a_field, a_cmp, a_tree, a_node) \
+           /* Re-initialize the path to contain the path to a_node.  */\
+           rbp_fr_depth = 0;                                           \
+           if (a_node != NULL) {                                       \
+               if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) {         \
+                   rbp_fr_path[rbp_fr_depth] = (a_tree)->rbt_root;     \
+                   rbp_fr_depth++;                                     \
+                   rbp_fr_node = rbp_fr_path[0];                       \
+                   while (true) {                                      \
+                       int rbp_fr_cmp = (a_cmp)((a_node),              \
+                         rbp_fr_path[rbp_fr_depth-1]);                 \
+                       if (rbp_fr_cmp < 0) {                           \
+                           rbp_fr_node = rbp_left_get(a_type, a_field, \
+                             rbp_fr_path[rbp_fr_depth-1]);             \
+                       } else if (rbp_fr_cmp > 0) {                    \
+                           rbp_fr_node = rbp_right_get(a_type, a_field,\
+                             rbp_fr_path[rbp_fr_depth-1]);             \
+                       } else {                                        \
+                           break;                                      \
+                       }                                               \
+                       assert(rbp_fr_node != &(a_tree)->rbt_nil);      \
+                       rbp_fr_path[rbp_fr_depth] = rbp_fr_node;        \
+                       rbp_fr_depth++;                                 \
+                   }                                                   \
+               }                                                       \
+           }                                                           \
+           rbp_fr_synced = true;
+
+#define        rb_foreach_reverse_end(a_type, a_field, a_tree, a_var)         \
+           if (rbp_fr_synced) {                                        \
+               rbp_fr_synced = false;                                  \
+               continue;                                               \
+           }                                                           \
+           if (rbp_fr_depth == 0) {                                    \
+               /* rb_foreach_reverse_prev() was called with a NULL   */\
+               /* a_node.                                            */\
+               break;                                                  \
+           }                                                           \
+           /* Find the predecessor.                                  */\
+           if ((rbp_fr_node = rbp_left_get(a_type, a_field,            \
+             rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) {    \
+               /* The predecessor is the right-most node in the left */\
+               /* subtree.                                           */\
+               rbp_fr_path[rbp_fr_depth] = rbp_fr_node;                \
+               rbp_fr_depth++;                                         \
+               while ((rbp_fr_node = rbp_right_get(a_type, a_field,    \
+                 rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) {\
+                   rbp_fr_path[rbp_fr_depth] = rbp_fr_node;            \
+                   rbp_fr_depth++;                                     \
+               }                                                       \
+           } else {                                                    \
+               /* The predecessor is above the current node.  Unwind */\
+               /* until a right-leaning edge is removed from the     */\
+               /* path, or the path is empty.                        */\
+               for (rbp_fr_depth--; rbp_fr_depth > 0; rbp_fr_depth--) {\
+                   if (rbp_right_get(a_type, a_field,                  \
+                     rbp_fr_path[rbp_fr_depth-1])                      \
+                     == rbp_fr_path[rbp_fr_depth]) {                   \
+                       break;                                          \
+                   }                                                   \
+               }                                                       \
+           }                                                           \
+       }                                                               \
+    }                                                                  \
+}
+
+#endif /* RB_H_ */
diff --git a/libbase/jemalloc_types.h b/libbase/jemalloc_types.h
new file mode 100644
index 0000000..7b18f6c
--- /dev/null
+++ b/libbase/jemalloc_types.h
@@ -0,0 +1,84 @@
+/* -*- Mode: C; tab-width: 8; c-basic-offset: 8 -*- */
+/* vim:set softtabstop=8 shiftwidth=8: */
+/*-
+ * Copyright (C) 2006-2008 Jason Evans <address@hidden>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice(s), this list of conditions and the following disclaimer as
+ *    the first lines of this file unmodified other than the possible
+ *    addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice(s), this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _JEMALLOC_TYPES_H_
+#define _JEMALLOC_TYPES_H_
+
+/* grab size_t */
+#ifdef _MSC_VER
+#include <crtdefs.h>
+#else
+#include <stddef.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned char jemalloc_bool;
+
+/*
+ * jemalloc_stats() is not a stable interface.  When using jemalloc_stats_t, be
+ * sure that the compiled results of jemalloc.c are in sync with this header
+ * file.
+ */
+typedef struct {
+       /*
+        * Run-time configuration settings.
+        */
+       jemalloc_bool   opt_abort;      /* abort(3) on error? */
+       jemalloc_bool   opt_junk;       /* Fill allocated/free memory with 0xa5/0x5a? */
+       jemalloc_bool   opt_utrace;     /* Trace all allocation events? */
+       jemalloc_bool   opt_sysv;       /* SysV semantics? */
+       jemalloc_bool   opt_xmalloc;    /* abort(3) on OOM? */
+       jemalloc_bool   opt_zero;       /* Fill allocated memory with 0x0? */
+       size_t  narenas;        /* Number of arenas. */
+       size_t  balance_threshold; /* Arena contention rebalance threshold. */
+       size_t  quantum;        /* Allocation quantum. */
+       size_t  small_max;      /* Max quantum-spaced allocation size. */
+       size_t  large_max;      /* Max sub-chunksize allocation size. */
+       size_t  chunksize;      /* Size of each virtual memory mapping. */
+       size_t  dirty_max;      /* Max dirty pages per arena. */
+
+       /*
+        * Current memory usage statistics.
+        */
+       size_t  mapped;         /* Bytes mapped (not necessarily committed). */
+       size_t  committed;      /* Bytes committed (readable/writable). */
+       size_t  allocated;      /* Bytes allocated (in use by application). */
+       size_t  dirty;          /* Bytes dirty (committed unused pages). */
+} jemalloc_stats_t;
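+
+/*
+ * A minimal usage sketch (assuming the allocator was built with stats
+ * support, e.g. MALLOC_STATS, so that jemalloc_stats() is available):
+ *
+ *   jemalloc_stats_t stats;
+ *
+ *   jemalloc_stats(&stats);
+ *   printf("allocated %lu / committed %lu / mapped %lu bytes\n",
+ *       (unsigned long)stats.allocated, (unsigned long)stats.committed,
+ *       (unsigned long)stats.mapped);
+ */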
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* _JEMALLOC_TYPES_H_ */
diff --git a/libbase/jemtree.h b/libbase/jemtree.h
deleted file mode 100644
index c9ef389..0000000
--- a/libbase/jemtree.h
+++ /dev/null
@@ -1,743 +0,0 @@
-/*     $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $  */
-/*     $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $    */
-/* $FreeBSD: src/sys/sys/tree.h,v 1.7 2007/12/28 07:03:26 jasone Exp $ */
-
-/*-
- * Copyright 2002 Niels Provos <address@hidden>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef        _SYS_TREE_H_
-#define        _SYS_TREE_H_
-
-/*
- * This file defines data structures for different types of trees:
- * splay trees and red-black trees.
- *
- * A splay tree is a self-organizing data structure.  Every operation
- * on the tree causes a splay to happen.  The splay moves the requested
- * node to the root of the tree and partly rebalances it.
- *
- * This has the benefit that request locality causes faster lookups as
- * the requested nodes move to the top of the tree.  On the other hand,
- * every lookup causes memory writes.
- *
- * The Balance Theorem bounds the total access time for m operations
- * and n inserts on an initially empty tree as O((m + n)lg n).  The
- * amortized cost for a sequence of m accesses to a splay tree is O(lg n).
- *
- * A red-black tree is a binary search tree with the node color as an
- * extra attribute.  It fulfills a set of conditions:
- *     - every search path from the root to a leaf consists of the
- *       same number of black nodes,
- *     - each red node (except for the root) has a black parent,
- *     - each leaf node is black.
- *
- * Every operation on a red-black tree is bounded as O(lg n).
- * The maximum height of a red-black tree is 2lg (n+1).
- */
-
-#define SPLAY_HEAD(name, type)                                         \
-struct name {                                                          \
-       struct type *sph_root; /* root of the tree */                   \
-}
-
-#define SPLAY_INITIALIZER(root)                                                \
-       { NULL }
-
-#define SPLAY_INIT(root) do {                                          \
-       (root)->sph_root = NULL;                                        \
-} while (/*CONSTCOND*/ 0)
-
-#define SPLAY_ENTRY(type)                                              \
-struct {                                                               \
-       struct type *spe_left; /* left element */                       \
-       struct type *spe_right; /* right element */                     \
-}
-
-#define SPLAY_LEFT(elm, field)         (elm)->field.spe_left
-#define SPLAY_RIGHT(elm, field)                (elm)->field.spe_right
-#define SPLAY_ROOT(head)               (head)->sph_root
-#define SPLAY_EMPTY(head)              (SPLAY_ROOT(head) == NULL)
-
-/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
-#define SPLAY_ROTATE_RIGHT(head, tmp, field) do {                      \
-       SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field);  \
-       SPLAY_RIGHT(tmp, field) = (head)->sph_root;                     \
-       (head)->sph_root = tmp;                                         \
-} while (/*CONSTCOND*/ 0)
-       
-#define SPLAY_ROTATE_LEFT(head, tmp, field) do {                       \
-       SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field);  \
-       SPLAY_LEFT(tmp, field) = (head)->sph_root;                      \
-       (head)->sph_root = tmp;                                         \
-} while (/*CONSTCOND*/ 0)
-
-#define SPLAY_LINKLEFT(head, tmp, field) do {                          \
-       SPLAY_LEFT(tmp, field) = (head)->sph_root;                      \
-       tmp = (head)->sph_root;                                         \
-       (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);         \
-} while (/*CONSTCOND*/ 0)
-
-#define SPLAY_LINKRIGHT(head, tmp, field) do {                         \
-       SPLAY_RIGHT(tmp, field) = (head)->sph_root;                     \
-       tmp = (head)->sph_root;                                         \
-       (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);        \
-} while (/*CONSTCOND*/ 0)
-
-#define SPLAY_ASSEMBLE(head, node, left, right, field) do {            \
-       SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
-       SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
-       SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
-       SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
-} while (/*CONSTCOND*/ 0)
-
-/* Generates prototypes and inline functions */
-
-#define SPLAY_PROTOTYPE(name, type, field, cmp)                                \
-void name##_SPLAY(struct name *, struct type *);                       \
-void name##_SPLAY_MINMAX(struct name *, int);                          \
-struct type *name##_SPLAY_INSERT(struct name *, struct type *);                \
-struct type *name##_SPLAY_REMOVE(struct name *, struct type *);                \
-                                                                       \
-/* Finds the node with the same key as elm */                          \
-static __inline struct type *                                          \
-name##_SPLAY_FIND(struct name *head, struct type *elm)                 \
-{                                                                      \
-       if (SPLAY_EMPTY(head))                                          \
-               return(NULL);                                           \
-       name##_SPLAY(head, elm);                                        \
-       if ((cmp)(elm, (head)->sph_root) == 0)                          \
-               return (head->sph_root);                                \
-       return (NULL);                                                  \
-}                                                                      \
-                                                                       \
-static __inline struct type *                                          \
-name##_SPLAY_NEXT(struct name *head, struct type *elm)                 \
-{                                                                      \
-       name##_SPLAY(head, elm);                                        \
-       if (SPLAY_RIGHT(elm, field) != NULL) {                          \
-               elm = SPLAY_RIGHT(elm, field);                          \
-               while (SPLAY_LEFT(elm, field) != NULL) {                \
-                       elm = SPLAY_LEFT(elm, field);                   \
-               }                                                       \
-       } else                                                          \
-               elm = NULL;                                             \
-       return (elm);                                                   \
-}                                                                      \
-                                                                       \
-static __inline struct type *                                          \
-name##_SPLAY_MIN_MAX(struct name *head, int val)                       \
-{                                                                      \
-       name##_SPLAY_MINMAX(head, val);                                 \
-        return (SPLAY_ROOT(head));                                     \
-}
-
-/* Main splay operation.
- * Moves node close to the key of elm to top
- */
-#define SPLAY_GENERATE(name, type, field, cmp)                         \
-struct type *                                                          \
-name##_SPLAY_INSERT(struct name *head, struct type *elm)               \
-{                                                                      \
-    if (SPLAY_EMPTY(head)) {                                           \
-           SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL;    \
-    } else {                                                           \
-           int __comp;                                                 \
-           name##_SPLAY(head, elm);                                    \
-           __comp = (cmp)(elm, (head)->sph_root);                      \
-           if(__comp < 0) {                                            \
-                   SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
-                   SPLAY_RIGHT(elm, field) = (head)->sph_root;         \
-                   SPLAY_LEFT((head)->sph_root, field) = NULL;         \
-           } else if (__comp > 0) {                                    \
-                   SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
-                   SPLAY_LEFT(elm, field) = (head)->sph_root;          \
-                   SPLAY_RIGHT((head)->sph_root, field) = NULL;        \
-           } else                                                      \
-                   return ((head)->sph_root);                          \
-    }                                                                  \
-    (head)->sph_root = (elm);                                          \
-    return (NULL);                                                     \
-}                                                                      \
-                                                                       \
-struct type *                                                          \
-name##_SPLAY_REMOVE(struct name *head, struct type *elm)               \
-{                                                                      \
-       struct type *__tmp;                                             \
-       if (SPLAY_EMPTY(head))                                          \
-               return (NULL);                                          \
-       name##_SPLAY(head, elm);                                        \
-       if ((cmp)(elm, (head)->sph_root) == 0) {                        \
-               if (SPLAY_LEFT((head)->sph_root, field) == NULL) {      \
-                       (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
-               } else {                                                \
-                       __tmp = SPLAY_RIGHT((head)->sph_root, field);   \
-                       (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
-                       name##_SPLAY(head, elm);                        \
-                       SPLAY_RIGHT((head)->sph_root, field) = __tmp;   \
-               }                                                       \
-               return (elm);                                           \
-       }                                                               \
-       return (NULL);                                                  \
-}                                                                      \
-                                                                       \
-void                                                                   \
-name##_SPLAY(struct name *head, struct type *elm)                      \
-{                                                                      \
-       struct type __node, *__left, *__right, *__tmp;                  \
-       int __comp;                                                     \
-\
-       SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
-       __left = __right = &__node;                                     \
-\
-       while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) {          \
-               if (__comp < 0) {                                       \
-                       __tmp = SPLAY_LEFT((head)->sph_root, field);    \
-                       if (__tmp == NULL)                              \
-                               break;                                  \
-                       if ((cmp)(elm, __tmp) < 0){                     \
-                               SPLAY_ROTATE_RIGHT(head, __tmp, field); \
-                               if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
-                                       break;                          \
-                       }                                               \
-                       SPLAY_LINKLEFT(head, __right, field);           \
-               } else if (__comp > 0) {                                \
-                       __tmp = SPLAY_RIGHT((head)->sph_root, field);   \
-                       if (__tmp == NULL)                              \
-                               break;                                  \
-                       if ((cmp)(elm, __tmp) > 0){                     \
-                               SPLAY_ROTATE_LEFT(head, __tmp, field);  \
-                               if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
-                                       break;                          \
-                       }                                               \
-                       SPLAY_LINKRIGHT(head, __left, field);           \
-               }                                                       \
-       }                                                               \
-       SPLAY_ASSEMBLE(head, &__node, __left, __right, field);          \
-}                                                                      \
-                                                                       \
-/* Splay with either the minimum or the maximum element                        \
- * Used to find minimum or maximum element in tree.                    \
- */                                                                    \
-void name##_SPLAY_MINMAX(struct name *head, int __comp) \
-{                                                                      \
-       struct type __node, *__left, *__right, *__tmp;                  \
-\
-       SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
-       __left = __right = &__node;                                     \
-\
-       while (1) {                                                     \
-               if (__comp < 0) {                                       \
-                       __tmp = SPLAY_LEFT((head)->sph_root, field);    \
-                       if (__tmp == NULL)                              \
-                               break;                                  \
-                       if (__comp < 0){                                \
-                               SPLAY_ROTATE_RIGHT(head, __tmp, field); \
-                               if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
-                                       break;                          \
-                       }                                               \
-                       SPLAY_LINKLEFT(head, __right, field);           \
-               } else if (__comp > 0) {                                \
-                       __tmp = SPLAY_RIGHT((head)->sph_root, field);   \
-                       if (__tmp == NULL)                              \
-                               break;                                  \
-                       if (__comp > 0) {                               \
-                               SPLAY_ROTATE_LEFT(head, __tmp, field);  \
-                               if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
-                                       break;                          \
-                       }                                               \
-                       SPLAY_LINKRIGHT(head, __left, field);           \
-               }                                                       \
-       }                                                               \
-       SPLAY_ASSEMBLE(head, &__node, __left, __right, field);          \
-}
-
-#define SPLAY_NEGINF   -1
-#define SPLAY_INF      1
-
-#define SPLAY_INSERT(name, x, y)       name##_SPLAY_INSERT(x, y)
-#define SPLAY_REMOVE(name, x, y)       name##_SPLAY_REMOVE(x, y)
-#define SPLAY_FIND(name, x, y)         name##_SPLAY_FIND(x, y)
-#define SPLAY_NEXT(name, x, y)         name##_SPLAY_NEXT(x, y)
-#define SPLAY_MIN(name, x)             (SPLAY_EMPTY(x) ? NULL  \
-                                       : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
-#define SPLAY_MAX(name, x)             (SPLAY_EMPTY(x) ? NULL  \
-                                       : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
-
-#define SPLAY_FOREACH(x, name, head)                                   \
-       for ((x) = SPLAY_MIN(name, head);                               \
-            (x) != NULL;                                               \
-            (x) = SPLAY_NEXT(name, head, x))
-
-/* Macros that define a red-black tree */
-#define RB_HEAD(name, type)                                            \
-struct name {                                                          \
-       struct type *rbh_root; /* root of the tree */                   \
-}
-
-#define RB_INITIALIZER(root)                                           \
-       { NULL }
-
-#define RB_INIT(root) do {                                             \
-       (root)->rbh_root = NULL;                                        \
-} while (/*CONSTCOND*/ 0)
-
-#define RB_BLACK       0
-#define RB_RED         1
-#define RB_ENTRY(type)                                                 \
-struct {                                                               \
-       struct type *rbe_left;          /* left element */              \
-       struct type *rbe_right;         /* right element */             \
-       struct type *rbe_parent;        /* parent element */            \
-       int rbe_color;                  /* node color */                \
-}
-
-#define RB_LEFT(elm, field)            (elm)->field.rbe_left
-#define RB_RIGHT(elm, field)           (elm)->field.rbe_right
-#define RB_PARENT(elm, field)          (elm)->field.rbe_parent
-#define RB_COLOR(elm, field)           (elm)->field.rbe_color
-#define RB_ROOT(head)                  (head)->rbh_root
-#define RB_EMPTY(head)                 (RB_ROOT(head) == NULL)
-
-#define RB_SET(elm, parent, field) do {                                        \
-       RB_PARENT(elm, field) = parent;                                 \
-       RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL;              \
-       RB_COLOR(elm, field) = RB_RED;                                  \
-} while (/*CONSTCOND*/ 0)
-
-#define RB_SET_BLACKRED(black, red, field) do {                                \
-       RB_COLOR(black, field) = RB_BLACK;                              \
-       RB_COLOR(red, field) = RB_RED;                                  \
-} while (/*CONSTCOND*/ 0)
-
-#ifndef RB_AUGMENT
-#define RB_AUGMENT(x)  do {} while (0)
-#endif
-
-#define RB_ROTATE_LEFT(head, elm, tmp, field) do {                     \
-       (tmp) = RB_RIGHT(elm, field);                                   \
-       if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) {     \
-               RB_PARENT(RB_LEFT(tmp, field), field) = (elm);          \
-       }                                                               \
-       RB_AUGMENT(elm);                                                \
-       if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) {  \
-               if ((elm) == RB_LEFT(RB_PARENT(elm, field), field))     \
-                       RB_LEFT(RB_PARENT(elm, field), field) = (tmp);  \
-               else                                                    \
-                       RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
-       } else                                                          \
-               (head)->rbh_root = (tmp);                               \
-       RB_LEFT(tmp, field) = (elm);                                    \
-       RB_PARENT(elm, field) = (tmp);                                  \
-       RB_AUGMENT(tmp);                                                \
-       if ((RB_PARENT(tmp, field)))                                    \
-               RB_AUGMENT(RB_PARENT(tmp, field));                      \
-} while (/*CONSTCOND*/ 0)
-
-#define RB_ROTATE_RIGHT(head, elm, tmp, field) do {                    \
-       (tmp) = RB_LEFT(elm, field);                                    \
-       if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) {     \
-               RB_PARENT(RB_RIGHT(tmp, field), field) = (elm);         \
-       }                                                               \
-       RB_AUGMENT(elm);                                                \
-       if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) {  \
-               if ((elm) == RB_LEFT(RB_PARENT(elm, field), field))     \
-                       RB_LEFT(RB_PARENT(elm, field), field) = (tmp);  \
-               else                                                    \
-                       RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
-       } else                                                          \
-               (head)->rbh_root = (tmp);                               \
-       RB_RIGHT(tmp, field) = (elm);                                   \
-       RB_PARENT(elm, field) = (tmp);                                  \
-       RB_AUGMENT(tmp);                                                \
-       if ((RB_PARENT(tmp, field)))                                    \
-               RB_AUGMENT(RB_PARENT(tmp, field));                      \
-} while (/*CONSTCOND*/ 0)
-
-/* Generates prototypes and inline functions */
-#define        RB_PROTOTYPE(name, type, field, cmp)                            \
-       RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
-#define        RB_PROTOTYPE_STATIC(name, type, field, cmp)                     \
-       RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static)
-#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr)            \
-attr void name##_RB_INSERT_COLOR(struct name *, struct type *);                \
-attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
-attr struct type *name##_RB_REMOVE(struct name *, struct type *);      \
-attr struct type *name##_RB_INSERT(struct name *, struct type *);      \
-attr struct type *name##_RB_FIND(struct name *, struct type *);                \
-attr struct type *name##_RB_NFIND(struct name *, struct type *);       \
-attr struct type *name##_RB_NEXT(struct type *);                       \
-attr struct type *name##_RB_PREV(struct type *);                       \
-attr struct type *name##_RB_MINMAX(struct name *, int);                        \
-                                                                       \
-
-/* Main rb operation.
- * Moves node close to the key of elm to top
- */
-#define        RB_GENERATE(name, type, field, cmp)                             \
-       RB_GENERATE_INTERNAL(name, type, field, cmp,)
-#define        RB_GENERATE_STATIC(name, type, field, cmp)                      \
-       RB_GENERATE_INTERNAL(name, type, field, cmp, static)
-#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr)             \
-attr void                                                              \
-name##_RB_INSERT_COLOR(struct name *head, struct type *elm)            \
-{                                                                      \
-       struct type *parent, *gparent, *tmp;                            \
-       while ((parent = RB_PARENT(elm, field)) != NULL &&              \
-           RB_COLOR(parent, field) == RB_RED) {                        \
-               gparent = RB_PARENT(parent, field);                     \
-               if (parent == RB_LEFT(gparent, field)) {                \
-                       tmp = RB_RIGHT(gparent, field);                 \
-                       if (tmp && RB_COLOR(tmp, field) == RB_RED) {    \
-                               RB_COLOR(tmp, field) = RB_BLACK;        \
-                               RB_SET_BLACKRED(parent, gparent, field);\
-                               elm = gparent;                          \
-                               continue;                               \
-                       }                                               \
-                       if (RB_RIGHT(parent, field) == elm) {           \
-                               RB_ROTATE_LEFT(head, parent, tmp, field);\
-                               tmp = parent;                           \
-                               parent = elm;                           \
-                               elm = tmp;                              \
-                       }                                               \
-                       RB_SET_BLACKRED(parent, gparent, field);        \
-                       RB_ROTATE_RIGHT(head, gparent, tmp, field);     \
-               } else {                                                \
-                       tmp = RB_LEFT(gparent, field);                  \
-                       if (tmp && RB_COLOR(tmp, field) == RB_RED) {    \
-                               RB_COLOR(tmp, field) = RB_BLACK;        \
-                               RB_SET_BLACKRED(parent, gparent, field);\
-                               elm = gparent;                          \
-                               continue;                               \
-                       }                                               \
-                       if (RB_LEFT(parent, field) == elm) {            \
-                               RB_ROTATE_RIGHT(head, parent, tmp, field);\
-                               tmp = parent;                           \
-                               parent = elm;                           \
-                               elm = tmp;                              \
-                       }                                               \
-                       RB_SET_BLACKRED(parent, gparent, field);        \
-                       RB_ROTATE_LEFT(head, gparent, tmp, field);      \
-               }                                                       \
-       }                                                               \
-       RB_COLOR(head->rbh_root, field) = RB_BLACK;                     \
-}                                                                      \
-                                                                       \
-attr void                                                              \
-name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
-{                                                                      \
-       struct type *tmp;                                               \
-       while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) &&     \
-           elm != RB_ROOT(head)) {                                     \
-               if (RB_LEFT(parent, field) == elm) {                    \
-                       tmp = RB_RIGHT(parent, field);                  \
-                       if (RB_COLOR(tmp, field) == RB_RED) {           \
-                               RB_SET_BLACKRED(tmp, parent, field);    \
-                               RB_ROTATE_LEFT(head, parent, tmp, field);\
-                               tmp = RB_RIGHT(parent, field);          \
-                       }                                               \
-                       if ((RB_LEFT(tmp, field) == NULL ||             \
-                           RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
-                           (RB_RIGHT(tmp, field) == NULL ||            \
-                           RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
-                               RB_COLOR(tmp, field) = RB_RED;          \
-                               elm = parent;                           \
-                               parent = RB_PARENT(elm, field);         \
-                       } else {                                        \
-                               if (RB_RIGHT(tmp, field) == NULL ||     \
-                                   RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
-                                       struct type *oleft;             \
-                                       if ((oleft = RB_LEFT(tmp, field)) \
-                                           != NULL)                    \
-                                               RB_COLOR(oleft, field) = RB_BLACK;\
-                                       RB_COLOR(tmp, field) = RB_RED;  \
-                                       RB_ROTATE_RIGHT(head, tmp, oleft, field);\
-                                       tmp = RB_RIGHT(parent, field);  \
-                               }                                       \
-                               RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
-                               RB_COLOR(parent, field) = RB_BLACK;     \
-                               if (RB_RIGHT(tmp, field))               \
-                                       RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
-                               RB_ROTATE_LEFT(head, parent, tmp, field);\
-                               elm = RB_ROOT(head);                    \
-                               break;                                  \
-                       }                                               \
-               } else {                                                \
-                       tmp = RB_LEFT(parent, field);                   \
-                       if (RB_COLOR(tmp, field) == RB_RED) {           \
-                               RB_SET_BLACKRED(tmp, parent, field);    \
-                               RB_ROTATE_RIGHT(head, parent, tmp, field);\
-                               tmp = RB_LEFT(parent, field);           \
-                       }                                               \
-                       if ((RB_LEFT(tmp, field) == NULL ||             \
-                           RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
-                           (RB_RIGHT(tmp, field) == NULL ||            \
-                           RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
-                               RB_COLOR(tmp, field) = RB_RED;          \
-                               elm = parent;                           \
-                               parent = RB_PARENT(elm, field);         \
-                       } else {                                        \
-                               if (RB_LEFT(tmp, field) == NULL ||      \
-                                   RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
-                                       struct type *oright;            \
-                                       if ((oright = RB_RIGHT(tmp, field)) \
-                                           != NULL)                    \
-                                               RB_COLOR(oright, field) = RB_BLACK;\
-                                       RB_COLOR(tmp, field) = RB_RED;  \
-                                       RB_ROTATE_LEFT(head, tmp, oright, field);\
-                                       tmp = RB_LEFT(parent, field);   \
-                               }                                       \
-                               RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
-                               RB_COLOR(parent, field) = RB_BLACK;     \
-                               if (RB_LEFT(tmp, field))                \
-                                       RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
-                               RB_ROTATE_RIGHT(head, parent, tmp, field);\
-                               elm = RB_ROOT(head);                    \
-                               break;                                  \
-                       }                                               \
-               }                                                       \
-       }                                                               \
-       if (elm)                                                        \
-               RB_COLOR(elm, field) = RB_BLACK;                        \
-}                                                                      \
-                                                                       \
-attr struct type *                                                     \
-name##_RB_REMOVE(struct name *head, struct type *elm)                  \
-{                                                                      \
-       struct type *child, *parent, *old = elm;                        \
-       int color;                                                      \
-       if (RB_LEFT(elm, field) == NULL)                                \
-               child = RB_RIGHT(elm, field);                           \
-       else if (RB_RIGHT(elm, field) == NULL)                          \
-               child = RB_LEFT(elm, field);                            \
-       else {                                                          \
-               struct type *left;                                      \
-               elm = RB_RIGHT(elm, field);                             \
-               while ((left = RB_LEFT(elm, field)) != NULL)            \
-                       elm = left;                                     \
-               child = RB_RIGHT(elm, field);                           \
-               parent = RB_PARENT(elm, field);                         \
-               color = RB_COLOR(elm, field);                           \
-               if (child)                                              \
-                       RB_PARENT(child, field) = parent;               \
-               if (parent) {                                           \
-                       if (RB_LEFT(parent, field) == elm)              \
-                               RB_LEFT(parent, field) = child;         \
-                       else                                            \
-                               RB_RIGHT(parent, field) = child;        \
-                       RB_AUGMENT(parent);                             \
-               } else                                                  \
-                       RB_ROOT(head) = child;                          \
-               if (RB_PARENT(elm, field) == old)                       \
-                       parent = elm;                                   \
-               (elm)->field = (old)->field;                            \
-               if (RB_PARENT(old, field)) {                            \
-                       if (RB_LEFT(RB_PARENT(old, field), field) == old)\
-                               RB_LEFT(RB_PARENT(old, field), field) = elm;\
-                       else                                            \
-                               RB_RIGHT(RB_PARENT(old, field), field) = elm;\
-                       RB_AUGMENT(RB_PARENT(old, field));              \
-               } else                                                  \
-                       RB_ROOT(head) = elm;                            \
-               RB_PARENT(RB_LEFT(old, field), field) = elm;            \
-               if (RB_RIGHT(old, field))                               \
-                       RB_PARENT(RB_RIGHT(old, field), field) = elm;   \
-               if (parent) {                                           \
-                       left = parent;                                  \
-                       do {                                            \
-                               RB_AUGMENT(left);                       \
-                       } while ((left = RB_PARENT(left, field)) != NULL); \
-               }                                                       \
-               goto color;                                             \
-       }                                                               \
-       parent = RB_PARENT(elm, field);                                 \
-       color = RB_COLOR(elm, field);                                   \
-       if (child)                                                      \
-               RB_PARENT(child, field) = parent;                       \
-       if (parent) {                                                   \
-               if (RB_LEFT(parent, field) == elm)                      \
-                       RB_LEFT(parent, field) = child;                 \
-               else                                                    \
-                       RB_RIGHT(parent, field) = child;                \
-               RB_AUGMENT(parent);                                     \
-       } else                                                          \
-               RB_ROOT(head) = child;                                  \
-color:                                                                 \
-       if (color == RB_BLACK)                                          \
-               name##_RB_REMOVE_COLOR(head, parent, child);            \
-       return (old);                                                   \
-}                                                                      \
-                                                                       \
-/* Inserts a node into the RB tree */                                  \
-attr struct type *                                                     \
-name##_RB_INSERT(struct name *head, struct type *elm)                  \
-{                                                                      \
-       struct type *tmp;                                               \
-       struct type *parent = NULL;                                     \
-       int comp = 0;                                                   \
-       tmp = RB_ROOT(head);                                            \
-       while (tmp) {                                                   \
-               parent = tmp;                                           \
-               comp = (cmp)(elm, parent);                              \
-               if (comp < 0)                                           \
-                       tmp = RB_LEFT(tmp, field);                      \
-               else if (comp > 0)                                      \
-                       tmp = RB_RIGHT(tmp, field);                     \
-               else                                                    \
-                       return (tmp);                                   \
-       }                                                               \
-       RB_SET(elm, parent, field);                                     \
-       if (parent != NULL) {                                           \
-               if (comp < 0)                                           \
-                       RB_LEFT(parent, field) = elm;                   \
-               else                                                    \
-                       RB_RIGHT(parent, field) = elm;                  \
-               RB_AUGMENT(parent);                                     \
-       } else                                                          \
-               RB_ROOT(head) = elm;                                    \
-       name##_RB_INSERT_COLOR(head, elm);                              \
-       return (NULL);                                                  \
-}                                                                      \
-                                                                       \
-/* Finds the node with the same key as elm */                          \
-attr struct type *                                                     \
-name##_RB_FIND(struct name *head, struct type *elm)                    \
-{                                                                      \
-       struct type *tmp = RB_ROOT(head);                               \
-       int comp;                                                       \
-       while (tmp) {                                                   \
-               comp = cmp(elm, tmp);                                   \
-               if (comp < 0)                                           \
-                       tmp = RB_LEFT(tmp, field);                      \
-               else if (comp > 0)                                      \
-                       tmp = RB_RIGHT(tmp, field);                     \
-               else                                                    \
-                       return (tmp);                                   \
-       }                                                               \
-       return (NULL);                                                  \
-}                                                                      \
-                                                                       \
-/* Finds the first node greater than or equal to the search key */     \
-attr struct type *                                                     \
-name##_RB_NFIND(struct name *head, struct type *elm)                   \
-{                                                                      \
-       struct type *tmp = RB_ROOT(head);                               \
-       struct type *res = NULL;                                        \
-       int comp;                                                       \
-       while (tmp) {                                                   \
-               comp = cmp(elm, tmp);                                   \
-               if (comp < 0) {                                         \
-                       res = tmp;                                      \
-                       tmp = RB_LEFT(tmp, field);                      \
-               }                                                       \
-               else if (comp > 0)                                      \
-                       tmp = RB_RIGHT(tmp, field);                     \
-               else                                                    \
-                       return (tmp);                                   \
-       }                                                               \
-       return (res);                                                   \
-}                                                                      \
-                                                                       \
-/* ARGSUSED */                                                         \
-attr struct type *                                                     \
-name##_RB_NEXT(struct type *elm)                                       \
-{                                                                      \
-       if (RB_RIGHT(elm, field)) {                                     \
-               elm = RB_RIGHT(elm, field);                             \
-               while (RB_LEFT(elm, field))                             \
-                       elm = RB_LEFT(elm, field);                      \
-       } else {                                                        \
-               if (RB_PARENT(elm, field) &&                            \
-                   (elm == RB_LEFT(RB_PARENT(elm, field), field)))     \
-                       elm = RB_PARENT(elm, field);                    \
-               else {                                                  \
-                       while (RB_PARENT(elm, field) &&                 \
-                           (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
-                               elm = RB_PARENT(elm, field);            \
-                       elm = RB_PARENT(elm, field);                    \
-               }                                                       \
-       }                                                               \
-       return (elm);                                                   \
-}                                                                      \
-                                                                       \
-/* ARGSUSED */                                                         \
-attr struct type *                                                     \
-name##_RB_PREV(struct type *elm)                                       \
-{                                                                      \
-       if (RB_LEFT(elm, field)) {                                      \
-               elm = RB_LEFT(elm, field);                              \
-               while (RB_RIGHT(elm, field))                            \
-                       elm = RB_RIGHT(elm, field);                     \
-       } else {                                                        \
-               if (RB_PARENT(elm, field) &&                            \
-                   (elm == RB_RIGHT(RB_PARENT(elm, field), field)))    \
-                       elm = RB_PARENT(elm, field);                    \
-               else {                                                  \
-                       while (RB_PARENT(elm, field) &&                 \
-                           (elm == RB_LEFT(RB_PARENT(elm, field), field)))\
-                               elm = RB_PARENT(elm, field);            \
-                       elm = RB_PARENT(elm, field);                    \
-               }                                                       \
-       }                                                               \
-       return (elm);                                                   \
-}                                                                      \
-                                                                       \
-attr struct type *                                                     \
-name##_RB_MINMAX(struct name *head, int val)                           \
-{                                                                      \
-       struct type *tmp = RB_ROOT(head);                               \
-       struct type *parent = NULL;                                     \
-       while (tmp) {                                                   \
-               parent = tmp;                                           \
-               if (val < 0)                                            \
-                       tmp = RB_LEFT(tmp, field);                      \
-               else                                                    \
-                       tmp = RB_RIGHT(tmp, field);                     \
-       }                                                               \
-       return (parent);                                                \
-}
-
-#define RB_NEGINF      -1
-#define RB_INF 1
-
-#define RB_INSERT(name, x, y)  name##_RB_INSERT(x, y)
-#define RB_REMOVE(name, x, y)  name##_RB_REMOVE(x, y)
-#define RB_FIND(name, x, y)    name##_RB_FIND(x, y)
-#define RB_NFIND(name, x, y)   name##_RB_NFIND(x, y)
-#define RB_NEXT(name, x, y)    name##_RB_NEXT(y)
-#define RB_PREV(name, x, y)    name##_RB_PREV(y)
-#define RB_MIN(name, x)                name##_RB_MINMAX(x, RB_NEGINF)
-#define RB_MAX(name, x)                name##_RB_MINMAX(x, RB_INF)
-
-#define RB_FOREACH(x, name, head)                                      \
-       for ((x) = RB_MIN(name, head);                                  \
-            (x) != NULL;                                               \
-            (x) = name##_RB_NEXT(x))
-
-#define RB_FOREACH_REVERSE(x, name, head)                              \
-       for ((x) = RB_MAX(name, head);                                  \
-            (x) != NULL;                                               \
-            (x) = name##_RB_PREV(x))
-
-#endif /* _SYS_TREE_H_ */
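
For readers unfamiliar with the header removed above: a minimal sketch of
the splay-tree macro API it provided. The node type, comparator, and
function names here are invented for illustration; this is not repository
code.

    #include "jemtree.h"

    struct snode {
            int key;
            SPLAY_ENTRY(snode) link;    /* embedded left/right pointers */
    };

    static int
    snode_cmp(struct snode *a, struct snode *b)
    {
            return (a->key < b->key) ? -1 : (a->key > b->key);
    }

    SPLAY_HEAD(stree, snode);
    SPLAY_PROTOTYPE(stree, snode, link, snode_cmp)
    SPLAY_GENERATE(stree, snode, link, snode_cmp)

    static void
    splay_example(void)
    {
            struct stree head = SPLAY_INITIALIZER(&head);
            struct snode n = { .key = 42 };
            struct snode query = { .key = 42 };

            SPLAY_INSERT(stree, &head, &n);  /* returns NULL on success */
            if (SPLAY_FIND(stree, &head, &query) != NULL) {
                    /* found; the lookup also splays the node to the root */
            }
    }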

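The same header supplied the red-black-tree macros used for the allocator's
metadata trees (now replaced by jemalloc_rb.h, per the summary below). Again
a hypothetical sketch with invented names:

    #include <stdio.h>
    #include "jemtree.h"

    struct rbnode {
            int key;
            RB_ENTRY(rbnode) link;  /* left/right/parent pointers + color */
    };

    static int
    rbnode_cmp(struct rbnode *a, struct rbnode *b)
    {
            return (a->key < b->key) ? -1 : (a->key > b->key);
    }

    RB_HEAD(rbtree, rbnode);
    RB_PROTOTYPE(rbtree, rbnode, link, rbnode_cmp)
    RB_GENERATE(rbtree, rbnode, link, rbnode_cmp)

    static void
    rb_example(void)
    {
            struct rbtree head = RB_INITIALIZER(&head);
            struct rbnode a = { .key = 2 };
            struct rbnode b = { .key = 1 };
            struct rbnode *it;

            RB_INSERT(rbtree, &head, &a);   /* returns NULL on success */
            RB_INSERT(rbtree, &head, &b);

            RB_FOREACH(it, rbtree, &head)   /* in-order: 1, then 2 */
                    printf("%d\n", it->key);
    }
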
-----------------------------------------------------------------------

Summary of changes:
 configure.ac             |   20 +
 libbase/Makefile.am      |    4 +-
 libbase/jemalloc.c       | 3984 +++++++++++++++++++++++++---------------------
 libbase/jemalloc.h       |   76 +
 libbase/jemalloc_gnash.h |   95 ++
 libbase/jemalloc_rb.h    |  982 ++++++++++++
 libbase/jemalloc_types.h |   84 +
 libbase/jemtree.h        |  743 ---------
 8 files changed, 3396 insertions(+), 2592 deletions(-)
 create mode 100644 libbase/jemalloc.h
 create mode 100644 libbase/jemalloc_gnash.h
 create mode 100644 libbase/jemalloc_rb.h
 create mode 100644 libbase/jemalloc_types.h
 delete mode 100644 libbase/jemtree.h


hooks/post-receive
-- 
Gnash


