emacs-devel

Re: address@hidden: Re: emacs-22.1 with GTK dumps core when Gnome widgets clicked]


From: YAMAMOTO Mitsuharu
Subject: Re: address@hidden: Re: emacs-22.1 with GTK dumps core when Gnome widgets clicked]
Date: Mon, 18 Jun 2007 10:11:42 +0900
User-agent: Wanderlust/2.14.0 (Africa) SEMI/1.14.6 (Maruoka) FLIM/1.14.8 (Shijō) APEL/10.6 Emacs/22.1.50 (sparc-sun-solaris2.8) MULE/5.0 (SAKAKI)

>>>>> On Sun, 17 Jun 2007 17:49:28 -0400, Richard Stallman <address@hidden> 
>>>>> said:

> Would someone else (other than Mitsuharu) please study this patch to
> check whether it is really correct?  Things like this can be tricky.

Please take a look at the following one instead.  It tries to fix the
following problems in gmalloc.c with HAVE_GTK_AND_PTHREAD in Emacs 22.1:

* HAVE_GTK_AND_PTHREAD was checked before config.h was included.
* malloc_initialize_1 called pthread_mutexattr_init, which may itself
  call malloc.
* _aligned_blocks was not protected by a mutex.
* __malloc_hook etc. could be modified by another thread between the
  NULL check and the call (see the sketch below).
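
To illustrate the idioms the patch relies on, here is a minimal,
self-contained sketch (not code from gmalloc.c; the my_* names are
invented for illustration).  It shows a statically initialized mutex,
so no pthread_mutexattr_init call is needed during bootstrap; a
locking wrapper around a _nolock worker, so the worker can call other
_nolock helpers without needing a recursive mutex; and a hook pointer
copied to a local variable, so another thread cannot change it between
the NULL check and the call.

#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

/* Static initialization avoids pthread_mutexattr_init, which may
   itself call malloc.  */
static pthread_mutex_t my_malloc_mutex = PTHREAD_MUTEX_INITIALIZER;

void *(*my_malloc_hook) (size_t size);

/* The caller must hold my_malloc_mutex; this function may therefore
   call itself or other _nolock helpers without deadlocking on the
   non-recursive mutex.  */
static void *
my_malloc_internal_nolock (size_t size)
{
  return malloc (size);  /* stand-in for the real allocator */
}

static void *
my_malloc_internal (size_t size)
{
  void *result;

  pthread_mutex_lock (&my_malloc_mutex);
  result = my_malloc_internal_nolock (size);
  pthread_mutex_unlock (&my_malloc_mutex);
  return result;
}

void *
my_malloc (size_t size)
{
  /* Copy the hook to a local so a concurrent assignment to
     my_malloc_hook cannot happen between the NULL check and the
     call.  */
  void *(*hook) (size_t) = my_malloc_hook;

  return (hook != NULL ? hook : my_malloc_internal) (size);
}

The actual patch applies the same wrapper/_nolock split to
_malloc_internal, _realloc_internal, and _free_internal, and adds a
separate _aligned_blocks_mutex for the _aligned_blocks list.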

                                     YAMAMOTO Mitsuharu
                                address@hidden

Index: src/gmalloc.c
===================================================================
RCS file: /cvsroot/emacs/emacs/src/gmalloc.c,v
retrieving revision 1.21
diff -c -p -r1.21 gmalloc.c
*** src/gmalloc.c       28 Mar 2007 08:16:05 -0000      1.21
--- src/gmalloc.c       18 Jun 2007 01:00:00 -0000
***************
*** 1,9 ****
  /* This file is no longer automatically generated from libc.  */
  
  #define _MALLOC_INTERNAL
- #ifdef HAVE_GTK_AND_PTHREAD
- #define USE_PTHREAD
- #endif
  
  /* The malloc headers and source files from the C library follow here.  */
  
--- 1,6 ----
*************** Fifth Floor, Boston, MA 02110-1301, USA.
*** 40,45 ****
--- 37,46 ----
  #include <config.h>
  #endif
  
+ #ifdef HAVE_GTK_AND_PTHREAD
+ #define USE_PTHREAD
+ #endif
+ 
  #if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
        || defined STDC_HEADERS || defined PROTOTYPES) \
       && ! defined (BROKEN_PROTOTYPES))
*************** extern __malloc_size_t _bytes_free;
*** 235,248 ****
  extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
  extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
  extern void _free_internal PP ((__ptr_t __ptr));
  
  #ifdef USE_PTHREAD
! extern pthread_mutex_t _malloc_mutex;
  #define LOCK()     pthread_mutex_lock (&_malloc_mutex)
  #define UNLOCK()   pthread_mutex_unlock (&_malloc_mutex)
  #else
  #define LOCK()
  #define UNLOCK()
  #endif
  
  #endif /* _MALLOC_INTERNAL.  */
--- 236,256 ----
  extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
  extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
  extern void _free_internal PP ((__ptr_t __ptr));
+ extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
+ extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
+ extern void _free_internal_nolock PP ((__ptr_t __ptr));
  
  #ifdef USE_PTHREAD
! extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
  #define LOCK()     pthread_mutex_lock (&_malloc_mutex)
  #define UNLOCK()   pthread_mutex_unlock (&_malloc_mutex)
+ #define LOCK_ALIGNED_BLOCKS()     pthread_mutex_lock (&_aligned_blocks_mutex)
+ #define UNLOCK_ALIGNED_BLOCKS()   pthread_mutex_unlock (&_aligned_blocks_mutex)
  #else
  #define LOCK()
  #define UNLOCK()
+ #define LOCK_ALIGNED_BLOCKS()
+ #define UNLOCK_ALIGNED_BLOCKS()
  #endif
  
  #endif /* _MALLOC_INTERNAL.  */
*************** register_heapinfo ()
*** 554,560 ****
  
  #ifdef USE_PTHREAD
  static pthread_once_t malloc_init_once_control = PTHREAD_ONCE_INIT;
! pthread_mutex_t _malloc_mutex;
  #endif
  
  static void
--- 562,569 ----
  
  #ifdef USE_PTHREAD
  static pthread_once_t malloc_init_once_control = PTHREAD_ONCE_INIT;
! pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
! pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
  #endif
  
  static void
*************** malloc_initialize_1 ()
*** 567,573 ****
    if (__malloc_initialize_hook)
      (*__malloc_initialize_hook) ();
  
! #ifdef USE_PTHREAD
    {
      pthread_mutexattr_t attr;
  
--- 576,584 ----
    if (__malloc_initialize_hook)
      (*__malloc_initialize_hook) ();
  
!   /* We don't use recursive mutex because pthread_mutexattr_init may
!      call malloc internally.  */
! #if 0 /* defined (USE_PTHREAD) */
    {
      pthread_mutexattr_t attr;
  
*************** static int morecore_recursing;
*** 616,624 ****
  
  /* Get neatly aligned memory, initializing or
     growing the heap info table as necessary. */
! static __ptr_t morecore PP ((__malloc_size_t));
  static __ptr_t
! morecore (size)
       __malloc_size_t size;
  {
    __ptr_t result;
--- 627,635 ----
  
  /* Get neatly aligned memory, initializing or
     growing the heap info table as necessary. */
! static __ptr_t morecore_nolock PP ((__malloc_size_t));
  static __ptr_t
! morecore_nolock (size)
       __malloc_size_t size;
  {
    __ptr_t result;
*************** morecore (size)
*** 661,667 ****
             `morecore_recursing' flag and return null.  */
          int save = errno;     /* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
!         newinfo = (malloc_info *) _realloc_internal
            (_heapinfo, newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
--- 672,678 ----
             `morecore_recursing' flag and return null.  */
          int save = errno;     /* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
!         newinfo = (malloc_info *) _realloc_internal_nolock
            (_heapinfo, newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
*************** morecore (size)
*** 717,723 ****
        /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
        _heaplimit = 0;
!       _free_internal (oldinfo);
        PROTECT_MALLOC_STATE (0);
  
        /* The new heap limit includes the new table just allocated.  */
--- 728,734 ----
        /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
        _heaplimit = 0;
!       _free_internal_nolock (oldinfo);
        PROTECT_MALLOC_STATE (0);
  
        /* The new heap limit includes the new table just allocated.  */
*************** morecore (size)
*** 732,738 ****
  
  /* Allocate memory from the heap.  */
  __ptr_t
! _malloc_internal (size)
       __malloc_size_t size;
  {
    __ptr_t result;
--- 743,749 ----
  
  /* Allocate memory from the heap.  */
  __ptr_t
! _malloc_internal_nolock (size)
       __malloc_size_t size;
  {
    __ptr_t result;
*************** _malloc_internal (size)
*** 752,758 ****
      return NULL;
  #endif
  
-   LOCK ();
    PROTECT_MALLOC_STATE (0);
  
    if (size < sizeof (struct list))
--- 763,768 ----
*************** _malloc_internal (size)
*** 802,809 ****
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
  #ifdef GC_MALLOC_CHECK
!         result = _malloc_internal (BLOCKSIZE);
          PROTECT_MALLOC_STATE (0);
  #else
          result = malloc (BLOCKSIZE);
  #endif
--- 812,821 ----
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
  #ifdef GC_MALLOC_CHECK
!         result = _malloc_internal_nolock (BLOCKSIZE);
          PROTECT_MALLOC_STATE (0);
+ #elif defined (USE_PTHREAD)
+         result = _malloc_internal_nolock (BLOCKSIZE);
  #else
          result = malloc (BLOCKSIZE);
  #endif
*************** _malloc_internal (size)
*** 874,880 ****
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
!             result = morecore (wantblocks * BLOCKSIZE);
              if (result == NULL)
                goto out;
              block = BLOCK (result);
--- 886,892 ----
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
!             result = morecore_nolock (wantblocks * BLOCKSIZE);
              if (result == NULL)
                goto out;
              block = BLOCK (result);
*************** _malloc_internal (size)
*** 932,938 ****
--- 944,962 ----
  
    PROTECT_MALLOC_STATE (1);
   out:
+   return result;
+ }
+ 
+ __ptr_t
+ _malloc_internal (size)
+      __malloc_size_t size;
+ {
+   __ptr_t result;
+ 
+   LOCK ();
+   result = _malloc_internal_nolock (size);
    UNLOCK ();
+ 
    return result;
  }
  
*************** __ptr_t
*** 940,949 ****
  malloc (size)
       __malloc_size_t size;
  {
    if (!__malloc_initialized && !__malloc_initialize ())
      return NULL;
  
!   return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size);
  }
  
  #ifndef _LIBC
--- 964,979 ----
  malloc (size)
       __malloc_size_t size;
  {
+   __ptr_t (*hook) (__malloc_size_t);
+ 
    if (!__malloc_initialized && !__malloc_initialize ())
      return NULL;
  
!   /* Copy the value of __malloc_hook to an automatic variable in case
!      __malloc_hook is modified in another thread between its
!      NULL-check and the use.  */
!   hook = __malloc_hook;
!   return (hook != NULL ? *hook : _malloc_internal) (size);
  }
  
  #ifndef _LIBC
*************** void (*__free_hook) PP ((__ptr_t __ptr))
*** 1024,1032 ****
  struct alignlist *_aligned_blocks = NULL;
  
  /* Return memory to the heap.
!    Like `free' but don't call a __free_hook if there is one.  */
  void
! _free_internal (ptr)
       __ptr_t ptr;
  {
    int type;
--- 1054,1062 ----
  struct alignlist *_aligned_blocks = NULL;
  
  /* Return memory to the heap.
!    Like `_free_internal' but don't lock mutex.  */
  void
! _free_internal_nolock (ptr)
       __ptr_t ptr;
  {
    int type;
*************** _free_internal (ptr)
*** 1043,1051 ****
    if (ptr == NULL)
      return;
  
-   LOCK ();
    PROTECT_MALLOC_STATE (0);
  
    for (l = _aligned_blocks; l != NULL; l = l->next)
      if (l->aligned == ptr)
        {
--- 1073,1081 ----
    if (ptr == NULL)
      return;
  
    PROTECT_MALLOC_STATE (0);
  
+   LOCK_ALIGNED_BLOCKS ();
    for (l = _aligned_blocks; l != NULL; l = l->next)
      if (l->aligned == ptr)
        {
*************** _free_internal (ptr)
*** 1053,1058 ****
--- 1083,1089 ----
        ptr = l->exact;
        break;
        }
+   UNLOCK_ALIGNED_BLOCKS ();
  
    block = BLOCK (ptr);
  
*************** _free_internal (ptr)
*** 1158,1164 ****
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
!             _free_internal (_heapinfo);
              _heaplimit = oldlimit;
  
              /* Tell malloc to search from the beginning of the heap for
--- 1189,1195 ----
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
!             _free_internal_nolock (_heapinfo);
              _heaplimit = oldlimit;
  
              /* Tell malloc to search from the beginning of the heap for
*************** _free_internal (ptr)
*** 1166,1173 ****
              _heapindex = 0;
  
              /* Allocate new space for the info table and move its data.  */
!             newinfo = (malloc_info *) _malloc_internal (info_blocks
!                                                         * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;
--- 1197,1204 ----
              _heapindex = 0;
  
              /* Allocate new space for the info table and move its data.  */
!             newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
!                                                                * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;
*************** _free_internal (ptr)
*** 1230,1237 ****
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;
  
! #ifdef GC_MALLOC_CHECK
!         _free_internal (ADDRESS (block));
  #else
          free (ADDRESS (block));
  #endif
--- 1261,1268 ----
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;
  
! #if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
!         _free_internal_nolock (ADDRESS (block));
  #else
          free (ADDRESS (block));
  #endif
*************** _free_internal (ptr)
*** 1269,1274 ****
--- 1300,1315 ----
      }
  
    PROTECT_MALLOC_STATE (1);
+ }
+ 
+ /* Return memory to the heap.
+    Like `free' but don't call a __free_hook if there is one.  */
+ void
+ _free_internal (ptr)
+      __ptr_t ptr;
+ {
+   LOCK ();
+   _free_internal_nolock (ptr);
    UNLOCK ();
  }
  
*************** FREE_RETURN_TYPE
*** 1278,1285 ****
  free (ptr)
       __ptr_t ptr;
  {
!   if (__free_hook != NULL)
!     (*__free_hook) (ptr);
    else
      _free_internal (ptr);
  }
--- 1319,1328 ----
  free (ptr)
       __ptr_t ptr;
  {
!   void (*hook) (__ptr_t) = __free_hook;
! 
!   if (hook != NULL)
!     (*hook) (ptr);
    else
      _free_internal (ptr);
  }
*************** __ptr_t (*__realloc_hook) PP ((__ptr_t _
*** 1415,1421 ****
     new region.  This module has incestuous knowledge of the
     internals of both free and malloc. */
  __ptr_t
! _realloc_internal (ptr, size)
       __ptr_t ptr;
       __malloc_size_t size;
  {
--- 1458,1464 ----
     new region.  This module has incestuous knowledge of the
     internals of both free and malloc. */
  __ptr_t
! _realloc_internal_nolock (ptr, size)
       __ptr_t ptr;
       __malloc_size_t size;
  {
*************** _realloc_internal (ptr, size)
*** 1425,1439 ****
  
    if (size == 0)
      {
!       _free_internal (ptr);
!       return _malloc_internal (0);
      }
    else if (ptr == NULL)
!     return _malloc_internal (size);
  
    block = BLOCK (ptr);
  
-   LOCK ();
    PROTECT_MALLOC_STATE (0);
  
    type = _heapinfo[block].busy.type;
--- 1468,1481 ----
  
    if (size == 0)
      {
!       _free_internal_nolock (ptr);
!       return _malloc_internal_nolock (0);
      }
    else if (ptr == NULL)
!     return _malloc_internal_nolock (size);
  
    block = BLOCK (ptr);
  
    PROTECT_MALLOC_STATE (0);
  
    type = _heapinfo[block].busy.type;
*************** _realloc_internal (ptr, size)
*** 1443,1453 ****
        /* Maybe reallocate a large block to a small fragment.  */
        if (size <= BLOCKSIZE / 2)
        {
!         result = _malloc_internal (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
!             _free_internal (ptr);
              goto out;
            }
        }
--- 1485,1495 ----
        /* Maybe reallocate a large block to a small fragment.  */
        if (size <= BLOCKSIZE / 2)
        {
!         result = _malloc_internal_nolock (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
!             _free_internal_nolock (ptr);
              goto out;
            }
        }
*************** _realloc_internal (ptr, size)
*** 1467,1473 ****
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
!         _free_internal (ADDRESS (block + blocks));
          result = ptr;
        }
        else if (blocks == _heapinfo[block].busy.info.size)
--- 1509,1515 ----
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
!         _free_internal_nolock (ADDRESS (block + blocks));
          result = ptr;
        }
        else if (blocks == _heapinfo[block].busy.info.size)
*************** _realloc_internal (ptr, size)
*** 1482,1489 ****
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
!         _free_internal (ptr);
!         result = _malloc_internal (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
--- 1524,1531 ----
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
!         _free_internal_nolock (ptr);
!         result = _malloc_internal_nolock (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
*************** _realloc_internal (ptr, size)
*** 1493,1505 ****
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
!               (void) _malloc_internal (blocks * BLOCKSIZE);
              else
                {
                  __ptr_t previous
!                   = _malloc_internal ((block - _heapindex) * BLOCKSIZE);
!                 (void) _malloc_internal (blocks * BLOCKSIZE);
!                 _free_internal (previous);
                }
              goto out;
            }
--- 1535,1547 ----
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
!               (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
              else
                {
                  __ptr_t previous
!                   = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
!                 (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
!                 _free_internal_nolock (previous);
                }
              goto out;
            }
*************** _realloc_internal (ptr, size)
*** 1519,1536 ****
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old. */
!         result = _malloc_internal (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
!         _free_internal (ptr);
        }
        break;
      }
  
    PROTECT_MALLOC_STATE (1);
   out:
    UNLOCK ();
    return result;
  }
  
--- 1561,1591 ----
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old. */
!         result = _malloc_internal_nolock (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
!         _free_internal_nolock (ptr);
        }
        break;
      }
  
    PROTECT_MALLOC_STATE (1);
   out:
+   return result;
+ }
+ 
+ __ptr_t
+ _realloc_internal (ptr, size)
+      __ptr_t ptr;
+      __malloc_size_t size;
+ {
+   __ptr_t result;
+ 
+   LOCK();
+   result = _realloc_internal_nolock (ptr, size);
    UNLOCK ();
+ 
    return result;
  }
  
*************** realloc (ptr, size)
*** 1539,1549 ****
       __ptr_t ptr;
       __malloc_size_t size;
  {
    if (!__malloc_initialized && !__malloc_initialize ())
      return NULL;
  
!   return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal)
!     (ptr, size);
  }
  /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
  
--- 1594,1606 ----
       __ptr_t ptr;
       __malloc_size_t size;
  {
+   __ptr_t (*hook) (__ptr_t, __malloc_size_t);
+ 
    if (!__malloc_initialized && !__malloc_initialize ())
      return NULL;
  
!   hook = __realloc_hook;
!   return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
  }
  /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
  
*************** memalign (alignment, size)
*** 1681,1689 ****
  {
    __ptr_t result;
    unsigned long int adj, lastadj;
  
!   if (__memalign_hook)
!     return (*__memalign_hook) (alignment, size);
  
    /* Allocate a block with enough extra space to pad the block with up to
       (ALIGNMENT - 1) bytes if necessary.  */
--- 1738,1747 ----
  {
    __ptr_t result;
    unsigned long int adj, lastadj;
+   __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;
  
!   if (hook)
!     return (*hook) (alignment, size);
  
    /* Allocate a block with enough extra space to pad the block with up to
       (ALIGNMENT - 1) bytes if necessary.  */
*************** memalign (alignment, size)
*** 1718,1723 ****
--- 1776,1782 ----
         of an allocated block.  */
  
        struct alignlist *l;
+       LOCK_ALIGNED_BLOCKS ();
        for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == NULL)
          /* This slot is free.  Use it.  */
*************** memalign (alignment, size)
*** 1725,1740 ****
        if (l == NULL)
        {
          l = (struct alignlist *) malloc (sizeof (struct alignlist));
!         if (l == NULL)
            {
!             free (result);
!             return NULL;
            }
-         l->next = _aligned_blocks;
-         _aligned_blocks = l;
        }
!       l->exact = result;
!       result = l->aligned = (char *) result + alignment - adj;
      }
  
    return result;
--- 1784,1806 ----
        if (l == NULL)
        {
          l = (struct alignlist *) malloc (sizeof (struct alignlist));
!         if (l != NULL)
            {
!             l->next = _aligned_blocks;
!             _aligned_blocks = l;
            }
        }
!       if (l != NULL)
!       {
!         l->exact = result;
!         result = l->aligned = (char *) result + alignment - adj;
!       }
!       UNLOCK_ALIGNED_BLOCKS ();
!       if (l == NULL)
!       {
!         free (result);
!         result = NULL;
!       }
      }
  
    return result;



