    }
    
    void gcleanup ()
    {
    	BOOL rval;
    	assert ( (head == NULL) || (head->base == (void*)gAddressBase));
    	if (gAddressBase && (gNextAddress - gAddressBase))
    	{
    		rval = VirtualFree ((void*)gAddressBase,
    							gNextAddress - gAddressBase,
    							MEM_DECOMMIT);
    
    	assert (rval);
    
    	}
    	while (head)
    	{
    		GmListElement* next = head->next;
    		rval = VirtualFree (head->base, 0, MEM_RELEASE);
    		assert (rval);
    		LocalFree (head);
    		head = next;
    	}
    }
    
    static
    void* findRegion (void* start_address, unsigned long size)
    {
    	MEMORY_BASIC_INFORMATION info;
    	if (size >= TOP_MEMORY) return NULL;
    
    	while ((unsigned long)start_address + size < TOP_MEMORY)
    	{
    		VirtualQuery (start_address, &info, sizeof (info));
    		if ((info.State == MEM_FREE) && (info.RegionSize >= size))
    			return start_address;
    		else
    		{
    			/* Requested region is not available so see if the */
    			/* next region is available.  Set 'start_address' */
    			/* to the next region and call 'VirtualQuery()' */
    			/* again. */
    
    			start_address = (char*)info.BaseAddress + info.RegionSize;
    
    			/* Make sure we start looking for the next region */
    			/* on the *next* 64K boundary.  Otherwise, even if */
    			/* the new region is free according to */
    			/* 'VirtualQuery()', the subsequent call to */
    			/* 'VirtualAlloc()' (which follows the call to */
    			/* this routine in 'wsbrk()') will round *down* */
    			/* the requested address to a 64K boundary which */
    			/* we already know is an address in the */
    			/* unavailable region.  Thus, the subsequent call */
    			/* to 'VirtualAlloc()' will fail and bring us back */
    			/* here, causing us to go into an infinite loop. */
    
    			start_address =
    				(void *) AlignPage64K((unsigned long) start_address);
    		}
    	}
    	return NULL;
    
    }
    
    
    void* wsbrk (long size)
    {
    	void* tmp;
    	if (size > 0)
    	{
    		if (gAddressBase == 0)
    		{
    			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
    			gNextAddress = gAddressBase =
    				(unsigned int)VirtualAlloc (NULL, gAllocatedSize,
    											MEM_RESERVE, PAGE_NOACCESS);
		} else if (AlignPage (gNextAddress + size) > (gAddressBase +
					gAllocatedSize))
    		{
    			long new_size = max (NEXT_SIZE, AlignPage (size));
    			void* new_address = (void*)(gAddressBase+gAllocatedSize);
    			do
    			{
    				new_address = findRegion (new_address, new_size);
    
    				if (new_address == 0)
    					return (void*)-1;
    
    				gAddressBase = gNextAddress =
    					(unsigned int)VirtualAlloc (new_address, new_size,
    												MEM_RESERVE, PAGE_NOACCESS);
    
    				/* repeat in case of race condition */
    				/* The region that we found has been snagged */
    				/* by another thread */
    
    			}
    			while (gAddressBase == 0);
    
    			assert (new_address == (void*)gAddressBase);
    
    			gAllocatedSize = new_size;
    
    			if (!makeGmListElement ((void*)gAddressBase))
    				return (void*)-1;
    		}
    		if ((size + gNextAddress) > AlignPage (gNextAddress))
    		{
    			void* res;
    			res = VirtualAlloc ((void*)AlignPage (gNextAddress),
    								(size + gNextAddress -
    								 AlignPage (gNextAddress)),
    								MEM_COMMIT, PAGE_READWRITE);
    			if (res == 0)
    				return (void*)-1;
    		}
    		tmp = (void*)gNextAddress;
    		gNextAddress = (unsigned int)tmp + size;
    		return tmp;
    	}
    	else if (size < 0)
    	{
    		unsigned int alignedGoal = AlignPage (gNextAddress + size);
    		/* Trim by releasing the virtual memory */
    		if (alignedGoal >= gAddressBase)
    		{
    			VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
    						 MEM_DECOMMIT);
    			gNextAddress = gNextAddress + size;
    			return (void*)gNextAddress;
    		}
    		else
    		{
    			VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
    						 MEM_DECOMMIT);
    			gNextAddress = gAddressBase;
    			return (void*)-1;
    		}
    	}
    	else
    	{
    		return (void*)gNextAddress;
    	}
    }
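
/*
  Illustrative sketch, not compiled in: wsbrk() mirrors the Unix sbrk()
  contract on top of VirtualAlloc.  wsbrk(0) reports the current break,
  a positive size returns the old break (or (void*)-1 on failure), and
  a negative size decommits pages back toward gAddressBase.  The
  example_wsbrk() helper below is only a reading aid, not part of the
  allocator.
*/
#if 0
static void example_wsbrk (void)
{
	void* before = wsbrk (0);
	void* grown  = wsbrk (4096);	/* grow: returns the previous break */

	if (grown != (void*)-1)
	{
		assert (grown == before);
		wsbrk (-4096);		/* shrink the committed region again */
	}
}
#endif	/* 0 */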
    
    #endif
    
    
    
    /*
      Type declarations
    */
    
    
    struct malloc_chunk
    {
      INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
      INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
      struct malloc_chunk* fd;   /* double links -- used only if free. */
      struct malloc_chunk* bk;
    };
    
    typedef struct malloc_chunk* mchunkptr;
    
    /*
    
       malloc_chunk details:
    
        (The following includes lightly edited explanations by Colin Plumb.)
    
        Chunks of memory are maintained using a `boundary tag' method as
        described in e.g., Knuth or Standish.  (See the paper by Paul
        Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
        survey of such techniques.)  Sizes of free chunks are stored both
        in the front of each chunk and at the end.  This makes
        consolidating fragmented chunks into bigger chunks very fast.  The
        size fields also hold bits representing whether chunks are free or
        in use.
    
        An allocated chunk looks like this:
    
    
        chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    	    |             Size of previous chunk, if allocated            | |
    	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    	    |             Size of chunk, in bytes                         |P|
          mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    	    |             User data starts here...                          .
    	    .                                                               .
    	    .             (malloc_usable_space() bytes)                     .
    	    .                                                               |
    nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    	    |             Size of chunk                                     |
    	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    
    
        Where "chunk" is the front of the chunk for the purpose of most of
        the malloc code, but "mem" is the pointer that is returned to the
        user.  "Nextchunk" is the beginning of the next contiguous chunk.
    
    Chunks always begin on even word boundaries, so the mem portion
        (which is returned to the user) is also on an even word boundary, and
        thus double-word aligned.
    
        Free chunks are stored in circular doubly-linked lists, and look like this:
    
        chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    	    |             Size of previous chunk                            |
    	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        `head:' |             Size of chunk, in bytes                         |P|
          mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    	    |             Forward pointer to next chunk in list             |
    	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    	    |             Back pointer to previous chunk in list            |
    	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    	    |             Unused space (may be 0 bytes long)                .
    	    .                                                               .
    	    .                                                               |
    nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        `foot:' |             Size of chunk, in bytes                           |
    	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    
    
        The P (PREV_INUSE) bit, stored in the unused low-order bit of the
        chunk size (which is always a multiple of two words), is an in-use
        bit for the *previous* chunk.  If that bit is *clear*, then the
        word before the current chunk size contains the previous chunk
        size, and can be used to find the front of the previous chunk.
        (The very first chunk allocated always has this bit set,
        preventing access to non-existent (or non-owned) memory.)
    
        Note that the `foot' of the current chunk is actually represented
        as the prev_size of the NEXT chunk. (This makes it easier to
        deal with alignments etc).
    
        The two exceptions to all this are
    
         1. The special chunk `top', which doesn't bother using the
    	trailing size field since there is no
    	next contiguous chunk that would have to index off it. (After
    	initialization, `top' is forced to always exist.  If it would
    	become less than MINSIZE bytes long, it is replenished via
    	malloc_extend_top.)
    
         2. Chunks allocated via mmap, which have the second-lowest-order
    	bit (IS_MMAPPED) set in their size fields.  Because they are
    	never merged or traversed from any other chunk, they have no
    	foot size or inuse information.
    
    
        Available chunks are kept in any of several places (all declared below):
    
        * `av': An array of chunks serving as bin headers for consolidated
           chunks. Each bin is doubly linked.  The bins are approximately
           proportionally (log) spaced.  There are a lot of these bins
           (128). This may look excessive, but works very well in
           practice.  All procedures maintain the invariant that no
           consolidated chunk physically borders another one. Chunks in
           bins are kept in size order, with ties going to the
           approximately least recently used chunk.
    
           The chunks in each bin are maintained in decreasing sorted order by
           size.  This is irrelevant for the small bins, which all contain
           the same-sized chunks, but facilitates best-fit allocation for
           larger chunks. (These lists are just sequential. Keeping them in
           order almost never requires enough traversal to warrant using
           fancier ordered data structures.)  Chunks of the same size are
           linked with the most recently freed at the front, and allocations
           are taken from the back.  This results in LRU or FIFO allocation
           order, which tends to give each chunk an equal opportunity to be
           consolidated with adjacent freed chunks, resulting in larger free
           chunks and less fragmentation.
    
        * `top': The top-most available chunk (i.e., the one bordering the
           end of available memory) is treated specially. It is never
           included in any bin, is used only if no other chunk is
           available, and is released back to the system if it is very
           large (see M_TRIM_THRESHOLD).
    
        * `last_remainder': A bin holding only the remainder of the
           most recently split (non-top) chunk. This bin is checked
           before other non-fitting chunks, so as to provide better
           locality for runs of sequentially allocated chunks.
    
        *  Implicitly, through the host system's memory mapping tables.
           If supported, requests greater than a threshold are usually
           serviced via calls to mmap, and then later released via munmap.
    
    */
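
/*
  Illustrative sketch, not compiled in: concrete offsets for the layout
  described above, assuming a 32-bit build with 4-byte INTERNAL_SIZE_T
  and 4-byte pointers.  The user pointer ("mem") sits 8 bytes past the
  chunk header, and a free chunk's fd/bk links reuse the first 8 bytes
  of what was user data.  example_chunk_layout() is only a reading aid.
*/
#if 0
static void example_chunk_layout(void)
{
  struct malloc_chunk c;

  assert(sizeof(struct malloc_chunk) == 16);    /* == MINSIZE below */
  assert((char*)&c.size - (char*)&c ==  4);
  assert((char*)&c.fd   - (char*)&c ==  8);     /* where "mem" starts */
  assert((char*)&c.bk   - (char*)&c == 12);
}
#endif	/* 0 */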
    
    /*  sizes, alignments */
    
    #define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
    #define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
    #define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
    #define MINSIZE                (sizeof(struct malloc_chunk))
    
    /* conversion from malloc headers to user pointers, and back */
    
    #define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
    #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
    
    /* pad request bytes into a usable size */
    
    #define request2size(req) \
     (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
      (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
       (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
    
    /* Check if m has acceptable alignment */
    
    #define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
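
/*
  Illustrative sketch, not compiled in: how a request is padded,
  assuming SIZE_SZ == 4 (so MALLOC_ALIGNMENT == 8, MALLOC_ALIGN_MASK == 7
  and MINSIZE == 16).  request2size() adds SIZE_SZ bytes of header
  overhead, rounds up to a multiple of MALLOC_ALIGNMENT, and never
  returns less than MINSIZE.  example_request2size() is only a reading aid.
*/
#if 0
static void example_request2size(void)
{
  assert(request2size(1)  == MINSIZE);  /* tiny requests padded to MINSIZE */
  assert(request2size(20) == 24);       /* (20 + 4 + 7) & ~7 */
  assert(request2size(24) == 32);       /* header overhead pushes it up */
  assert(request2size(508) == 512);     /* just past the small-bin range */
}
#endif	/* 0 */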
    
    
    
    
    /*
      Physical chunk operations
    */
    
    
    /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
    
    #define PREV_INUSE 0x1
    
    /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
    
    #define IS_MMAPPED 0x2
    
    /* Bits to mask off when extracting size */
    
    #define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
    
    
    /* Ptr to next physical malloc_chunk. */
    
    #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
    
    /* Ptr to previous physical malloc_chunk */
    
    #define prev_chunk(p)\
       ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
    
    
    /* Treat space at ptr + offset as a chunk */
    
    #define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
    
    
    
    
    /*
      Dealing with use bits
    */
    
    /* extract p's inuse bit */
    
    #define inuse(p)\
    ((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
    
    /* extract inuse bit of previous chunk */
    
    #define prev_inuse(p)  ((p)->size & PREV_INUSE)
    
    /* check for mmap()'ed chunk */
    
    #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
    
    /* set/clear chunk as in use without otherwise disturbing */
    
    #define set_inuse(p)\
    ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
    
    #define clear_inuse(p)\
    ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
    
    /* check/set/clear inuse bits in known places */
    
    #define inuse_bit_at_offset(p, s)\
     (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
    
    #define set_inuse_bit_at_offset(p, s)\
     (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
    
    #define clear_inuse_bit_at_offset(p, s)\
     (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
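
/*
  Illustrative sketch, not compiled in: the use bit of chunk p lives in
  the header of the chunk that physically follows it, which is why
  inuse() and the *_at_offset variants all index forward by p's size.
  example_use_bits() is only a reading aid.
*/
#if 0
static void example_use_bits(mchunkptr p)
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* inuse(p) is exactly the PREV_INUSE bit at offset sz from p */
  assert(inuse(p) == inuse_bit_at_offset(p, sz));

  set_inuse(p);
  assert(inuse(p));

  clear_inuse(p);
  assert(!inuse(p));
}
#endif	/* 0 */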
    
    
    
    
    /*
      Dealing with size fields
    */
    
    /* Get size, ignoring use bits */
    
    #define chunksize(p)          ((p)->size & ~(SIZE_BITS))
    
    /* Set size at head, without disturbing its use bit */
    
    #define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))
    
    /* Set size/use ignoring previous bits in header */
    
    #define set_head(p, s)        ((p)->size = (s))
    
    /* Set size at footer (only when chunk is not in use) */
    
    #define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
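
/*
  Illustrative sketch, not compiled in: head and foot of a free chunk.
  set_foot() writes the size into the prev_size field of the *next*
  chunk, which is how prev_chunk() later finds the front of a free
  neighbour during consolidation.  example_size_fields() is only a
  reading aid and assumes p is a normal (non-mmapped) free chunk.
*/
#if 0
static void example_size_fields(mchunkptr p)
{
  INTERNAL_SIZE_T sz = chunksize(p);

  set_head_size(p, sz);         /* keeps p's PREV_INUSE bit intact */
  set_foot(p, sz);              /* boundary tag seen by the next chunk */

  assert(next_chunk(p) == chunk_at_offset(p, sz));
  assert(next_chunk(p)->prev_size == sz);
  assert(prev_chunk(next_chunk(p)) == p);
}
#endif	/* 0 */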
    
    
    
    
    
    /*
       Bins
    
        The bins, `av_' are an array of pairs of pointers serving as the
        heads of (initially empty) doubly-linked lists of chunks, laid out
        in a way so that each pair can be treated as if it were in a
        malloc_chunk. (This way, the fd/bk offsets for linking bin heads
        and chunks are the same).
    
        Bins for sizes < 512 bytes contain chunks of all the same size, spaced
        8 bytes apart. Larger bins are approximately logarithmically
        spaced. (See the table below.) The `av_' array is never mentioned
        directly in the code, but instead via bin access macros.
    
        Bin layout:
    
        64 bins of size       8
        32 bins of size      64
        16 bins of size     512
         8 bins of size    4096
         4 bins of size   32768
         2 bins of size  262144
         1 bin  of size what's left
    
        There is actually a little bit of slop in the numbers in bin_index
        for the sake of speed. This makes no difference elsewhere.
    
        The special chunks `top' and `last_remainder' get their own bins,
        (this is implemented via yet more trickery with the av_ array),
        although `top' is never properly linked to its bin since it is
        always handled specially.
    
    */
    
    #define NAV             128   /* number of bins */
    
    typedef struct malloc_chunk* mbinptr;
    
    /* access macros */
    
    #define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
    #define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
    #define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
    
    /*
       The first 2 bins are never indexed. The corresponding av_ cells are instead
       used for bookkeeping. This is not to save space, but to simplify
       indexing, maintain locality, and avoid some initialization tests.
    */
    
    #define top            (bin_at(0)->fd)   /* The topmost chunk */
    #define last_remainder (bin_at(1))       /* remainder from last split */
    
    
    /*
       Because top initially points to its own bin with initial
       zero size, thus forcing extension on the first malloc request,
       we avoid having any special code in malloc to check whether
   it even exists yet. But we still need to check for it in malloc_extend_top.
    */
    
    #define initial_top    ((mchunkptr)(bin_at(0)))
    
    /* Helper macro to initialize bins */
    
    #define IAV(i)  bin_at(i), bin_at(i)
    
    static mbinptr av_[NAV * 2 + 2] = {
     0, 0,
     IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
     IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
     IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
     IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
     IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
     IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
     IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
     IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
     IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
     IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
     IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
     IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
     IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
     IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
     IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
     IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
    };
    
    void malloc_bin_reloc (void)
    {
    	unsigned long *p = (unsigned long *)(&av_[2]);
    	int i;
    	for (i=2; i<(sizeof(av_)/sizeof(mbinptr)); ++i) {
    		*p++ += gd->reloc_off;
    	}
    }
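
/*
  Illustrative sketch, not compiled in: bin_at(i) points 2*SIZE_SZ bytes
  before av_[2*i + 2], so the fd/bk fields of the resulting pseudo-chunk
  overlay exactly the two av_ cells of bin i (assuming the usual layout
  with no struct padding).  Every initialised cell is a pointer back
  into av_ itself, which is why malloc_bin_reloc() above has to add the
  relocation offset to each cell once relocation has happened.
  example_bins() is only a reading aid.
*/
#if 0
static void example_bins(void)
{
  assert((char*)&bin_at(0)->fd == (char*)&av_[2]);  /* fd cell of bin 0 */
  assert((char*)&bin_at(0)->bk == (char*)&av_[3]);  /* bk cell of bin 0 */
  assert(next_bin(bin_at(3)) == bin_at(4));
  assert(prev_bin(bin_at(4)) == bin_at(3));
  assert(bin_at(5)->fd == bin_at(5));   /* initial state: bin 5 empty, self-linked */
}
#endif	/* 0 */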
    
    
    /* field-extraction macros */
    
    #define first(b) ((b)->fd)
    #define last(b)  ((b)->bk)
    
    /*
      Indexing into bins
    */
    
    #define bin_index(sz)                                                          \
    (((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
     ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
     ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
     ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
     ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
     ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
    					  126)
    /*
      bins for chunks < 512 are all spaced 8 bytes apart, and hold
      identically sized chunks. This is exploited in malloc.
    */
    
    #define MAX_SMALLBIN         63
    #define MAX_SMALLBIN_SIZE   512
    #define SMALLBIN_WIDTH        8
    
    #define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)
    
    /*
       Requests are `small' if both the corresponding and the next bin are small
    */
    
    #define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
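
/*
  Illustrative sketch, not compiled in: a few sample values of the
  indexing macros above.  Chunks below 512 bytes land in the exact-size
  small bins; larger sizes fall into the logarithmically spaced ranges.
  example_bin_index() is only a reading aid.
*/
#if 0
static void example_bin_index(void)
{
  assert(bin_index(16)   ==  2);    /* smallest real chunk size */
  assert(bin_index(504)  == 63);    /* last small bin (MAX_SMALLBIN) */
  assert(bin_index(512)  == 64);    /* first 64-byte-spaced bin */
  assert(bin_index(1024) == 72);    /* 56 + (1024 >> 6) */

  assert(smallbin_index(504) == bin_index(504));
  assert(is_small_request(request2size(400)));
  assert(!is_small_request(request2size(4000)));
}
#endif	/* 0 */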
    
    
    
    /*
        To help compensate for the large number of bins, a one-level index
        structure is used for bin-by-bin searching.  `binblocks' is a
        one-word bitvector recording whether groups of BINBLOCKWIDTH bins
        have any (possibly) non-empty bins, so they can be skipped over
    all at once during traversals. The bits are NOT always
        cleared as soon as all bins in a block are empty, but instead only
        when all are noticed to be empty during traversal in malloc.
    */
    
    #define BINBLOCKWIDTH     4   /* bins per block */
    
    #define binblocks      (bin_at(0)->size) /* bitvector of nonempty blocks */
    
    /* bin<->block macros */
    
    #define idx2binblock(ix)    ((unsigned)1 << (ix / BINBLOCKWIDTH))
    #define mark_binblock(ii)   (binblocks |= idx2binblock(ii))
    #define clear_binblock(ii)  (binblocks &= ~(idx2binblock(ii)))
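
/*
  Illustrative sketch, not compiled in: with BINBLOCKWIDTH == 4, bins
  0..3 share bit 0 of binblocks, bins 4..7 share bit 1, and so on, so
  all NAV (128) bins are covered by a single 32-bit word.
  example_binblocks() is only a reading aid.
*/
#if 0
static void example_binblocks(void)
{
  assert(idx2binblock(0)  == 1);            /* bins 0..3   -> bit 0  */
  assert(idx2binblock(5)  == 2);            /* bins 4..7   -> bit 1  */
  assert(idx2binblock(64) == (1u << 16));   /* bins 64..67 -> bit 16 */
  assert(NAV / BINBLOCKWIDTH == 32);
}
#endif	/* 0 */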
    
    
    
    
    
    /*  Other static bookkeeping data */
    
    /* variables holding tunable values */
    
    static unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
    static unsigned long top_pad          = DEFAULT_TOP_PAD;
    static unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
    static unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;
    
    /* The first value returned from sbrk */
    static char* sbrk_base = (char*)(-1);
    
    /* The maximum memory obtained from system via sbrk */
    static unsigned long max_sbrked_mem = 0;
    
    /* The maximum via either sbrk or mmap */
    static unsigned long max_total_mem = 0;
    
    /* internal working copy of mallinfo */
    static struct mallinfo current_mallinfo = {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    
    /* The total memory obtained from system via sbrk */
    #define sbrked_mem  (current_mallinfo.arena)
    
    /* Tracking mmaps */
    
    #if 0
    static unsigned int n_mmaps = 0;
    #endif	/* 0 */
    static unsigned long mmapped_mem = 0;
    #if HAVE_MMAP
    static unsigned int max_n_mmaps = 0;
    static unsigned long max_mmapped_mem = 0;
    #endif
    
    
    
    /*
      Debugging support
    */
    
    #ifdef DEBUG
    
    
    /*
      These routines make a number of assertions about the states
      of data structures that should be true at all times. If any
      are not true, it's very likely that a user program has somehow
      trashed memory. (It's also possible that there is a coding error
      in malloc. In which case, please report it!)
    */
    
    #if __STD_C
    static void do_check_chunk(mchunkptr p)
    #else
    static void do_check_chunk(p) mchunkptr p;
    #endif
    {
    #if 0	/* causes warnings because assert() is off */
      INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
    #endif	/* 0 */
    
      /* No checkable chunk is mmapped */
      assert(!chunk_is_mmapped(p));
    
      /* Check for legal address ... */
      assert((char*)p >= sbrk_base);
      if (p != top)
        assert((char*)p + sz <= (char*)top);
      else
        assert((char*)p + sz <= sbrk_base + sbrked_mem);
    
    }
    
    
    #if __STD_C
    static void do_check_free_chunk(mchunkptr p)
    #else
    static void do_check_free_chunk(p) mchunkptr p;
    #endif
    {
      INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
    #if 0	/* causes warnings because assert() is off */
      mchunkptr next = chunk_at_offset(p, sz);
    #endif	/* 0 */
    
      do_check_chunk(p);
    
      /* Check whether it claims to be free ... */
      assert(!inuse(p));
    
      /* Unless a special marker, must have OK fields */
      if ((long)sz >= (long)MINSIZE)
      {
        assert((sz & MALLOC_ALIGN_MASK) == 0);
        assert(aligned_OK(chunk2mem(p)));
        /* ... matching footer field */
        assert(next->prev_size == sz);
        /* ... and is fully consolidated */
        assert(prev_inuse(p));
        assert (next == top || inuse(next));
    
        /* ... and has minimally sane links */
        assert(p->fd->bk == p);
        assert(p->bk->fd == p);
      }
      else /* markers are always of size SIZE_SZ */
        assert(sz == SIZE_SZ);
    }
    
    #if __STD_C
    static void do_check_inuse_chunk(mchunkptr p)
    #else
    static void do_check_inuse_chunk(p) mchunkptr p;
    #endif
    {
      mchunkptr next = next_chunk(p);
      do_check_chunk(p);
    
      /* Check whether it claims to be in use ... */
      assert(inuse(p));
    
      /* ... and is surrounded by OK chunks.
        Since more things can be checked with free chunks than inuse ones,
        if an inuse chunk borders them and debug is on, it's worth doing them.
      */
      if (!prev_inuse(p))
      {
        mchunkptr prv = prev_chunk(p);
        assert(next_chunk(prv) == p);
        do_check_free_chunk(prv);
      }
      if (next == top)
      {
        assert(prev_inuse(next));
        assert(chunksize(next) >= MINSIZE);
      }
      else if (!inuse(next))
        do_check_free_chunk(next);
    
    }
    
    #if __STD_C
    static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
    #else
    static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
    #endif
    {
    #if 0	/* causes warnings because assert() is off */
      INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
      long room = sz - s;
    #endif	/* 0 */
    
      do_check_inuse_chunk(p);
    
      /* Legal size ... */
      assert((long)sz >= (long)MINSIZE);
      assert((sz & MALLOC_ALIGN_MASK) == 0);
      assert(room >= 0);
      assert(room < (long)MINSIZE);
    
      /* ... and alignment */
      assert(aligned_OK(chunk2mem(p)));
    
    
      /* ... and was allocated at front of an available chunk */
      assert(prev_inuse(p));
    
    }
    
    
    #define check_free_chunk(P)  do_check_free_chunk(P)
    #define check_inuse_chunk(P) do_check_inuse_chunk(P)
    #define check_chunk(P) do_check_chunk(P)
    #define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
    #else
    #define check_free_chunk(P)
    #define check_inuse_chunk(P)
    #define check_chunk(P)
    #define check_malloced_chunk(P,N)
    #endif
    
    
    
    /*
      Macro-based internal utilities
    */
    
    
    /*
      Linking chunks in bin lists.
      Call these only with variables, not arbitrary expressions, as arguments.
    */
    
    /*
      Place chunk p of size s in its bin, in size order,
      putting it ahead of others of same size.
    */
    
    
    #define frontlink(P, S, IDX, BK, FD)                                          \
    {                                                                             \
      if (S < MAX_SMALLBIN_SIZE)                                                  \
      {                                                                           \
        IDX = smallbin_index(S);                                                  \
        mark_binblock(IDX);                                                       \
        BK = bin_at(IDX);                                                         \
        FD = BK->fd;                                                              \
        P->bk = BK;                                                               \
        P->fd = FD;                                                               \
        FD->bk = BK->fd = P;                                                      \
      }                                                                           \
      else                                                                        \
      {                                                                           \
        IDX = bin_index(S);                                                       \
        BK = bin_at(IDX);                                                         \
        FD = BK->fd;                                                              \
        if (FD == BK) mark_binblock(IDX);                                         \
        else                                                                      \
        {                                                                         \
          while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
          BK = FD->bk;                                                            \
        }                                                                         \
        P->bk = BK;                                                               \
        P->fd = FD;                                                               \
        FD->bk = BK->fd = P;                                                      \
      }                                                                           \
    }
    
    
    /* take a chunk off a list */
    
    #define unlink(P, BK, FD)                                                     \
    {                                                                             \
      BK = P->bk;                                                                 \
      FD = P->fd;                                                                 \
      FD->bk = BK;                                                                \
      BK->fd = FD;                                                                \
    }                                                                             \
    
    /* Place p as the last remainder */
    
    #define link_last_remainder(P)                                                \
    {                                                                             \
      last_remainder->fd = last_remainder->bk =  P;                               \
      P->fd = P->bk = last_remainder;                                             \
    }
    
    /* Clear the last_remainder bin */
    
    #define clear_last_remainder \
      (last_remainder->fd = last_remainder->bk = last_remainder)
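
/*
  Illustrative sketch, not compiled in: taking a free chunk off its bin
  list with unlink().  As noted above, the macro arguments must be plain
  variables; BK and FD are just scratch pointers that end up holding p's
  former neighbours.  example_unlink() is only a reading aid.
*/
#if 0
static void example_unlink(mchunkptr p)
{
  mchunkptr bck;
  mchunkptr fwd;

  unlink(p, bck, fwd);      /* splices p out: bck->fd = fwd, fwd->bk = bck */
  assert(bck->fd == fwd);
  assert(fwd->bk == bck);
}
#endif	/* 0 */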
    
    
    
    
    
    /* Routines dealing with mmap(). */
    
    #if HAVE_MMAP
    
    #if __STD_C
    static mchunkptr mmap_chunk(size_t size)
    #else
    static mchunkptr mmap_chunk(size) size_t size;
    #endif
    {
      size_t page_mask = malloc_getpagesize - 1;
      mchunkptr p;
    
    #ifndef MAP_ANONYMOUS
      static int fd = -1;
    #endif
    
      if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */
    
      /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
       * there is no following chunk whose prev_size field could be used.
       */
      size = (size + SIZE_SZ + page_mask) & ~page_mask;
    
    #ifdef MAP_ANONYMOUS
      p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
    		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    #else /* !MAP_ANONYMOUS */
      if (fd < 0)
      {
        fd = open("/dev/zero", O_RDWR);
        if(fd < 0) return 0;
      }
      p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
    #endif
    
      if(p == (mchunkptr)-1) return 0;
    
      n_mmaps++;
      if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;
    
      /* We demand that eight bytes into a page must be 8-byte aligned. */
      assert(aligned_OK(chunk2mem(p)));
    
      /* The offset to the start of the mmapped region is stored
       * in the prev_size field of the chunk; normally it is zero,
       * but that can be changed in memalign().
       */
      p->prev_size = 0;
      set_head(p, size|IS_MMAPPED);
    
      mmapped_mem += size;
      if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
        max_mmapped_mem = mmapped_mem;
      if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
        max_total_mem = mmapped_mem + sbrked_mem;
      return p;
    }
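
/*
  Illustrative sketch, not compiled in: the page rounding performed by
  mmap_chunk() above, assuming a 4096-byte page and SIZE_SZ == 4.  An
  already-padded request of 20000 bytes needs 20004 bytes including the
  extra SIZE_SZ of overhead, which rounds up to five whole pages.
  example_mmap_rounding() is only a reading aid.
*/
#if 0
static void example_mmap_rounding(void)
{
  size_t page_mask = 4096 - 1;      /* assumed page size */
  size_t nb = 20000;                /* an already padded request */
  size_t mapped = (nb + SIZE_SZ + page_mask) & ~page_mask;

  assert(mapped == 5 * 4096);
}
#endif	/* 0 */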
    
    #if __STD_C
    static void munmap_chunk(mchunkptr p)
    #else
    static void munmap_chunk(p) mchunkptr p;
    #endif
    {
      INTERNAL_SIZE_T size = chunksize(p);
      int ret;
    
      assert (chunk_is_mmapped(p));
      assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
      assert((n_mmaps > 0));
      assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);
    
      n_mmaps--;
      mmapped_mem -= (size + p->prev_size);
    
      ret = munmap((char *)p - p->prev_size, size + p->prev_size);
    
      /* munmap returns non-zero on failure */
      assert(ret == 0);
    }
    
    #if HAVE_MREMAP
    
    #if __STD_C
    static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
    #else
    static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
    #endif
    {
      size_t page_mask = malloc_getpagesize - 1;
      INTERNAL_SIZE_T offset = p->prev_size;
      INTERNAL_SIZE_T size = chunksize(p);
      char *cp;
    
      assert (chunk_is_mmapped(p));
      assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
      assert((n_mmaps > 0));
      assert(((size + offset) & (malloc_getpagesize-1)) == 0);
    
      /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
      new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
    
      cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);
    
      if (cp == (char *)-1) return 0;
    
      p = (mchunkptr)(cp + offset);
    
      assert(aligned_OK(chunk2mem(p)));
    
      assert((p->prev_size == offset));
      set_head(p, (new_size - offset)|IS_MMAPPED);
    
      mmapped_mem -= size + offset;
      mmapped_mem += new_size;
      if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
        max_mmapped_mem = mmapped_mem;
      if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
        max_total_mem = mmapped_mem + sbrked_mem;
      return p;
    }
    
    #endif /* HAVE_MREMAP */
    
    #endif /* HAVE_MMAP */
    
    
    
    
    /*
      Extend the top-most chunk by obtaining memory from system.
      Main interface to sbrk (but see also malloc_trim).
    */
    
    #if __STD_C
    static void malloc_extend_top(INTERNAL_SIZE_T nb)
    #else
    static void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
    #endif
    {
      char*     brk;                  /* return value from sbrk */
      INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
      INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
      char*     new_brk;              /* return of 2nd sbrk call */
      INTERNAL_SIZE_T top_size;       /* new size of top chunk */
    
      mchunkptr old_top     = top;  /* Record state of old top */
      INTERNAL_SIZE_T old_top_size = chunksize(old_top);
      char*     old_end      = (char*)(chunk_at_offset(old_top, old_top_size));
    
      /* Pad request with top_pad plus minimal overhead */
    
      INTERNAL_SIZE_T    sbrk_size     = nb + top_pad + MINSIZE;
      unsigned long pagesz    = malloc_getpagesize;
    
      /* If not the first time through, round to preserve page boundary */
      /* Otherwise, we need to correct to a page size below anyway. */
  /* (We also correct below if there was an intervening foreign sbrk call.) */
    
      if (sbrk_base != (char*)(-1))
        sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);
    
      brk = (char*)(MORECORE (sbrk_size));
    
      /* Fail if sbrk failed or if a foreign sbrk call killed our space */
      if (brk == (char*)(MORECORE_FAILURE) ||
          (brk < old_end && old_top != initial_top))
        return;
    
      sbrked_mem += sbrk_size;
    
      if (brk == old_end) /* can just add bytes to current top */
      {
        top_size = sbrk_size + old_top_size;
        set_head(top, top_size | PREV_INUSE);
      }