| author | michael <michael@82007160-df01-0410-b94d-b575c5fd34c7> | 2012-11-16 19:43:21 +0000 |
|---|---|---|
| committer | michael <michael@82007160-df01-0410-b94d-b575c5fd34c7> | 2012-11-16 19:43:21 +0000 |
| commit | 0ae34d13a7ef626ceac7442a880c02dbdf2ae295 (patch) | |
| tree | ef4821f596999b0f3ca2ee876f7be1a31ae6e130 | |
| parent | 6b90593f422a2f7094e7f59b1f68eb736889457f (diff) | |
- add mempool.(c|h)
git-svn-id: svn://svn.ircd-hybrid.org/svnroot/ircd-hybrid/trunk@1656 82007160-df01-0410-b94d-b575c5fd34c7
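At a glance, this commit removes the old mmap()-backed block allocator (balloc) and imports the pooling allocator from the Tor project (mempool) in its place. For orientation before the full diff, here are the two public interfaces, copied from the removed and added headers below; the headers declare the prototypes with unnamed parameters, so the parameter names here are added purely for readability.

```c
/* Removed (include/balloc.h): */
extern void initBlockHeap(void);
extern BlockHeap *BlockHeapCreate(const char *const name, size_t elemsize, int elemsperblock);
extern void *BlockHeapAlloc(BlockHeap *bh);   /* returns zeroed memory */
extern int BlockHeapFree(BlockHeap *bh, void *ptr);
extern int BlockHeapDestroy(BlockHeap *bh);
extern void block_heap_report_stats(struct Client *client_p);

/* Added (include/mempool.h): */
extern void mp_pool_init(void);
extern mp_pool_t *mp_pool_new(size_t item_size, size_t chunk_capacity); /* capacity in bytes */
extern void *mp_pool_get(mp_pool_t *pool);    /* does NOT zero the returned memory */
extern void mp_pool_release(void *item);      /* no pool argument needed */
extern void mp_pool_clean(mp_pool_t *pool, int n_to_keep, int keep_recently_used);
extern void mp_pool_destroy(mp_pool_t *pool);
extern void mp_pool_assert_ok(mp_pool_t *pool);
extern void mp_pool_log_status(mp_pool_t *pool);
extern void mp_pool_garbage_collect(void *arg);
```

The practical differences for callers: mp_pool_new() takes a chunk-size hint in bytes rather than an elements-per-block count, mp_pool_release() needs no pool pointer because every item records its owning chunk, and mp_pool_get() does not zero the returned memory the way BlockHeapAlloc() did.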
| -rw-r--r-- | include/balloc.h | 82 |
| -rw-r--r-- | include/mempool.h | 99 |
| -rw-r--r-- | src/balloc.c | 503 |
| -rw-r--r-- | src/mempool.c | 710 |
4 files changed, 809 insertions, 585 deletions
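Because the allocator swap changes every call site, a sketch of a typical migration may help while reading the diff below. This is hypothetical: `struct Client` and the element count 256 stand in for whatever the real call sites use and are not taken from this commit; only the allocator calls come from the headers shown above.

```c
/* Hypothetical "before" call site using the removed block allocator. */
static BlockHeap *client_heap;

static struct Client *
make_client_old(void)
{
  if (client_heap == NULL)
    client_heap = BlockHeapCreate("client", sizeof(struct Client), 256);

  return BlockHeapAlloc(client_heap);  /* element arrives zeroed */
}

/* The same call site after the switch to the pooling allocator. */
static mp_pool_t *client_pool;

static struct Client *
make_client_new(void)
{
  if (client_pool == NULL)
    /* the capacity hint is in bytes, not in elements per block */
    client_pool = mp_pool_new(sizeof(struct Client), 256 * sizeof(struct Client));

  struct Client *client_p = mp_pool_get(client_pool);

  memset(client_p, 0, sizeof(*client_p));  /* mp_pool_get() does not zero */
  return client_p;
}
```

Freeing migrates the same way: BlockHeapFree(client_heap, client_p) becomes mp_pool_release(client_p), with no pool argument because the mp_allocated_t header in front of every item records its owning chunk.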
diff --git a/include/balloc.h b/include/balloc.h deleted file mode 100644 index 7e99788..0000000 --- a/include/balloc.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * ircd-hybrid: an advanced Internet Relay Chat Daemon(ircd). - * - * Copyright (C) 2002 by the past and present ircd coders, and others. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA - */ - -/*! \file balloc.h - * \brief A block allocator - * \version $Id$ - * \todo Get rid of all typedefs in this file - */ - -#ifndef INCLUDED_balloc_h -#define INCLUDED_balloc_h - -#include "config.h" -#include "client.h" -#include "ircd_defs.h" - - -/*! \brief Block contains status information for - * an allocated block in our heap. - */ -struct Block -{ - int freeElems; /*!< Number of available elems */ - size_t alloc_size; /*!< Size of data space for each block */ - struct Block* next; /*!< Next in our chain of blocks */ - void* elems; /*!< Points to allocated memory */ - dlink_list free_list; /*!< Chain of free memory blocks */ -}; - -typedef struct Block Block; - -struct MemBlock -{ - dlink_node self; /*!< Node for linking into free_list */ - Block *block; /*!< Which block we belong to */ -}; - -typedef struct MemBlock MemBlock; - -/*! \brief BlockHeap contains the information for the root node of the - * memory heap. - */ -struct BlockHeap -{ - size_t elemSize; /*!< Size of each element to be stored */ - int elemsPerBlock; /*!< Number of elements per block */ - int blocksAllocated; /*!< Number of blocks allocated */ - int freeElems; /*!< Number of free elements */ - Block* base; /*!< Pointer to first block */ - const char *name; /*!< Name of the heap */ - struct BlockHeap *next; /*!< Pointer to next heap */ -}; - -typedef struct BlockHeap BlockHeap; - - -extern int BlockHeapFree(BlockHeap *, void *); -extern void * BlockHeapAlloc(BlockHeap *); - -extern BlockHeap* BlockHeapCreate(const char *const, size_t, int); -extern int BlockHeapDestroy(BlockHeap *); -extern void initBlockHeap(void); -extern void block_heap_report_stats(struct Client *); -#endif /* INCLUDED_balloc_h */ diff --git a/include/mempool.h b/include/mempool.h new file mode 100644 index 0000000..82bc747 --- /dev/null +++ b/include/mempool.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2007-2012, The Tor Project, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. 
+ * + * * Neither the names of the copyright owners nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*! \file mempool.h + * \brief Pooling allocator + * \version $Id: mempool.h 1652 2012-11-13 20:28:53Z michael $ + */ + +#ifndef TOR_MEMPOOL_H +#define TOR_MEMPOOL_H + +/** A memory pool is a context in which a large number of fixed-sized +* objects can be allocated efficiently. See mempool.c for implementation +* details. */ +typedef struct mp_pool_t mp_pool_t; + +extern void mp_pool_init(void); +extern void *mp_pool_get(mp_pool_t *); +extern void mp_pool_release(void *); +extern mp_pool_t *mp_pool_new(size_t, size_t); +extern void mp_pool_clean(mp_pool_t *, int, int); +extern void mp_pool_destroy(mp_pool_t *); +extern void mp_pool_assert_ok(mp_pool_t *); +extern void mp_pool_log_status(mp_pool_t *); +extern void mp_pool_garbage_collect(void *); + +#define MEMPOOL_STATS + +struct mp_pool_t { + /** Next pool. A pool is usually linked into the mp_allocated_pools list. */ + mp_pool_t *next; + + /** Doubly-linked list of chunks in which no items have been allocated. + * The front of the list is the most recently emptied chunk. */ + struct mp_chunk_t *empty_chunks; + + /** Doubly-linked list of chunks in which some items have been allocated, + * but which are not yet full. The front of the list is the chunk that has + * most recently been modified. */ + struct mp_chunk_t *used_chunks; + + /** Doubly-linked list of chunks in which no more items can be allocated. + * The front of the list is the chunk that has most recently become full. */ + struct mp_chunk_t *full_chunks; + + /** Length of <b>empty_chunks</b>. */ + int n_empty_chunks; + + /** Lowest value of <b>empty_chunks</b> since last call to + * mp_pool_clean(-1). */ + int min_empty_chunks; + + /** Size of each chunk (in items). */ + int new_chunk_capacity; + + /** Size to allocate for each item, including overhead and alignment + * padding. */ + size_t item_alloc_size; +#ifdef MEMPOOL_STATS + /** Total number of items allocated ever. */ + uint64_t total_items_allocated; + + /** Total number of chunks allocated ever. */ + uint64_t total_chunks_allocated; + + /** Total number of chunks freed ever. */ + uint64_t total_chunks_freed; +#endif +}; +#endif diff --git a/src/balloc.c b/src/balloc.c deleted file mode 100644 index c2c0733..0000000 --- a/src/balloc.c +++ /dev/null @@ -1,503 +0,0 @@ -/* - * ircd-hybrid: an advanced Internet Relay Chat Daemon(ircd). - * - * Copyright (C) 2002 by the past and present ircd coders, and others. 
- * Original credit lines follow: - * - * File: balloc.c - * Owner: Wohali (Joan Touzet) - * - * Modified 2001/11/29 for mmap() support by Aaron Sethman <androsyn@ratbox.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA - */ - -/*! \file balloc.c - * \brief A block allocator - * \version $Id$ - * - * About the block allocator - * - * Basically we have three ways of getting memory off of the operating - * system. Below are this list of methods and the order of preference. - * - * 1. mmap() anonymous pages with the MMAP_ANON flag.\n - * 2. mmap() via the /dev/zero trick.\n - * 3. malloc()\n - * - * The advantages of 1 and 2 are this. We can munmap() the pages which will - * return the pages back to the operating system, thus reducing the size - * of the process as the memory is unused. malloc() on many systems just keeps - * a heap of memory to itself, which never gets given back to the OS, except on - * exit. This of course is bad, if say we have an event that causes us to allocate - * say, 200MB of memory, while our normal memory consumption would be 15MB. In the - * malloc() case, the amount of memory allocated to our process never goes down, as - * malloc() has it locked up in its heap. With the mmap() method, we can munmap() - * the block and return it back to the OS, thus causing our memory consumption to go - * down after we no longer need it. - */ - - -#include "stdinc.h" -#ifdef HAVE_MMAP /* We've got mmap() that is good */ -#include <sys/mman.h> - -/* HP-UX sucks */ -#ifdef MAP_ANONYMOUS -#ifndef MAP_ANON -#define MAP_ANON MAP_ANONYMOUS -#endif -#endif /* MAP_ANONYMOUS */ -#endif - -#include "list.h" -#include "balloc.h" -#include "memory.h" -#include "irc_string.h" -#include "client.h" -#include "send.h" -#include "numeric.h" -#include "fdlist.h" -#include "event.h" - - -static BlockHeap *heap_list = NULL; - -static int BlockHeapGarbageCollect(BlockHeap *); -static void heap_garbage_collection(void *); - -/*! \brief Returns memory for the block back to either the malloc heap - * in case of !HAVE_MMAP, or back to the OS otherwise. - * \param ptr Pointer to memory to be freed - * \param size The size of the memory space - */ -static void -free_block(void *ptr, size_t size) -{ -#ifdef HAVE_MMAP - munmap(ptr, size); -#else - free(ptr); -#endif -} - -#ifdef HAVE_MMAP -#ifndef MAP_ANON /* But we cannot mmap() anonymous pages */ - /* So we mmap() /dev/zero, which is just as good */ -static fde_t dpfd; -#endif -#endif - -/*! \brief Opens /dev/zero and saves the file handle for - * future allocations. - */ -void -initBlockHeap(void) -{ -#ifdef HAVE_MMAP -#ifndef MAP_ANON - int zero_fd = open("/dev/zero", O_RDWR); - - if (zero_fd < 0) - outofmemory(); - fd_open(&dpfd, zero_fd, 0, "Anonymous mmap()"); -#endif - eventAdd("heap_garbage_collection", &heap_garbage_collection, NULL, 119); -#endif -} - -/*! 
- * \param size Size of block to allocate - * \return Address pointer to allocated data space - */ -static void * -get_block(size_t size) -{ -#ifdef HAVE_MMAP - void *ptr = NULL; - -#ifndef MAP_ANON - ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, dpfd.fd, 0); -#else - ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); -#endif - return ptr == MAP_FAILED ? NULL : ptr; -#else - return malloc(size); -#endif -} - -static void -heap_garbage_collection(void *arg) -{ - BlockHeap *bh; - - for (bh = heap_list; bh != NULL; bh = bh->next) - BlockHeapGarbageCollect(bh); -} - -/*! \brief Allocates a new block for addition to a blockheap - * \param bh Pointer to parent blockheap - * \return 0 if successful, 1 if not - */ -static int -newblock(BlockHeap *bh) -{ - MemBlock *newblk = NULL; - Block *b = NULL; - int i = 0; - void *offset = NULL; - - /* Setup the initial data structure. */ - if ((b = calloc(1, sizeof(Block))) == NULL) - return 1; - - b->freeElems = bh->elemsPerBlock; - b->next = bh->base; - b->alloc_size = bh->elemsPerBlock * (bh->elemSize + sizeof(MemBlock)); - b->elems = get_block(b->alloc_size); - - if (b->elems == NULL) - return 1; - - offset = b->elems; - - /* Setup our blocks now */ - for (; i < bh->elemsPerBlock; ++i) - { - void *data; - - newblk = offset; - newblk->block = b; - data = (void *)((size_t)offset + sizeof(MemBlock)); - - dlinkAdd(data, &newblk->self, &b->free_list); - offset = (void *)((size_t)offset + bh->elemSize + sizeof(MemBlock)); - } - - ++bh->blocksAllocated; - bh->freeElems += bh->elemsPerBlock; - bh->base = b; - - return 0; -} - -/*! \brief Creates a new blockheap - * - * Creates a new blockheap from which smaller blocks can be allocated. - * Intended to be used instead of multiple calls to malloc() when - * performance is an issue. - * - * \param name Name of the blockheap - * \param elemsize Size of the basic element to be stored - * \param elemsperblock Number of elements to be stored in a single block of - * memory. When the blockheap runs out of free memory, - * it will allocate elemsize * elemsperblock more. - * \return Pointer to new BlockHeap, or NULL if unsuccessful - */ -BlockHeap * -BlockHeapCreate(const char *const name, size_t elemsize, int elemsperblock) -{ - BlockHeap *bh = NULL; - assert(elemsize > 0 && elemsperblock > 0); - - /* Catch idiotic requests up front */ - if ((elemsize <= 0) || (elemsperblock <= 0)) - outofmemory(); /* die.. out of memory */ - - /* Allocate our new BlockHeap */ - if ((bh = calloc(1, sizeof(BlockHeap))) == NULL) - outofmemory(); /* die.. out of memory */ - - if ((elemsize % sizeof(void *)) != 0) - { - /* Pad to even pointer boundary */ - elemsize += sizeof(void *); - elemsize &= ~(sizeof(void *) - 1); - } - - bh->name = name; - bh->elemSize = elemsize; - bh->elemsPerBlock = elemsperblock; - - /* Be sure our malloc was successful */ - if (newblock(bh)) - { - if (bh != NULL) - free(bh); - - outofmemory(); /* die.. out of memory */ - } - - bh->next = heap_list; - heap_list = bh; - - return bh; -} - -/*! \brief Returns a pointer to a struct within our BlockHeap that's free for - * the taking. 
- * \param bh Pointer to the Blockheap - * \return Address pointer to allocated data space, or NULL if unsuccessful - */ -void * -BlockHeapAlloc(BlockHeap *bh) -{ - Block *walker = NULL; - dlink_node *new_node = NULL; - - assert(bh != NULL); - - if (bh->freeElems == 0) - { - /* Allocate new block and assign */ - /* newblock returns 1 if unsuccessful, 0 if not */ - if (newblock(bh)) - { - /* That didn't work..try to garbage collect */ - BlockHeapGarbageCollect(bh); - - if (newblock(bh)) - outofmemory(); /* Well that didn't work either...bail */ - } - } - - for (walker = bh->base; walker != NULL; walker = walker->next) - { - if (walker->freeElems > 0) - { - --bh->freeElems; - --walker->freeElems; - new_node = walker->free_list.head; - - dlinkDelete(new_node, &walker->free_list); - assert(new_node->data != NULL); - - memset(new_node->data, 0, bh->elemSize); - return new_node->data; - } - } - - assert(0 == 1); - outofmemory(); - return NULL; -} - -/*! \brief Returns an element to the free pool, does not free() - * \param bh Pointer to BlockHeap containing element - * \param ptr Pointer to element to be "freed" - * \return 0 if successful, 1 if element not contained within BlockHeap - */ -int -BlockHeapFree(BlockHeap *bh, void *ptr) -{ - Block *block = NULL; - struct MemBlock *memblock = NULL; - - assert(bh != NULL); - assert(ptr != NULL); - - memblock = (void *)((size_t)ptr - sizeof(MemBlock)); - assert(memblock->block != NULL); - - if (memblock->block == NULL) - outofmemory(); - - block = memblock->block; - ++bh->freeElems; - ++block->freeElems; - mem_frob(ptr, bh->elemSize); - - dlinkAdd(ptr, &memblock->self, &block->free_list); - return 0; -} - -/*! \brief Performs garbage collection on the block heap. - * - * Performs garbage collection on the block heap. Any blocks that are - * completely unallocated are removed from the heap. Garbage collection - * will \b never remove the root node of the heap. - * - * \param bh Pointer to the BlockHeap to be cleaned up - * \return 0 if successful, 1 if bh == NULL - */ -static int -BlockHeapGarbageCollect(BlockHeap *bh) -{ - Block *walker = NULL, *last = NULL; - - assert(bh != NULL); - - if (bh->freeElems < bh->elemsPerBlock || bh->blocksAllocated == 1) - { - /* There couldn't possibly be an entire free block. Return. */ - return 0; - } - - walker = bh->base; - - while (walker != NULL) - { - if (walker->freeElems == bh->elemsPerBlock) - { - free_block(walker->elems, walker->alloc_size); - - if (last != NULL) - { - last->next = walker->next; - - if (walker != NULL) - free(walker); - walker = last->next; - } - else - { - bh->base = walker->next; - - if (walker != NULL) - free(walker); - walker = bh->base; - } - - --bh->blocksAllocated; - bh->freeElems -= bh->elemsPerBlock; - } - else - { - last = walker; - walker = walker->next; - } - } - - return 0; -} - -/*! \brief Completely free()s a BlockHeap. Use for cleanup. - * \param bh Pointer to the BlockHeap to be destroyed - * \return 0 if successful, 1 if bh == NULL - */ -int -BlockHeapDestroy(BlockHeap *bh) -{ - Block *walker = NULL, *next = NULL; - - if (bh == NULL) - return 1; - - for (walker = bh->base; walker != NULL; walker = next) - { - next = walker->next; - free_block(walker->elems, walker->alloc_size); - - if (walker != NULL) - free(walker); - } - - if (heap_list == bh) - heap_list = bh->next; - else { - BlockHeap *prev; - - for (prev = heap_list; prev->next != bh; prev = prev->next) - /* nothing */ ; - prev->next = bh->next; - } - - free(bh); - return 0; -} - -/*! 
\brief Returns the number of bytes being used - * \param bh Pointer to a BlockHeap - * \return Number of bytes being used - */ -static size_t -block_heap_get_used_mem(const BlockHeap *const bh) -{ - return(((bh->blocksAllocated * - bh->elemsPerBlock)-bh->freeElems) * - (bh->elemSize + sizeof(MemBlock))); -} - -/*! \brief Returns the number of bytes being free for further allocations - * \param bh Pointer to a BlockHeap - * \return Number of bytes being free for further allocations - */ -static size_t -block_heap_get_free_mem(const BlockHeap *const bh) -{ - return(bh->freeElems * (bh->elemSize + sizeof(MemBlock))); -} - -/*! \brief Returns the total number of bytes of memory belonging to a heap - * \param bh Pointer to a BlockHeap - * \return Total number of bytes of memory belonging to a heap - */ -static size_t -block_heap_get_size_mem(const BlockHeap *const bh) -{ - return(((bh->blocksAllocated * - bh->elemsPerBlock)) * - (bh->elemSize + sizeof(MemBlock))); -} - -/*! \brief Returns the number of elements being used. - * \param bh Pointer to a BlockHeap - * \return Number of elements being free for further allocations - */ -static unsigned int -block_heap_get_used_elm(const BlockHeap *const bh) -{ - return((bh->blocksAllocated * - bh->elemsPerBlock)-bh->freeElems); -} - -/*! \brief Returns the number of elements being free for further allocations. - * \param bh Pointer to a BlockHeap - * \return Number of elements being free for further allocations - */ -static unsigned int -block_heap_get_free_elm(const BlockHeap *const bh) -{ - return(bh->freeElems); -} - -/*! \brief Returns the number of total elements belonging to a heap. - * Includes \b free and \b used elements. - * \param bh Pointer to a BlockHeap - * \return Number of total elements belonging to a heap - */ -static unsigned int -block_heap_get_size_elm(const BlockHeap *const bh) -{ - return(bh->blocksAllocated * bh->elemsPerBlock); -} - -void -block_heap_report_stats(struct Client *client_p) -{ - const BlockHeap *bh = NULL; - - for (bh = heap_list; bh != NULL; bh = bh->next) - sendto_one(client_p, ":%s %d %s z :%s mempool: used %u/%u free %u/%u (size %u/%u)", - me.name, RPL_STATSDEBUG, client_p->name, bh->name, - block_heap_get_used_elm(bh), - block_heap_get_used_mem(bh), - block_heap_get_free_elm(bh), - block_heap_get_free_mem(bh), - block_heap_get_size_elm(bh), - block_heap_get_size_mem(bh)); -} diff --git a/src/mempool.c b/src/mempool.c new file mode 100644 index 0000000..4713f16 --- /dev/null +++ b/src/mempool.c @@ -0,0 +1,710 @@ +/* + * Copyright (c) 2007-2012, The Tor Project, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * + * * Neither the names of the copyright owners nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*! \file mempool.c + * \brief A pooling allocator + * \version $Id: mempool.c 1652 2012-11-13 20:28:53Z michael $ + */ + +#include "stdinc.h" +#include "memory.h" +#include "event.h" +#include "log.h" +#include "mempool.h" + +/** Returns floor(log2(u64)). If u64 is 0, (incorrectly) returns 0. */ +static int +tor_log2(uint64_t u64) +{ + int r = 0; + + if (u64 >= (1LLU << 32)) + { + u64 >>= 32; + r = 32; + } + if (u64 >= (1LLU << 16)) + { + u64 >>= 16; + r += 16; + } + if (u64 >= (1LLU << 8)) + { + u64 >>= 8; + r += 8; + } + if (u64 >= (1LLU << 4)) + { + u64 >>= 4; + r += 4; + } + if (u64 >= (1LLU << 2)) + { + u64 >>= 2; + r += 2; + } + if (u64 >= (1LLU << 1)) + { + u64 >>= 1; + r += 1; + } + + return r; +} + +/** Return the power of 2 in range [1,UINT64_MAX] closest to <b>u64</b>. If + * there are two powers of 2 equally close, round down. */ +static uint64_t +round_to_power_of_2(uint64_t u64) +{ + int lg2; + uint64_t low; + uint64_t high; + + if (u64 == 0) + return 1; + + lg2 = tor_log2(u64); + low = 1LLU << lg2; + + if (lg2 == 63) + return low; + + high = 1LLU << (lg2 + 1); + if (high - u64 < u64 - low) + return high; + else + return low; +} + +/* OVERVIEW: + * + * This is an implementation of memory pools for Tor cells. It may be + * useful for you too. + * + * Generally, a memory pool is an allocation strategy optimized for large + * numbers of identically-sized objects. Rather than the elaborate arena + * and coalescing strategies you need to get good performance for a + * general-purpose malloc(), pools use a series of large memory "chunks", + * each of which is carved into a bunch of smaller "items" or + * "allocations". + * + * To get decent performance, you need to: + * - Minimize the number of times you hit the underlying allocator. + * - Try to keep accesses as local in memory as possible. + * - Try to keep the common case fast. + * + * Our implementation uses three lists of chunks per pool. Each chunk can + * be either "full" (no more room for items); "empty" (no items); or + * "used" (not full, not empty). There are independent doubly-linked + * lists for each state. + * + * CREDIT: + * + * I wrote this after looking at 3 or 4 other pooling allocators, but + * without copying. The strategy this most resembles (which is funny, + * since that's the one I looked at longest ago) is the pool allocator + * underlying Python's obmalloc code. Major differences from obmalloc's + * pools are: + * - We don't even try to be threadsafe. + * - We only handle objects of one size. + * - Our list of empty chunks is doubly-linked, not singly-linked. + * (This could change pretty easily; it's only doubly-linked for + * consistency.) 
+ * - We keep a list of full chunks (so we can have a "nuke everything" + * function). Obmalloc's pools leave full chunks to float unanchored. + * + * LIMITATIONS: + * - Not even slightly threadsafe. + * - Likes to have lots of items per chunks. + * - One pointer overhead per allocated thing. (The alternative is + * something like glib's use of an RB-tree to keep track of what + * chunk any given piece of memory is in.) + * - Only aligns allocated things to void* level: redefine ALIGNMENT_TYPE + * if you need doubles. + * - Could probably be optimized a bit; the representation contains + * a bit more info than it really needs to have. + */ + +/* Tuning parameters */ +/** Largest type that we need to ensure returned memory items are aligned to. + * Change this to "double" if we need to be safe for structs with doubles. */ +#define ALIGNMENT_TYPE void * +/** Increment that we need to align allocated. */ +#define ALIGNMENT sizeof(ALIGNMENT_TYPE) +/** Largest memory chunk that we should allocate. */ +#define MAX_CHUNK (8 *(1L << 20)) +/** Smallest memory chunk size that we should allocate. */ +#define MIN_CHUNK 4096 + +typedef struct mp_allocated_t mp_allocated_t; +typedef struct mp_chunk_t mp_chunk_t; + +/** Holds a single allocated item, allocated as part of a chunk. */ +struct mp_allocated_t { + /** The chunk that this item is allocated in. This adds overhead to each + * allocated item, thus making this implementation inappropriate for + * very small items. */ + mp_chunk_t *in_chunk; + + union { + /** If this item is free, the next item on the free list. */ + mp_allocated_t *next_free; + + /** If this item is not free, the actual memory contents of this item. + * (Not actual size.) */ + char mem[1]; + + /** An extra element to the union to insure correct alignment. */ + ALIGNMENT_TYPE dummy_; + } u; +}; + +/** 'Magic' value used to detect memory corruption. */ +#define MP_CHUNK_MAGIC 0x09870123 + +/** A chunk of memory. Chunks come from malloc; we use them */ +struct mp_chunk_t { + uint32_t magic; /**< Must be MP_CHUNK_MAGIC if this chunk is valid. */ + mp_chunk_t *next; /**< The next free, used, or full chunk in sequence. */ + mp_chunk_t *prev; /**< The previous free, used, or full chunk in sequence. */ + mp_pool_t *pool; /**< The pool that this chunk is part of. */ + + /** First free item in the freelist for this chunk. Note that this may be + * NULL even if this chunk is not at capacity: if so, the free memory at + * next_mem has not yet been carved into items. + */ + mp_allocated_t *first_free; + int n_allocated; /**< Number of currently allocated items in this chunk. */ + int capacity; /**< Number of items that can be fit into this chunk. */ + size_t mem_size; /**< Number of usable bytes in mem. */ + char *next_mem; /**< Pointer into part of <b>mem</b> not yet carved up. */ + char mem[]; /**< Storage for this chunk. */ +}; + +static mp_pool_t *mp_allocated_pools = NULL; + +/** Number of extra bytes needed beyond mem_size to allocate a chunk. */ +#define CHUNK_OVERHEAD offsetof(mp_chunk_t, mem[0]) + +/** Given a pointer to a mp_allocated_t, return a pointer to the memory + * item it holds. */ +#define A2M(a) (&(a)->u.mem) +/** Given a pointer to a memory_item_t, return a pointer to its enclosing + * mp_allocated_t. */ +#define M2A(p) (((char *)p) - offsetof(mp_allocated_t, u.mem)) + +void +mp_pool_init(void) +{ + eventAdd("mp_pool_garbage_collect", &mp_pool_garbage_collect, NULL, 119); +} + +/** Helper: Allocate and return a new memory chunk for <b>pool</b>. 
Does not + * link the chunk into any list. */ +static mp_chunk_t * +mp_chunk_new(mp_pool_t *pool) +{ + size_t sz = pool->new_chunk_capacity * pool->item_alloc_size; + mp_chunk_t *chunk = MyMalloc(CHUNK_OVERHEAD + sz); + +#ifdef MEMPOOL_STATS + ++pool->total_chunks_allocated; +#endif + chunk->magic = MP_CHUNK_MAGIC; + chunk->capacity = pool->new_chunk_capacity; + chunk->mem_size = sz; + chunk->next_mem = chunk->mem; + chunk->pool = pool; + return chunk; +} + +/** Take a <b>chunk</b> that has just been allocated or removed from + * <b>pool</b>'s empty chunk list, and add it to the head of the used chunk + * list. */ +static void +add_newly_used_chunk_to_used_list(mp_pool_t *pool, mp_chunk_t *chunk) +{ + chunk->next = pool->used_chunks; + if (chunk->next) + chunk->next->prev = chunk; + pool->used_chunks = chunk; + assert(!chunk->prev); +} + +/** Return a newly allocated item from <b>pool</b>. */ +void * +mp_pool_get(mp_pool_t *pool) +{ + mp_chunk_t *chunk; + mp_allocated_t *allocated; + + if (pool->used_chunks != NULL) { + /* + * Common case: there is some chunk that is neither full nor empty. Use + * that one. (We can't use the full ones, obviously, and we should fill + * up the used ones before we start on any empty ones. + */ + chunk = pool->used_chunks; + + } else if (pool->empty_chunks) { + /* + * We have no used chunks, but we have an empty chunk that we haven't + * freed yet: use that. (We pull from the front of the list, which should + * get us the most recently emptied chunk.) + */ + chunk = pool->empty_chunks; + + /* Remove the chunk from the empty list. */ + pool->empty_chunks = chunk->next; + if (chunk->next) + chunk->next->prev = NULL; + + /* Put the chunk on the 'used' list*/ + add_newly_used_chunk_to_used_list(pool, chunk); + + assert(!chunk->prev); + --pool->n_empty_chunks; + if (pool->n_empty_chunks < pool->min_empty_chunks) + pool->min_empty_chunks = pool->n_empty_chunks; + } else { + /* We have no used or empty chunks: allocate a new chunk. */ + chunk = mp_chunk_new(pool); + + /* Add the new chunk to the used list. */ + add_newly_used_chunk_to_used_list(pool, chunk); + } + + assert(chunk->n_allocated < chunk->capacity); + + if (chunk->first_free) { + /* If there's anything on the chunk's freelist, unlink it and use it. */ + allocated = chunk->first_free; + chunk->first_free = allocated->u.next_free; + allocated->u.next_free = NULL; /* For debugging; not really needed. */ + assert(allocated->in_chunk == chunk); + } else { + /* Otherwise, the chunk had better have some free space left on it. */ + assert(chunk->next_mem + pool->item_alloc_size <= + chunk->mem + chunk->mem_size); + + /* Good, it did. Let's carve off a bit of that free space, and use + * that. */ + allocated = (void *)chunk->next_mem; + chunk->next_mem += pool->item_alloc_size; + allocated->in_chunk = chunk; + allocated->u.next_free = NULL; /* For debugging; not really needed. */ + } + + ++chunk->n_allocated; +#ifdef MEMPOOL_STATS + ++pool->total_items_allocated; +#endif + + if (chunk->n_allocated == chunk->capacity) { + /* This chunk just became full. */ + assert(chunk == pool->used_chunks); + assert(chunk->prev == NULL); + + /* Take it off the used list. */ + pool->used_chunks = chunk->next; + if (chunk->next) + chunk->next->prev = NULL; + + /* Put it on the full list. */ + chunk->next = pool->full_chunks; + if (chunk->next) + chunk->next->prev = chunk; + pool->full_chunks = chunk; + } + /* And return the memory portion of the mp_allocated_t. 
*/ + return A2M(allocated); +} + +/** Return an allocated memory item to its memory pool. */ +void +mp_pool_release(void *item) +{ + mp_allocated_t *allocated = (void *)M2A(item); + mp_chunk_t *chunk = allocated->in_chunk; + + assert(chunk); + assert(chunk->magic == MP_CHUNK_MAGIC); + assert(chunk->n_allocated > 0); + + allocated->u.next_free = chunk->first_free; + chunk->first_free = allocated; + + if (chunk->n_allocated == chunk->capacity) { + /* This chunk was full and is about to be used. */ + mp_pool_t *pool = chunk->pool; + /* unlink from the full list */ + if (chunk->prev) + chunk->prev->next = chunk->next; + if (chunk->next) + chunk->next->prev = chunk->prev; + if (chunk == pool->full_chunks) + pool->full_chunks = chunk->next; + + /* link to the used list. */ + chunk->next = pool->used_chunks; + chunk->prev = NULL; + if (chunk->next) + chunk->next->prev = chunk; + pool->used_chunks = chunk; + } else if (chunk->n_allocated == 1) { + /* This was used and is about to be empty. */ + mp_pool_t *pool = chunk->pool; + + /* Unlink from the used list */ + if (chunk->prev) + chunk->prev->next = chunk->next; + if (chunk->next) + chunk->next->prev = chunk->prev; + if (chunk == pool->used_chunks) + pool->used_chunks = chunk->next; + + /* Link to the empty list */ + chunk->next = pool->empty_chunks; + chunk->prev = NULL; + if (chunk->next) + chunk->next->prev = chunk; + pool->empty_chunks = chunk; + + /* Reset the guts of this chunk to defragment it, in case it gets + * used again. */ + chunk->first_free = NULL; + chunk->next_mem = chunk->mem; + + ++pool->n_empty_chunks; + } + + --chunk->n_allocated; +} + +/** Allocate a new memory pool to hold items of size <b>item_size</b>. We'll + * try to fit about <b>chunk_capacity</b> bytes in each chunk. */ +mp_pool_t * +mp_pool_new(size_t item_size, size_t chunk_capacity) +{ + mp_pool_t *pool; + size_t alloc_size, new_chunk_cap; + +/* assert(item_size < SIZE_T_CEILING); + assert(chunk_capacity < SIZE_T_CEILING); + assert(SIZE_T_CEILING / item_size > chunk_capacity); +*/ + pool = MyMalloc(sizeof(mp_pool_t)); + /* + * First, we figure out how much space to allow per item. We'll want to + * use make sure we have enough for the overhead plus the item size. + */ + alloc_size = (size_t)(offsetof(mp_allocated_t, u.mem) + item_size); + /* + * If the item_size is less than sizeof(next_free), we need to make + * the allocation bigger. + */ + if (alloc_size < sizeof(mp_allocated_t)) + alloc_size = sizeof(mp_allocated_t); + + /* If we're not an even multiple of ALIGNMENT, round up. */ + if (alloc_size % ALIGNMENT) { + alloc_size = alloc_size + ALIGNMENT - (alloc_size % ALIGNMENT); + } + if (alloc_size < ALIGNMENT) + alloc_size = ALIGNMENT; + assert((alloc_size % ALIGNMENT) == 0); + + /* + * Now we figure out how many items fit in each chunk. We need to fit at + * least 2 items per chunk. No chunk can be more than MAX_CHUNK bytes long, + * or less than MIN_CHUNK. + */ + if (chunk_capacity > MAX_CHUNK) + chunk_capacity = MAX_CHUNK; + + /* + * Try to be around a power of 2 in size, since that's what allocators like + * handing out. 512K-1 byte is a lot better than 512K+1 byte. 
+ */ + chunk_capacity = (size_t) round_to_power_of_2(chunk_capacity); + while (chunk_capacity < alloc_size * 2 + CHUNK_OVERHEAD) + chunk_capacity *= 2; + if (chunk_capacity < MIN_CHUNK) + chunk_capacity = MIN_CHUNK; + + new_chunk_cap = (chunk_capacity-CHUNK_OVERHEAD) / alloc_size; + assert(new_chunk_cap < INT_MAX); + pool->new_chunk_capacity = (int)new_chunk_cap; + + pool->item_alloc_size = alloc_size; + + pool->next = mp_allocated_pools; + mp_allocated_pools = pool; + + ilog(LOG_TYPE_IRCD, "Capacity is %lu, item size is %lu, alloc size is %lu", + (unsigned long)pool->new_chunk_capacity, + (unsigned long)pool->item_alloc_size, + (unsigned long)(pool->new_chunk_capacity*pool->item_alloc_size)); + + return pool; +} + +/** Helper function for qsort: used to sort pointers to mp_chunk_t into + * descending order of fullness. */ +static int +mp_pool_sort_used_chunks_helper(const void *_a, const void *_b) +{ + mp_chunk_t *a = *(mp_chunk_t * const *)_a; + mp_chunk_t *b = *(mp_chunk_t * const *)_b; + return b->n_allocated - a->n_allocated; +} + +/** Sort the used chunks in <b>pool</b> into descending order of fullness, + * so that we preferentially fill up mostly full chunks before we make + * nearly empty chunks less nearly empty. */ +static void +mp_pool_sort_used_chunks(mp_pool_t *pool) +{ + int i, n = 0, inverted = 0; + mp_chunk_t **chunks, *chunk; + + for (chunk = pool->used_chunks; chunk; chunk = chunk->next) { + ++n; + if (chunk->next && chunk->next->n_allocated > chunk->n_allocated) + ++inverted; + } + + if (!inverted) + return; + + chunks = MyMalloc(sizeof(mp_chunk_t *) * n); + + for (i=0,chunk = pool->used_chunks; chunk; chunk = chunk->next) + chunks[i++] = chunk; + + qsort(chunks, n, sizeof(mp_chunk_t *), mp_pool_sort_used_chunks_helper); + pool->used_chunks = chunks[0]; + chunks[0]->prev = NULL; + + for (i = 1; i < n; ++i) { + chunks[i - 1]->next = chunks[i]; + chunks[i]->prev = chunks[i - 1]; + } + + chunks[n - 1]->next = NULL; + MyFree(chunks); + mp_pool_assert_ok(pool); +} + +/** If there are more than <b>n</b> empty chunks in <b>pool</b>, free the + * excess ones that have been empty for the longest. If + * <b>keep_recently_used</b> is true, do not free chunks unless they have been + * empty since the last call to this function. + **/ +void +mp_pool_clean(mp_pool_t *pool, int n_to_keep, int keep_recently_used) +{ + mp_chunk_t *chunk, **first_to_free; + + mp_pool_sort_used_chunks(pool); + assert(n_to_keep >= 0); + + if (keep_recently_used) { + int n_recently_used = pool->n_empty_chunks - pool->min_empty_chunks; + if (n_to_keep < n_recently_used) + n_to_keep = n_recently_used; + } + + assert(n_to_keep >= 0); + + first_to_free = &pool->empty_chunks; + while (*first_to_free && n_to_keep > 0) { + first_to_free = &(*first_to_free)->next; + --n_to_keep; + } + if (!*first_to_free) { + pool->min_empty_chunks = pool->n_empty_chunks; + return; + } + + chunk = *first_to_free; + while (chunk) { + mp_chunk_t *next = chunk->next; + chunk->magic = 0xdeadbeef; + MyFree(chunk); +#ifdef MEMPOOL_STATS + ++pool->total_chunks_freed; +#endif + --pool->n_empty_chunks; + chunk = next; + } + + pool->min_empty_chunks = pool->n_empty_chunks; + *first_to_free = NULL; +} + +/** Helper: Given a list of chunks, free all the chunks in the list. */ +static void +destroy_chunks(mp_chunk_t *chunk) +{ + mp_chunk_t *next; + + while (chunk) { + chunk->magic = 0xd3adb33f; + next = chunk->next; + MyFree(chunk); + chunk = next; + } +} + +/** Helper: make sure that a given chunk list is not corrupt. 
*/ +static int +assert_chunks_ok(mp_pool_t *pool, mp_chunk_t *chunk, int empty, int full) +{ + mp_allocated_t *allocated; + int n = 0; + + if (chunk) + assert(chunk->prev == NULL); + + while (chunk) { + n++; + assert(chunk->magic == MP_CHUNK_MAGIC); + assert(chunk->pool == pool); + for (allocated = chunk->first_free; allocated; + allocated = allocated->u.next_free) { + assert(allocated->in_chunk == chunk); + } + if (empty) + assert(chunk->n_allocated == 0); + else if (full) + assert(chunk->n_allocated == chunk->capacity); + else + assert(chunk->n_allocated > 0 && chunk->n_allocated < chunk->capacity); + + assert(chunk->capacity == pool->new_chunk_capacity); + + assert(chunk->mem_size == + pool->new_chunk_capacity * pool->item_alloc_size); + + assert(chunk->next_mem >= chunk->mem && + chunk->next_mem <= chunk->mem + chunk->mem_size); + + if (chunk->next) + assert(chunk->next->prev == chunk); + + chunk = chunk->next; + } + + return n; +} + +/** Fail with an assertion if <b>pool</b> is not internally consistent. */ +void +mp_pool_assert_ok(mp_pool_t *pool) +{ + int n_empty; + + n_empty = assert_chunks_ok(pool, pool->empty_chunks, 1, 0); + assert_chunks_ok(pool, pool->full_chunks, 0, 1); + assert_chunks_ok(pool, pool->used_chunks, 0, 0); + + assert(pool->n_empty_chunks == n_empty); +} + +void +mp_pool_garbage_collect(void *arg) +{ + mp_pool_t *pool = mp_allocated_pools; + + for (; pool; pool = pool->next) + mp_pool_clean(pool, 0, 1); +} + +/** Dump information about <b>pool</b>'s memory usage to the Tor log at level + * <b>severity</b>. */ +void +mp_pool_log_status(mp_pool_t *pool) +{ + uint64_t bytes_used = 0; + uint64_t bytes_allocated = 0; + uint64_t bu = 0, ba = 0; + mp_chunk_t *chunk; + int n_full = 0, n_used = 0; + + assert(pool); + + for (chunk = pool->empty_chunks; chunk; chunk = chunk->next) + bytes_allocated += chunk->mem_size; + + ilog(LOG_TYPE_DEBUG, "%llu bytes in %d empty chunks", + bytes_allocated, pool->n_empty_chunks); + for (chunk = pool->used_chunks; chunk; chunk = chunk->next) { + ++n_used; + bu += chunk->n_allocated * pool->item_alloc_size; + ba += chunk->mem_size; + + ilog(LOG_TYPE_DEBUG, " used chunk: %d items allocated", + chunk->n_allocated); + } + + ilog(LOG_TYPE_DEBUG, "%llu/%llu bytes in %d partially full chunks", + bu, ba, n_used); + bytes_used += bu; + bytes_allocated += ba; + bu = ba = 0; + + for (chunk = pool->full_chunks; chunk; chunk = chunk->next) { + ++n_full; + bu += chunk->n_allocated * pool->item_alloc_size; + ba += chunk->mem_size; + } + + ilog(LOG_TYPE_DEBUG, "%llu/%llu bytes in %d full chunks", + bu, ba, n_full); + bytes_used += bu; + bytes_allocated += ba; + + ilog(LOG_TYPE_DEBUG, "Total: %llu/%llu bytes allocated " + "for cell pools are full.", + bytes_used, bytes_allocated); + +#ifdef MEMPOOL_STATS + ilog(LOG_TYPE_DEBUG, "%llu cell allocations ever; " + "%llu chunk allocations ever; " + "%llu chunk frees ever.", + pool->total_items_allocated, + pool->total_chunks_allocated, + pool->total_chunks_freed); +#endif +} |
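Finally, a minimal end-to-end sketch of the new pool's lifecycle. The item type, the function name, and the 4 KB capacity hint are invented for the example; the calls are the interface declared in include/mempool.h above, and the sketch assumes it is built inside the ircd tree where "stdinc.h" is available.

```c
#include "stdinc.h"   /* assumed, as in src/mempool.c above */
#include "mempool.h"

struct example_item {  /* illustrative item type, not from this commit */
  int id;
  char name[32];
};

void
example_pool_lifecycle(void)
{
  /* The second argument is a chunk-size hint in bytes; mp_pool_new() rounds
   * it to a nearby power of two (never below MIN_CHUNK, 4096 bytes) and
   * derives how many items fit per chunk after per-item and per-chunk
   * overhead. */
  mp_pool_t *pool = mp_pool_new(sizeof(struct example_item), 4 * 1024);

  struct example_item *item = mp_pool_get(pool);
  memset(item, 0, sizeof(*item));  /* not pre-zeroed, unlike BlockHeapAlloc() */
  item->id = 1;

  mp_pool_release(item);           /* back onto its chunk's freelist */

  mp_pool_assert_ok(pool);         /* optional internal consistency check */
  mp_pool_clean(pool, 0, 0);       /* free every currently empty chunk... */
  mp_pool_destroy(pool);           /* ...or tear the whole pool down */
}
```

Note that explicit cleaning is rarely needed in the daemon itself: both allocators hook a periodic event (every 119 seconds, in initBlockHeap() before and mp_pool_init() after), and mp_pool_garbage_collect() walks mp_allocated_pools calling mp_pool_clean(pool, 0, 1) so that chunks which have stayed empty since the last pass are freed automatically.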