提交 ea0dc132 authored 作者: Shane Bryldt's avatar Shane Bryldt

FS-10167: Rewrote the ks_pool allocator, no longer uses paging or internal block…

FS-10167: Rewrote the ks_pool allocator, no longer uses paging or internal block allocation, but still retains reference counting and auto cleanup callbacks, should be much more efficient now on windows than the original mmap approach, and all tests now run successfully!
上级 aaa26c6d
......@@ -312,7 +312,7 @@ KS_DECLARE(void) blade_connection_disconnect(blade_connection_t *bc)
{
ks_assert(bc);
if (bc->state != BLADE_CONNECTION_STATE_DETACH && bc->state != BLADE_CONNECTION_STATE_DISCONNECT) {
if (bc->state != BLADE_CONNECTION_STATE_DETACH && bc->state != BLADE_CONNECTION_STATE_DISCONNECT && bc->state != BLADE_CONNECTION_STATE_CLEANUP) {
ks_log(KS_LOG_DEBUG, "Connection (%s) disconnecting\n", bc->id);
blade_connection_state_set(bc, BLADE_CONNECTION_STATE_DETACH);
}
......
......@@ -77,7 +77,7 @@ KS_DECLARE(ks_status_t) blade_datastore_create(blade_datastore_t **bdsP, ks_pool
ks_assert(bdsP);
ks_assert(pool);
ks_assert(tpool);
//ks_assert(tpool);
bds = ks_pool_alloc(pool, sizeof(*bds));
bds->pool = pool;
......
......@@ -869,7 +869,7 @@ blade_connection_state_hook_t blade_transport_wss_on_state_disconnect(blade_conn
list_delete(&bt_wss->module->connected, bc);
if (bt_wss_init) blade_transport_wss_init_destroy(&bt_wss_init);
if (bt_wss) blade_transport_wss_destroy(&bt_wss);
if (bt_wss) blade_transport_wss_destroy(&bt_wss); // @TODO: Scream at this very loudly until I feel better for it wasting 2 days to track down, and then fix the issue it's causing
return BLADE_CONNECTION_STATE_HOOK_SUCCESS;
}
......@@ -1092,6 +1092,8 @@ blade_connection_state_hook_t blade_transport_wss_on_state_attach_inbound(blade_
// behaviour to simply go as far as assigning a session to the connection and let the system handle the rest
if (json_req) cJSON_Delete(json_req);
if (json_res) cJSON_Delete(json_res);
return ret;
}
......@@ -1223,6 +1225,7 @@ blade_connection_state_hook_t blade_transport_wss_on_state_attach_outbound(blade
done:
if (json_req) cJSON_Delete(json_req);
if (json_res) cJSON_Delete(json_res);
return ret;
}
......@@ -1261,15 +1264,15 @@ blade_connection_state_hook_t blade_transport_wss_on_state_ready_outbound(blade_
if (condition == BLADE_CONNECTION_STATE_CONDITION_PRE) {
blade_session_t *bs = NULL;
cJSON *req = NULL;
//cJSON *req = NULL;
ks_log(KS_LOG_DEBUG, "State Callback: %d\n", (int32_t)condition);
bs = blade_handle_sessions_get(blade_connection_handle_get(bc), blade_connection_session_get(bc));
ks_assert(bs);
blade_rpc_request_create(blade_connection_pool_get(bc), &req, NULL, NULL, "blade.test.echo");
blade_session_send(bs, req, blade_test_echo_response_handler);
//blade_rpc_request_create(blade_connection_pool_get(bc), &req, NULL, NULL, "blade.test.echo");
//blade_session_send(bs, req, blade_test_echo_response_handler);
blade_session_read_unlock(bs);
}
......
......@@ -159,6 +159,7 @@ KS_DECLARE(ks_status_t) blade_session_shutdown(blade_session_t *bs)
if (bs->state_thread) {
bs->shutdown = KS_TRUE;
ks_thread_join(bs->state_thread);
printf("FREEING SESSION THREAD %p\n", (void *)bs->state_thread);
ks_pool_free(bs->pool, &bs->state_thread);
bs->shutdown = KS_FALSE;
}
......
......@@ -122,10 +122,10 @@ void on_blade_session_state_callback(blade_session_t *bs, blade_session_state_co
if (condition == BLADE_SESSION_STATE_CONDITION_PRE) {
ks_log(KS_LOG_DEBUG, "Blade Session State Changed: %s, %d\n", blade_session_id_get(bs), state);
if (state == BLADE_SESSION_STATE_READY) {
cJSON *req = NULL;
blade_rpc_request_create(blade_session_pool_get(bs), &req, NULL, NULL, "blade.chat.join");
blade_session_send(bs, req, on_blade_chat_join_response);
cJSON_Delete(req);
//cJSON *req = NULL;
//blade_rpc_request_create(blade_session_pool_get(bs), &req, NULL, NULL, "blade.chat.join");
//blade_session_send(bs, req, on_blade_chat_join_response);
//cJSON_Delete(req);
}
}
}
......
......@@ -72,13 +72,13 @@ int main(int argc, char **argv)
return EXIT_FAILURE;
}
blade_module_chat_on_load(&mod_chat, bh);
blade_module_chat_on_startup(mod_chat, config_blade);
//blade_module_chat_on_load(&mod_chat, bh);
//blade_module_chat_on_startup(mod_chat, config_blade);
loop(bh);
blade_module_chat_on_shutdown(mod_chat);
blade_module_chat_on_unload(mod_chat);
//blade_module_chat_on_shutdown(mod_chat);
//blade_module_chat_on_unload(mod_chat);
blade_module_wss_on_shutdown(mod_wss);
......
......@@ -72,16 +72,22 @@ _Check_return_ static __inline int _zstr(_In_opt_z_ const char *s)
#define ks_set_string(_x, _y) ks_copy_string(_x, _y, sizeof(_x))
#define ks_safe_free(_x) if (_x) free(_x); _x = NULL
#define end_of(_s) *(*_s == '\0' ? _s : _s + strlen(_s) - 1)
#define ks_test_flag(obj, flag) ((obj)->flags & flag)
#define ks_set_flag(obj, flag) (obj)->flags |= (flag)
#define ks_clear_flag(obj, flag) (obj)->flags &= ~(flag)
#define ks_recv(_h) ks_recv_event(_h, 0, NULL)
#define ks_recv_timed(_h, _ms) ks_recv_event_timed(_h, _ms, 0, NULL)
/*
* bitflag tools
*/
#define BIT_FLAG(x) (1 << (x))
#define BIT_SET(v,f) ((v) |= (f))
#define BIT_CLEAR(v,f) ((v) &= ~(f))
#define BIT_IS_SET(v,f) ((v) & (f))
#define BIT_TOGGLE(v,f) ((v) ^= (f))
KS_DECLARE(ks_status_t) ks_init(void);
KS_DECLARE(ks_status_t) ks_shutdown(void);
KS_DECLARE(ks_pool_t *) ks_global_pool(void);
KS_DECLARE(ks_status_t) ks_global_set_cleanup(ks_pool_cleanup_fn_t fn, void *arg);
KS_DECLARE(ks_status_t) ks_global_set_cleanup(ks_pool_cleanup_callback_t callback, void *arg);
KS_DECLARE(int) ks_vasprintf(char **ret, const char *fmt, va_list ap);
//KS_DECLARE_DATA extern ks_logger_t ks_logger;
......
......@@ -33,29 +33,7 @@ KS_BEGIN_EXTERN_C
*/
typedef enum {
KS_POOL_FLAG_DEFAULT = 0,
KS_POOL_FLAG_BEST_FIT = (1 << 0),
KS_POOL_FLAG_NO_ASSERT = (1 << 1),
KS_POOL_FLAG_NO_ZERO = (1 << 2),
/*
* Choose a best fit algorithm not first fit. This takes more CPU
* time but will result in a tighter heap.
*/
KS_POOL_FLAG_HEAVY_PACKING = (1 << 1)
/*
* This enables very heavy packing at the possible expense of CPU.
* This affects a number of parts of the library.
*
* By default the 1st page of memory is reserved for the main ks_pool
* structure. This flag will cause the rest of the 1st block to be
* available for use as user memory.
*
* By default the library looks through the memory when freed looking
* for a magic value. There is an internal max size that it will look
* and then it will give up. This flag forces it to look until it
* finds it.
*/
KS_POOL_FLAG_DEFAULT = 0
} ks_pool_flag_t;
/*
......@@ -83,7 +61,7 @@ typedef enum {
*
* ARGUMENT:
*
* mp_p -> Associated ks_pool address.
* pool -> Associated ks_pool address.
*
* func_id -> Integer function ID which identifies which ks_pool
* function is being called.
......@@ -102,10 +80,10 @@ typedef enum {
* old_byte_size -> Optionally specified old byte size. For
* ks_pool_resize only.
*/
typedef void (*ks_pool_log_func_t) (const void *mp_p,
typedef void (*ks_pool_log_func_t) (const void *pool,
const int func_id,
const unsigned long byte_size,
const unsigned long ele_n, const void *old_addr, const void *new_addr, const unsigned long old_byte_size);
const ks_size_t byte_size,
const ks_size_t ele_n, const void *old_addr, const void *new_addr, const ks_size_t old_byte_size);
/*
* ks_pool_t *ks_pool_open
......@@ -144,10 +122,10 @@ KS_DECLARE(ks_status_t) ks_pool_open(ks_pool_t **poolP);
*
* ARGUMENTS:
*
* mp_pp <-> Pointer to pointer of our memory pool.
* poolP <-> Pointer to pointer of our memory pool.
*/
KS_DECLARE(ks_status_t) ks_pool_close(ks_pool_t **mp_pP);
KS_DECLARE(ks_status_t) ks_pool_close(ks_pool_t **poolP);
/*
* int ks_pool_clear
......@@ -164,10 +142,10 @@ KS_DECLARE(ks_status_t) ks_pool_close(ks_pool_t **mp_pP);
*
* ARGUMENTS:
*
* mp_p <-> Pointer to our memory pool.
* pool <-> Pointer to our memory pool.
*/
KS_DECLARE(ks_status_t) ks_pool_clear(ks_pool_t *mp_p);
KS_DECLARE(ks_status_t) ks_pool_clear(ks_pool_t *pool);
/*
* void *ks_pool_alloc
......@@ -184,13 +162,12 @@ KS_DECLARE(ks_status_t) ks_pool_clear(ks_pool_t *mp_p);
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool. If NULL then it will do a
* normal malloc.
* pool -> Pointer to the memory pool.
*
* byte_size -> Number of bytes to allocate in the pool. Must be >0.
* size -> Number of bytes to allocate in the pool. Must be >0.
*
*/
KS_DECLARE(void *) ks_pool_alloc(ks_pool_t *mp_p, const unsigned long byte_size);
KS_DECLARE(void *) ks_pool_alloc(ks_pool_t *pool, const ks_size_t size);
/*
* void *ks_pool_alloc_ex
......@@ -207,15 +184,14 @@ KS_DECLARE(void *) ks_pool_alloc(ks_pool_t *mp_p, const unsigned long byte_size)
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool. If NULL then it will do a
* normal malloc.
* pool -> Pointer to the memory pool.
*
* byte_size -> Number of bytes to allocate in the pool. Must be >0.
* size -> Number of bytes to allocate in the pool. Must be >0.
*
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
KS_DECLARE(void *) ks_pool_alloc_ex(ks_pool_t *mp_p, const unsigned long byte_size, ks_status_t *error_p);
KS_DECLARE(void *) ks_pool_alloc_ex(ks_pool_t *pool, const ks_size_t size, ks_status_t *error_p);
/*
* void *ks_pool_calloc
......@@ -233,15 +209,14 @@ KS_DECLARE(void *) ks_pool_alloc_ex(ks_pool_t *mp_p, const unsigned long byte_si
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool. If NULL then it will do a
* normal calloc.
* pool -> Pointer to the memory pool.
*
* ele_n -> Number of elements to allocate.
*
* ele_size -> Number of bytes per element being allocated.
*
*/
KS_DECLARE(void *) ks_pool_calloc(ks_pool_t *mp_p, const unsigned long ele_n, const unsigned long ele_size);
KS_DECLARE(void *) ks_pool_calloc(ks_pool_t *pool, const ks_size_t ele_n, const ks_size_t ele_size);
/*
* void *ks_pool_calloc_ex
......@@ -259,8 +234,7 @@ KS_DECLARE(void *) ks_pool_calloc(ks_pool_t *mp_p, const unsigned long ele_n, co
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool. If NULL then it will do a
* normal calloc.
* pool -> Pointer to the memory pool.
*
* ele_n -> Number of elements to allocate.
*
......@@ -269,7 +243,7 @@ KS_DECLARE(void *) ks_pool_calloc(ks_pool_t *mp_p, const unsigned long ele_n, co
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
KS_DECLARE(void *) ks_pool_calloc_ex(ks_pool_t *mp_p, const unsigned long ele_n, const unsigned long ele_size, ks_status_t *error_p);
KS_DECLARE(void *) ks_pool_calloc_ex(ks_pool_t *pool, const ks_size_t ele_n, const ks_size_t ele_size, ks_status_t *error_p);
/*
* int ks_pool_free
......@@ -286,14 +260,14 @@ KS_DECLARE(void *) ks_pool_calloc_ex(ks_pool_t *mp_p, const unsigned long ele_n,
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool. If NULL then it will do a
* pool -> Pointer to the memory pool. If NULL then it will do a
* normal free.
*
* addr <-> Address to free.
*
*/
KS_DECLARE(ks_status_t) ks_pool_free_ex(ks_pool_t *mp_p, void **addrP);
KS_DECLARE(ks_status_t) ks_pool_free_ex(ks_pool_t *pool, void **addrP);
/*
......@@ -301,7 +275,7 @@ KS_DECLARE(ks_status_t) ks_pool_free_ex(ks_pool_t *mp_p, void **addrP);
*
* DESCRIPTION:
*
* Ref count increment an address in a memoory pool.
* Ref count increment an address in a memory pool.
*
* RETURNS:
*
......@@ -311,7 +285,7 @@ KS_DECLARE(ks_status_t) ks_pool_free_ex(ks_pool_t *mp_p, void **addrP);
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
* pool -> Pointer to the memory pool.
*
* addr -> The addr to ref
*
......@@ -319,7 +293,7 @@ KS_DECLARE(ks_status_t) ks_pool_free_ex(ks_pool_t *mp_p, void **addrP);
* a ks_pool error code.
*/
KS_DECLARE(void *) ks_pool_ref_ex(ks_pool_t *mp_p, void *addr, ks_status_t *error_p);
KS_DECLARE(void *) ks_pool_ref_ex(ks_pool_t *pool, void *addr, ks_status_t *error_p);
#define ks_pool_ref(_p, _x) ks_pool_ref_ex(_p, _x, NULL)
......@@ -328,7 +302,7 @@ KS_DECLARE(void *) ks_pool_ref_ex(ks_pool_t *mp_p, void *addr, ks_status_t *erro
*
* DESCRIPTION:
*
* Reallocate an address in a mmeory pool to a new size.
* Reallocate an address in a memory pool to a new size.
*
* RETURNS:
*
......@@ -338,22 +312,22 @@ KS_DECLARE(void *) ks_pool_ref_ex(ks_pool_t *mp_p, void *addr, ks_status_t *erro
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool. If NULL then it will do a
* pool -> Pointer to the memory pool. If NULL then it will do a
* normal realloc.
*
* old_addr -> Previously allocated address.
*
* new_byte_size -> New size of the allocation.
* new_size -> New size of the allocation.
*
*/
KS_DECLARE(void *) ks_pool_resize(ks_pool_t *mp_p, void *old_addr, const unsigned long new_byte_size);
KS_DECLARE(void *) ks_pool_resize(ks_pool_t *pool, void *old_addr, const ks_size_t new_size);
/*
* void *ks_pool_resize_ex
*
* DESCRIPTION:
*
* Reallocate an address in a mmeory pool to a new size.
* Reallocate an address in a memory pool to a new size.
*
* RETURNS:
*
......@@ -363,17 +337,16 @@ KS_DECLARE(void *) ks_pool_resize(ks_pool_t *mp_p, void *old_addr, const unsigne
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool. If NULL then it will do a
* normal realloc.
* pool -> Pointer to the memory pool.
*
* old_addr -> Previously allocated address.
*
* new_byte_size -> New size of the allocation.
* new_size -> New size of the allocation.
*
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
KS_DECLARE(void *) ks_pool_resize_ex(ks_pool_t *mp_p, void *old_addr, const unsigned long new_byte_size, ks_status_t *error_p);
KS_DECLARE(void *) ks_pool_resize_ex(ks_pool_t *pool, void *old_addr, const ks_size_t new_size, ks_status_t *error_p);
/*
* int ks_pool_stats
......@@ -390,10 +363,7 @@ KS_DECLARE(void *) ks_pool_resize_ex(ks_pool_t *mp_p, void *old_addr, const unsi
*
* ARGUMENTS:
*
* mp_p -> Pointer to the memory pool.
*
* page_size_p <- Pointer to an unsigned integer which, if not NULL,
* will be set to the page-size of the pool.
* pool -> Pointer to the memory pool.
*
* num_alloced_p <- Pointer to an unsigned long which, if not NULL,
* will be set to the number of pointers currently allocated in pool.
......@@ -409,8 +379,7 @@ KS_DECLARE(void *) ks_pool_resize_ex(ks_pool_t *mp_p, void *old_addr, const unsi
* will be set to the total amount of space (including administrative
* overhead) used by the pool.
*/
KS_DECLARE(ks_status_t) ks_pool_stats(const ks_pool_t *mp_p, unsigned int *page_size_p,
unsigned long *num_alloced_p, unsigned long *user_alloced_p, unsigned long *max_alloced_p, unsigned long *tot_alloced_p);
KS_DECLARE(ks_status_t) ks_pool_stats(const ks_pool_t *pool, ks_size_t *num_alloced_p, ks_size_t *user_alloced_p, ks_size_t *max_alloced_p, ks_size_t *tot_alloced_p);
/*
* int ks_pool_set_log_func
......@@ -428,39 +397,12 @@ KS_DECLARE(ks_status_t) ks_pool_stats(const ks_pool_t *mp_p, unsigned int *page_
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
* pool -> Pointer to the memory pool.
*
* log_func -> Log function (defined in ks_pool.h) which will be called
* with each ks_pool transaction.
*/
KS_DECLARE(ks_status_t) ks_pool_set_log_func(ks_pool_t *mp_p, ks_pool_log_func_t log_func);
/*
* int ks_pool_set_max_pages
*
* DESCRIPTION:
*
* Set the maximum number of pages that the library will use. Once it
* hits the limit it will return KS_STATUS_NO_PAGES.
*
* NOTE: if the KS_POOL_FLAG_HEAVY_PACKING is set then this max-pages
* value will include the page with the ks_pool header structure in it.
* If the flag is _not_ set then the max-pages will not include this
* first page.
*
* RETURNS:
*
* Success - KS_STATUS_SUCCESS
*
* Failure - ks_status_t error code
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
*
* max_pages -> Maximum number of pages used by the library.
*/
KS_DECLARE(ks_status_t) ks_pool_set_max_pages(ks_pool_t *mp_p, const unsigned int max_pages);
KS_DECLARE(ks_status_t) ks_pool_set_log_func(ks_pool_t *pool, ks_pool_log_func_t log_func);
/*
* const char *ks_pool_strerror
......@@ -481,16 +423,16 @@ KS_DECLARE(ks_status_t) ks_pool_set_max_pages(ks_pool_t *mp_p, const unsigned in
*/
KS_DECLARE(const char *) ks_pool_strerror(const ks_status_t error);
KS_DECLARE(ks_status_t) ks_pool_set_cleanup(ks_pool_t *mp_p, void *ptr, void *arg, int type, ks_pool_cleanup_fn_t fn);
KS_DECLARE(ks_status_t) ks_pool_set_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_callback_t callback);
#define ks_pool_free(_p, _x) ks_pool_free_ex(_p, (void **)_x)
/*<<<<<<<<<< This is end of the auto-generated output from fillproto. */
KS_DECLARE(char *) ks_pstrdup(ks_pool_t *pool, const char *str);
KS_DECLARE(char *) ks_pstrndup(ks_pool_t *pool, const char *str, size_t len);
KS_DECLARE(char *) ks_pstrmemdup(ks_pool_t *pool, const char *str, size_t len);
KS_DECLARE(void *) ks_pmemdup(ks_pool_t *pool, const void *buf, size_t len);
KS_DECLARE(char *) ks_pstrndup(ks_pool_t *pool, const char *str, ks_size_t len);
KS_DECLARE(char *) ks_pstrmemdup(ks_pool_t *pool, const char *str, ks_size_t len);
KS_DECLARE(void *) ks_pmemdup(ks_pool_t *pool, const void *buf, ks_size_t len);
KS_DECLARE(char *) ks_pstrcat(ks_pool_t *pool, ...);
KS_DECLARE(char *) ks_psprintf(ks_pool_t *pool, const char *fmt, ...);
......
......@@ -219,7 +219,7 @@ typedef struct {
char host[48];
} ks_sockaddr_t;
typedef void (*ks_pool_cleanup_fn_t) (ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype);
typedef void (*ks_pool_cleanup_callback_t)(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type);
typedef void (*ks_logger_t) (const char *file, const char *func, int line, int level, const char *fmt, ...);
typedef void (*ks_listen_callback_t) (ks_socket_t server_sock, ks_socket_t client_sock, ks_sockaddr_t *addr, void *user_data);
......
......@@ -56,9 +56,9 @@ KS_DECLARE(void) ks_random_string(char *buf, uint16_t len, char *set)
}
KS_DECLARE(ks_status_t) ks_global_set_cleanup(ks_pool_cleanup_fn_t fn, void *arg)
KS_DECLARE(ks_status_t) ks_global_set_cleanup(ks_pool_cleanup_callback_t callback, void *arg)
{
return ks_pool_set_cleanup(ks_global_pool(), NULL, arg, 0, fn);
return ks_pool_set_cleanup(ks_global_pool(), NULL, arg, callback);
}
KS_DECLARE(ks_status_t) ks_init(void)
......
......@@ -140,7 +140,7 @@ const float max_load_factor = 0.65f;
/*****************************************************************************/
static void ks_hash_cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype)
static void ks_hash_cleanup(ks_pool_t *mpool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
//ks_hash_t *hash = (ks_hash_t *) ptr;
......@@ -270,7 +270,7 @@ ks_hash_create_ex(ks_hash_t **hp, unsigned int minsize,
*hp = h;
ks_pool_set_cleanup(pool, h, NULL, 0, ks_hash_cleanup);
ks_pool_set_cleanup(pool, h, NULL, ks_hash_cleanup);
return KS_STATUS_SUCCESS;
}
......
......@@ -42,7 +42,7 @@ struct ks_mutex {
uint8_t malloc;
};
static void ks_mutex_cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype)
static void ks_mutex_cleanup(ks_pool_t *mpool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
ks_mutex_t *mutex = (ks_mutex_t *) ptr;
......@@ -150,7 +150,7 @@ KS_DECLARE(ks_status_t) ks_mutex_create(ks_mutex_t **mutex, unsigned int flags,
status = KS_STATUS_SUCCESS;
if (pool) {
ks_pool_set_cleanup(pool, check, NULL, 0, ks_mutex_cleanup);
ks_pool_set_cleanup(pool, check, NULL, ks_mutex_cleanup);
}
done:
......@@ -224,7 +224,7 @@ struct ks_cond {
uint8_t static_mutex;
};
static void ks_cond_cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype)
static void ks_cond_cleanup(ks_pool_t *mpool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
ks_cond_t *cond = (ks_cond_t *) ptr;
......@@ -281,7 +281,7 @@ KS_DECLARE(ks_status_t) ks_cond_create_ex(ks_cond_t **cond, ks_pool_t *pool, ks_
*cond = check;
status = KS_STATUS_SUCCESS;
ks_pool_set_cleanup(pool, check, NULL, 0, ks_cond_cleanup);
ks_pool_set_cleanup(pool, check, NULL, ks_cond_cleanup);
done:
return status;
......@@ -436,7 +436,7 @@ struct ks_rwl {
uint32_t wlc;
};
static void ks_rwl_cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype)
static void ks_rwl_cleanup(ks_pool_t *mpool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
#ifndef WIN32
ks_rwl_t *rwlock = (ks_rwl_t *) ptr;
......@@ -494,7 +494,7 @@ KS_DECLARE(ks_status_t) ks_rwl_create(ks_rwl_t **rwlock, ks_pool_t *pool)
*rwlock = check;
status = KS_STATUS_SUCCESS;
ks_pool_set_cleanup(pool, check, NULL, 0, ks_rwl_cleanup);
ks_pool_set_cleanup(pool, check, NULL, ks_rwl_cleanup);
done:
return status;
}
......
......@@ -30,37 +30,34 @@
#include "ks.h"
#define KS_POOL_MAGIC 0xABACABA /* magic for struct */
#define BLOCK_MAGIC 0xB1B1007 /* magic for blocks */
#define FENCE_MAGIC0 (unsigned char)(0xFAU) /* 1st magic mem byte */
#define FENCE_MAGIC1 (unsigned char)(0xD3U) /* 2nd magic mem byte */
#define FENCE_SIZE 2 /* fence space */
#define MIN_ALLOCATION (sizeof(ks_pool_free_t)) /* min alloc */
#define MAX_FREE_SEARCH 10240 /* max size to search */
#define MAX_FREE_LIST_SEARCH 100 /* max looking for free mem */
#define PRE_MAGIC1 0x33U
#define PRE_MAGIC2 0xCCU
typedef struct alloc_prefix_s {
unsigned char m1;
unsigned long size;
unsigned char m2;
unsigned int refs;
unsigned int padding;
} alloc_prefix_t;
#define PREFIX_SIZE sizeof(struct alloc_prefix_s)
//#define DEBUG 1
/*
* bitflag tools for Variable and a Flag
*/
#define BIT_FLAG(x) (1 << (x))
#define BIT_SET(v,f) (v) |= (f)
#define BIT_CLEAR(v,f) (v) &= ~(f)
#define BIT_IS_SET(v,f) ((v) & (f))
#define BIT_TOGGLE(v,f) (v) ^= (f)
//#define DEBUG 1
#define KS_POOL_MAGIC 0xDEADBEEF /* magic for struct */
#define KS_POOL_PREFIX_MAGIC 0xDEADBEEF
#define KS_POOL_FENCE_MAGIC0 (ks_byte_t)(0xFAU) /* 1st magic mem byte */
#define KS_POOL_FENCE_MAGIC1 (ks_byte_t)(0xD3U) /* 2nd magic mem byte */
#define KS_POOL_FENCE_SIZE 2 /* fence space */
typedef struct ks_pool_prefix_s ks_pool_prefix_t;
struct ks_pool_prefix_s {
ks_size_t magic1;
ks_size_t size;
ks_size_t magic2;
ks_size_t refs;
ks_pool_prefix_t *prev;
ks_pool_prefix_t *next;
ks_size_t magic3;
ks_pool_cleanup_callback_t cleanup_callback;
void *cleanup_arg;
ks_size_t magic4;
ks_size_t reserved[2];
};
#define KS_POOL_PREFIX_SIZE sizeof(ks_pool_prefix_t)
#define SET_POINTER(pnt, val) \
do { \
......@@ -69,441 +66,126 @@ typedef struct alloc_prefix_s {
} \
} while(0)
#define BLOCK_FLAG_USED BIT_FLAG(0) /* block is used */
#define BLOCK_FLAG_FREE BIT_FLAG(1) /* block is free */
#define DEFAULT_PAGE_MULT 16 /* pagesize = this * getpagesize */
/* How many pages SIZE bytes resides in. We add in the block header. */
#define PAGES_IN_SIZE(mp_p, size) (((size) + sizeof(ks_pool_block_t) + \
(mp_p)->mp_page_size - 1) / \
(mp_p)->mp_page_size)
#define SIZE_OF_PAGES(mp_p, page_n) ((page_n) * (mp_p)->mp_page_size)
#define MAX_BITS 30 /* we only can allocate 1gb chunks */
#define MAX_BLOCK_USER_MEMORY(mp_p) ((mp_p)->mp_page_size - \
sizeof(ks_pool_block_t))
#define FIRST_ADDR_IN_BLOCK(block_p) (void *)((char *)(block_p) + \
sizeof(ks_pool_block_t))
#define MEMORY_IN_BLOCK(block_p) ((char *)(block_p)->mb_bounds_p - \
((char *)(block_p) + \
sizeof(ks_pool_block_t)))
typedef struct ks_pool_cleanup_node_s {
ks_pool_cleanup_fn_t fn;
void *ptr;
void *arg;
int type;
struct ks_pool_cleanup_node_s *next;
} ks_pool_cleanup_node_t;
struct ks_pool_s {
unsigned int mp_magic; /* magic number for struct */
unsigned int mp_flags; /* flags for the struct */
unsigned long mp_alloc_c; /* number of allocations */
unsigned long mp_user_alloc; /* user bytes allocated */
unsigned long mp_max_alloc; /* maximum user bytes allocated */
unsigned int mp_page_c; /* number of pages allocated */
unsigned int mp_max_pages; /* maximum number of pages to use */
unsigned int mp_page_size; /* page-size of our system */
off_t mp_top; /* top of our allocations in fd */
ks_pool_log_func_t mp_log_func; /* log callback function */
void *mp_min_p; /* min address in pool for checks */
void *mp_bounds_p; /* max address in pool for checks */
struct ks_pool_block_st *mp_first_p; /* first memory block we are using */
struct ks_pool_block_st *mp_last_p; /* last memory block we are using */
struct ks_pool_block_st *mp_free[MAX_BITS + 1]; /* free lists based on size */
unsigned int mp_magic2; /* upper magic for overwrite sanity */
ks_pool_cleanup_node_t *clfn_list;
ks_size_t magic1; /* magic number for struct */
ks_size_t flags; /* flags for the struct */
ks_size_t alloc_c; /* number of allocations */
ks_size_t user_alloc; /* user bytes allocated */
ks_size_t max_alloc; /* maximum user bytes allocated */
ks_pool_log_func_t log_func; /* log callback function */
ks_pool_prefix_t *first; /* first memory allocation we are using */
ks_pool_prefix_t *last; /* last memory allocation we are using */
ks_size_t magic2; /* upper magic for overwrite sanity */
ks_mutex_t *mutex;
ks_mutex_t *cleanup_mutex;
uint8_t cleaning_up;
ks_bool_t cleaning_up;
};
/* for debuggers to be able to interrogate the generic type in the .h file */
typedef ks_pool_t ks_pool_ext_t;
/*
* Block header structure. This structure *MUST* be long-word
* aligned.
*/
typedef struct ks_pool_block_st {
unsigned int mb_magic; /* magic number for block header */
void *mb_bounds_p; /* block boundary location */
struct ks_pool_block_st *mb_next_p; /* linked list next pointer */
unsigned int mb_magic2; /* upper magic for overwrite sanity */
} ks_pool_block_t;
/*
* Free list structure.
*/
typedef struct {
void *mf_next_p; /* pointer to the next free address */
unsigned long mf_size; /* size of the free block */
} ks_pool_free_t;
#ifndef MAP_ANON
#define MAP_ANON MAP_ANONYMOUS
#endif
/* local variables */
static int enabled_b = 0; /* lib initialized? */
static unsigned int min_bit_free_next = 0; /* min size of next pnt */
static unsigned int min_bit_free_size = 0; /* min size of next + size */
static unsigned long bit_array[MAX_BITS + 1]; /* size -> bit */
#ifdef _MSC_VER
#include <Windows.h>
long getpagesize(void)
{
static long g_pagesize = 0;
if (!g_pagesize) {
SYSTEM_INFO system_info;
GetSystemInfo(&system_info);
g_pagesize = system_info.dwPageSize;
}
return g_pagesize;
}
#endif
/* We need mutex here probably or this notion of cleanup stuff cannot be threadsafe */
#if 0
static ks_pool_cleanup_node_t *find_cleanup_node(ks_pool_t *mp_p, void *ptr)
{
ks_pool_cleanup_node_t *np, *cnode = NULL;
ks_assert(mp_p);
ks_assert(ptr);
for (np = mp_p->clfn_list; np; np = np->next) {
if (np->ptr == ptr) {
cnode = np;
goto end;
}
}
end:
static ks_status_t check_pool(const ks_pool_t *pool);
static ks_status_t check_fence(const void *addr);
static void write_fence(void *addr);
static ks_status_t check_prefix(const ks_pool_prefix_t *prefix);
/* done, the nodes are all from the pool so they will be destroyed */
return cnode;
}
#endif
static void perform_pool_cleanup_on_free(ks_pool_t *mp_p, void *ptr)
static void perform_pool_cleanup_on_free(ks_pool_t *pool, ks_pool_prefix_t *prefix)
{
ks_pool_cleanup_node_t *np, *cnode, *last = NULL;
np = mp_p->clfn_list;
void *addr;
ks_mutex_lock(mp_p->mutex);
if (mp_p->cleaning_up) {
ks_mutex_unlock(mp_p->mutex);
return;
}
ks_mutex_unlock(mp_p->mutex);
ks_assert(pool);
ks_assert(prefix);
ks_mutex_lock(mp_p->cleanup_mutex);
while(np) {
if (np->ptr == ptr) {
if (last) {
last->next = np->next;
} else {
mp_p->clfn_list = np->next;
}
if (pool->cleaning_up) return;
cnode = np;
np = np->next;
cnode->fn(mp_p, cnode->ptr, cnode->arg, cnode->type, KS_MPCL_ANNOUNCE, KS_MPCL_FREE);
cnode->fn(mp_p, cnode->ptr, cnode->arg, cnode->type, KS_MPCL_TEARDOWN, KS_MPCL_FREE);
cnode->fn(mp_p, cnode->ptr, cnode->arg, cnode->type, KS_MPCL_DESTROY, KS_MPCL_FREE);
addr = (void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE);
continue;
if (prefix->cleanup_callback) {
prefix->cleanup_callback(pool, addr, prefix->cleanup_arg, KS_MPCL_ANNOUNCE, KS_MPCL_FREE);
prefix->cleanup_callback(pool, addr, prefix->cleanup_arg, KS_MPCL_TEARDOWN, KS_MPCL_FREE);
prefix->cleanup_callback(pool, addr, prefix->cleanup_arg, KS_MPCL_DESTROY, KS_MPCL_FREE);
}
last = np;
np = np->next;
}
ks_mutex_unlock(mp_p->cleanup_mutex);
}
static void perform_pool_cleanup(ks_pool_t *mp_p)
static void perform_pool_cleanup(ks_pool_t *pool)
{
ks_pool_cleanup_node_t *np;
ks_pool_prefix_t *prefix;
ks_mutex_lock(mp_p->mutex);
if (mp_p->cleaning_up) {
ks_mutex_unlock(mp_p->mutex);
if (pool->cleaning_up) {
return;
}
mp_p->cleaning_up = 1;
ks_mutex_unlock(mp_p->mutex);
pool->cleaning_up = KS_TRUE;
ks_mutex_lock(mp_p->cleanup_mutex);
for (np = mp_p->clfn_list; np; np = np->next) {
np->fn(mp_p, np->ptr, np->arg, np->type, KS_MPCL_ANNOUNCE, KS_MPCL_GLOBAL_FREE);
for (prefix = pool->first; prefix; prefix = prefix->next) {
if (!prefix->cleanup_callback) continue;
prefix->cleanup_callback(pool, (void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE), prefix->cleanup_arg, KS_MPCL_ANNOUNCE, KS_MPCL_GLOBAL_FREE);
}
for (np = mp_p->clfn_list; np; np = np->next) {
np->fn(mp_p, np->ptr, np->arg, np->type, KS_MPCL_TEARDOWN, KS_MPCL_GLOBAL_FREE);
for (prefix = pool->first; prefix; prefix = prefix->next) {
if (!prefix->cleanup_callback) continue;
prefix->cleanup_callback(pool, (void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE), prefix->cleanup_arg, KS_MPCL_TEARDOWN, KS_MPCL_GLOBAL_FREE);
}
for (np = mp_p->clfn_list; np; np = np->next) {
np->fn(mp_p, np->ptr, np->arg, np->type, KS_MPCL_DESTROY, KS_MPCL_GLOBAL_FREE);
for (prefix = pool->first; prefix; prefix = prefix->next) {
if (!prefix->cleanup_callback) continue;
prefix->cleanup_callback(pool, (void *)((uintptr_t)prefix + KS_POOL_PREFIX_SIZE), prefix->cleanup_arg, KS_MPCL_DESTROY, KS_MPCL_GLOBAL_FREE);
}
ks_mutex_unlock(mp_p->cleanup_mutex);
mp_p->clfn_list = NULL;
}
KS_DECLARE(ks_status_t) ks_pool_set_cleanup(ks_pool_t *mp_p, void *ptr, void *arg, int type, ks_pool_cleanup_fn_t fn)
KS_DECLARE(ks_status_t) ks_pool_set_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_cleanup_callback_t callback)
{
ks_pool_cleanup_node_t *cnode;
ks_status_t ret = KS_STATUS_SUCCESS;
ks_pool_prefix_t *prefix = NULL;
ks_assert(mp_p);
ks_assert(pool);
ks_assert(ptr);
ks_assert(fn);
ks_assert(callback);
/* don't set cleanup on this cnode obj or it will be an endless loop */
cnode = (ks_pool_cleanup_node_t *) ks_pool_alloc(mp_p, sizeof(*cnode));
prefix = (ks_pool_prefix_t *)((uintptr_t)ptr - KS_POOL_PREFIX_SIZE);
if (!cnode) {
return KS_STATUS_FAIL;
}
cnode->ptr = ptr;
cnode->arg = arg;
cnode->fn = fn;
cnode->type = type;
ks_mutex_lock(mp_p->cleanup_mutex);
cnode->next = mp_p->clfn_list;
mp_p->clfn_list = cnode;
ks_mutex_unlock(mp_p->cleanup_mutex);
return KS_STATUS_SUCCESS;
}
/****************************** local utilities ******************************/
/*
* static void startup
*
* DESCRIPTION:
*
* Perform any library level initialization.
*
* RETURNS:
*
* None.
*
* ARGUMENTS:
*
* None.
*/
static void startup(void)
{
int bit_c;
unsigned long size = 1;
if (enabled_b) {
return;
}
/* allocate our free bit array list */
for (bit_c = 0; bit_c <= MAX_BITS; bit_c++) {
bit_array[bit_c] = size;
/*
* Note our minimum number of bits that can store a pointer. This
* is smallest address that we can have a linked list for.
*/
if (min_bit_free_next == 0 && size >= sizeof(void *)) {
min_bit_free_next = bit_c;
}
/*
* Note our minimum number of bits that can store a pointer and
* the size of the block.
*/
if (min_bit_free_size == 0 && size >= sizeof(ks_pool_free_t)) {
min_bit_free_size = bit_c;
}
size *= 2;
}
enabled_b = 1;
}
/*
* static int size_to_bits
*
* DESCRIPTION:
*
* Calculate the number of bits in a size.
*
* RETURNS:
*
* Number of bits.
*
* ARGUMENTS:
*
* size -> Size of memory of which to calculate the number of bits.
*/
/*
 * size_to_bits -- return the index of the smallest power-of-two bucket
 * (from bit_array) that can hold `size` bytes.  Returns MAX_BITS + 1
 * when size exceeds the largest bucket.
 *
 * Fix: lines belonging to a different function (prefix validation and
 * cleanup-callback assignment) had been spliced into the loop body,
 * referencing undeclared `ret`/`prefix` and leaving the loop unclosed.
 */
static int size_to_bits(const unsigned long size)
{
	int bit_c = 0;

	for (bit_c = 0; bit_c <= MAX_BITS; bit_c++) {
		if (size <= bit_array[bit_c]) {
			break;
		}
	}

	return bit_c;
}
/*
* static int size_to_free_bits
*
* DESCRIPTION:
*
* Calculate the number of bits in a size going on the free list.
*
* RETURNS:
*
* Number of bits.
*
* ARGUMENTS:
*
* size -> Size of memory of which to calculate the number of bits.
*/
static int size_to_free_bits(const unsigned long size)
{
	int bit;

	if (size == 0) {
		return 0;
	}

	/* find the first bucket strictly larger than size, then step back
	 * one: a 10-byte chunk belongs on the 8-byte free list, not 16 */
	for (bit = 0; bit <= MAX_BITS; bit++) {
		if (size < bit_array[bit]) {
			break;
		}
	}

	return bit - 1;
}
/*
* static int bits_to_size
*
* DESCRIPTION:
*
* Calculate the size represented by a number of bits.
*
* RETURNS:
*
* Number of bits.
*
* ARGUMENTS:
*
* bit_n -> Number of bits
*/
static unsigned long bits_to_size(const int bit_n)
{
	/* clamp to the largest tracked bucket */
	return bit_array[(bit_n > MAX_BITS) ? MAX_BITS : bit_n];
}
/****************************** local utilities ******************************/
/*
* static void *alloc_pages
*
* DESCRIPTION:
*
* Allocate space for a number of memory pages in the memory pool.
*
* RETURNS:
*
* Success - New pages of memory
*
* Failure - NULL
*
* ARGUMENTS:
*
* mp_p <-> Pointer to our memory pool.
*
* page_n -> Number of pages to alloc.
*
* error_p <- Pointer to ks_status_t which, if not NULL, will be set with
* a ks_pool error code.
*/
static void *alloc_pages(ks_pool_t *mp_p, const unsigned int page_n, ks_status_t *error_p)
* static ks_status_t check_pool
*
* DESCRIPTION:
*
* Check the validity of pool checksums.
*
* RETURNS:
*
* Success - KS_STATUS_SUCCESS
*
* Failure - Ks_Pool error code
*
* ARGUMENTS:
*
* pool -> A pointer to a pool.
*/
static ks_status_t check_pool(const ks_pool_t *pool)
{
void *mem;
unsigned long size;
/* are we over our max-pages? */
if (mp_p->mp_max_pages > 0 && mp_p->mp_page_c >= mp_p->mp_max_pages) {
SET_POINTER(error_p, KS_STATUS_NO_PAGES);
return NULL;
}
size = SIZE_OF_PAGES(mp_p, page_n);
#ifdef DEBUG
(void) printf("allocating %u pages or %lu bytes\n", page_n, size);
#endif
ks_assert(pool);
if (pool->magic1 != KS_POOL_MAGIC) return KS_STATUS_PNT;
if (pool->magic2 != KS_POOL_MAGIC) return KS_STATUS_POOL_OVER;
mem = malloc(size);
ks_assert(mem);
memset(mem, 0, size);
mp_p->mp_top += size;
mp_p->mp_page_c += page_n;
SET_POINTER(error_p, KS_STATUS_SUCCESS);
return mem;
}
/*
* static int free_pages
*
* DESCRIPTION:
*
* Free previously allocated pages of memory.
*
* RETURNS:
*
* Success - KS_STATUS_SUCCESS
*
* Failure - Ks_Pool error code
*
* ARGUMENTS:
*
* pages <-> Pointer to memory pages that we are freeing.
*
* size -> Size of the block that we are freeing.
*
*/
static int free_pages(void *pages, const unsigned long size)
{
	/* pages are backed by plain malloc, so size is not needed here */
	(void)size;
	free(pages);
	return KS_STATUS_SUCCESS;
}
/*
* static int check_magic
* static ks_status_t check_fence
*
* DESCRIPTION:
*
* Check for the existance of the magic ID in a memory pointer.
* Check the validity of the fence checksums.
*
* RETURNS:
*
 *
*
* ARGUMENTS:
*
* addr -> Address inside of the block that we are tryign to locate.
*
* size -> Size of the block.
* addr -> A pointer directly to the fence.
*/
static int check_magic(const void *addr, const unsigned long size)
static ks_status_t check_fence(const void *addr)
{
const unsigned char *mem_p;
const ks_byte_t *mem_p;
/* set our starting point */
mem_p = (unsigned char *) addr + size;
mem_p = (ks_byte_t *)addr;
if (*mem_p == FENCE_MAGIC0 && *(mem_p + 1) == FENCE_MAGIC1) {
return KS_STATUS_SUCCESS;
} else {
if (*mem_p == KS_POOL_FENCE_MAGIC0 && *(mem_p + 1) == KS_POOL_FENCE_MAGIC1) return KS_STATUS_SUCCESS;
return KS_STATUS_PNT_OVER;
}
}
/*
* static void write_magic
* static void write_fence
*
* DESCRIPTION:
*
 *
*
* addr -> Address where to write the magic.
*/
static void write_magic(const void *addr)
{
	unsigned char *p = (unsigned char *) addr;

	/* lay down the two-byte fence marker */
	p[0] = FENCE_MAGIC0;
	p[1] = FENCE_MAGIC1;
}
/*
* static void free_pointer
*
* DESCRIPTION:
*
* Moved a pointer into our free lists.
*
* RETURNS:
*
* Success - KS_STATUS_SUCCESS
*
* Failure - Ks_Pool error code
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
*
* addr <-> Address where to write the magic. We may write a next
* pointer to it.
*
* size -> Size of the address space.
*/
static int free_pointer(ks_pool_t *mp_p, void *addr, const unsigned long size)
static void write_fence(void *addr)
{
unsigned int bit_n;
unsigned long real_size;
ks_pool_free_t free_pnt;
#ifdef DEBUG
(void) printf("freeing a block at %lx of %lu bytes\n", (long) addr, size);
#endif
if (size == 0) {
return KS_STATUS_SUCCESS;
}
/*
* if the user size is larger then can fit in an entire block then
* we change the size
*/
if (size > MAX_BLOCK_USER_MEMORY(mp_p)) {
real_size = SIZE_OF_PAGES(mp_p, PAGES_IN_SIZE(mp_p, size)) - sizeof(ks_pool_block_t);
} else {
real_size = size;
}
/*
* We use a specific free bits calculation here because if we are
* freeing 10 bytes then we will be putting it into the 8-byte free
* list and not the 16 byte list. size_to_bits(10) will return 4
* instead of 3.
*/
bit_n = size_to_free_bits(real_size);
/*
* Minimal error checking. We could go all the way through the
* list however this might be prohibitive.
*/
if (mp_p->mp_free[bit_n] == addr) {
return KS_STATUS_IS_FREE;
}
/* add the freed pointer to the free list */
if (bit_n < min_bit_free_next) {
/*
* Yes we know this will lose 99% of the allocations but what else
* can we do? No space for a next pointer.
*/
if (mp_p->mp_free[bit_n] == NULL) {
mp_p->mp_free[bit_n] = addr;
}
} else if (bit_n < min_bit_free_size) {
/* we copy, not assign, to maintain the free list */
memcpy(addr, mp_p->mp_free + bit_n, sizeof(void *));
mp_p->mp_free[bit_n] = addr;
} else {
/* setup our free list structure */
free_pnt.mf_next_p = mp_p->mp_free[bit_n];
free_pnt.mf_size = real_size;
/* we copy the structure in since we don't know about alignment */
memcpy(addr, &free_pnt, sizeof(free_pnt));
mp_p->mp_free[bit_n] = addr;
}
return KS_STATUS_SUCCESS;
*((ks_byte_t *)addr) = KS_POOL_FENCE_MAGIC0;
*((ks_byte_t *)addr + 1) = KS_POOL_FENCE_MAGIC1;
}
/*
* static int split_block
*
* DESCRIPTION:
*
* When freeing space in a multi-block chunk we have to create new
* blocks out of the upper areas being freed.
*
* RETURNS:
*
* Success - KS_STATUS_SUCCESS
*
* Failure - Ks_Pool error code
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
*
* free_addr -> Address that we are freeing.
*
* size -> Size of the space that we are taking from address.
*/
static int split_block(ks_pool_t *mp_p, void *free_addr, const unsigned long size)
* static ks_status_t check_prefix
*
* DESCRIPTION:
*
* Check the validity of prefix checksums.
*
* RETURNS:
*
* Success - KS_STATUS_SUCCESS
*
* Failure - Ks_Pool error code
*
* ARGUMENTS:
*
* prefix -> A pointer to a prefix.
*/
static ks_status_t check_prefix(const ks_pool_prefix_t *prefix)
{
ks_pool_block_t *block_p, *new_block_p;
int ret, page_n;
void *end_p;
/*
* 1st we find the block pointer from our free addr. At this point
* the pointer must be the 1st one in the block if it is spans
* multiple blocks.
*/
block_p = (ks_pool_block_t *) ((char *) free_addr - sizeof(ks_pool_block_t));
if (block_p->mb_magic != BLOCK_MAGIC || block_p->mb_magic2 != BLOCK_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
return KS_STATUS_POOL_OVER;
}
page_n = PAGES_IN_SIZE(mp_p, size);
/* we are creating a new block structure for the 2nd ... */
new_block_p = (ks_pool_block_t *) ((char *) block_p + SIZE_OF_PAGES(mp_p, page_n));
new_block_p->mb_magic = BLOCK_MAGIC;
/* New bounds is 1st block bounds. The 1st block's is reset below. */
new_block_p->mb_bounds_p = block_p->mb_bounds_p;
/* Continue the linked list. The 1st block will point to us below. */
new_block_p->mb_next_p = block_p->mb_next_p;
new_block_p->mb_magic2 = BLOCK_MAGIC;
/* bounds for the 1st block are reset to the 1st page only */
block_p->mb_bounds_p = (char *) new_block_p;
/* the next block pointer for the 1st block is now the new one */
block_p->mb_next_p = new_block_p;
/* only free the space in the 1st block if it is only 1 block in size */
if (page_n == 1) {
/* now free the rest of the 1st block block */
end_p = (char *) free_addr + size;
ret = free_pointer(mp_p, end_p, (unsigned long)((char *) block_p->mb_bounds_p - (char *) end_p));
if (ret != KS_STATUS_SUCCESS) {
return ret;
}
}
/* now free the rest of the block */
ret = free_pointer(mp_p, FIRST_ADDR_IN_BLOCK(new_block_p), (unsigned long)MEMORY_IN_BLOCK(new_block_p));
if (ret != KS_STATUS_SUCCESS) {
return ret;
}
if (!(prefix->magic1 == KS_POOL_PREFIX_MAGIC && prefix->magic2 == KS_POOL_PREFIX_MAGIC && prefix->magic3 == KS_POOL_PREFIX_MAGIC && prefix->magic4 == KS_POOL_PREFIX_MAGIC)) return KS_STATUS_INVALID_POINTER;
return KS_STATUS_SUCCESS;
}
/*
* static void *get_space
*
* DESCRIPTION:
*
* Moved a pointer into our free lists.
*
* RETURNS:
*
* Success - New address that we can use.
*
* Failure - NULL
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
*
* byte_size -> Size of the address space that we need.
*
* error_p <- Pointer to ks_status_t which, if not NULL, will be set with
* a ks_pool error code.
*/
/*
 * get_space -- find (or allocate) an address span of byte_size bytes,
 * first searching the free lists, then allocating new pages.
 * Returns the usable address, or NULL with *error_p set.
 */
static void *get_space(ks_pool_t *mp_p, const unsigned long byte_size, unsigned long *padding, ks_status_t *error_p)
{
	ks_pool_block_t *block_p;
	ks_pool_free_t free_pnt;
	int ret;
	unsigned long size;
	unsigned int bit_c, page_n, left;
	void *free_addr = NULL, *free_end;

	/* round the request up to pointer alignment */
	size = byte_size;
	while ((size & (sizeof(void *) - 1)) > 0) {
		size++;
	}

	if (size > byte_size) {
		*padding = size - byte_size;
	}

	/*
	 * First we check the free lists looking for something with enough
	 * pages, starting at the smallest bucket that could fit.
	 */
	for (bit_c = size_to_bits(size); bit_c <= MAX_BITS; bit_c++) {
		if (mp_p->mp_free[bit_c] != NULL) {
			free_addr = mp_p->mp_free[bit_c];
			break;
		}
	}

	if (bit_c > MAX_BITS) {
		/* nothing on the free lists: allocate fresh pages */
		page_n = PAGES_IN_SIZE(mp_p, size);

		block_p = alloc_pages(mp_p, page_n, error_p);
		if (block_p == NULL) {
			/* error_p set in alloc_pages */
			return NULL;
		}

		/* init the block header */
		block_p->mb_magic = BLOCK_MAGIC;
		block_p->mb_bounds_p = (char *) block_p + SIZE_OF_PAGES(mp_p, page_n);
		block_p->mb_next_p = mp_p->mp_first_p;
		block_p->mb_magic2 = BLOCK_MAGIC;

		/* insert at the front of the block list */
		mp_p->mp_first_p = block_p;
		if (mp_p->mp_last_p == NULL) {
			mp_p->mp_last_p = block_p;
		}

		free_addr = FIRST_ADDR_IN_BLOCK(block_p);

#ifdef DEBUG
		(void) printf("had to allocate space for %lx of %lu bytes\n", (long) free_addr, size);
#endif

		free_end = (char *) free_addr + size;
		left = (unsigned) ((char *) block_p->mb_bounds_p - (char *) free_end);
	} else {
		if (bit_c < min_bit_free_next) {
			/* entry too small to carry a next pointer: list had one item */
			mp_p->mp_free[bit_c] = NULL;
			left = bits_to_size(bit_c) - size;
		} else if (bit_c < min_bit_free_size) {
			/*
			 * BUGFIX: this condition previously repeated
			 * `bit_c < min_bit_free_next`, which made the branch
			 * unreachable; it must test min_bit_free_size to match
			 * the three-way split used by free_pointer().
			 */
			/* pull the next pointer stored in the freed address */
			memcpy(mp_p->mp_free + bit_c, free_addr, sizeof(void *));
			left = bits_to_size(bit_c) - size;
		} else {
			/* grab the full free record from the address */
			memcpy(&free_pnt, free_addr, sizeof(free_pnt));
			mp_p->mp_free[bit_c] = free_pnt.mf_next_p;

			/* splitting a multiblock chunk into fewer blocks? */
			if (PAGES_IN_SIZE(mp_p, free_pnt.mf_size) > PAGES_IN_SIZE(mp_p, size)) {
				ret = split_block(mp_p, free_addr, size);
				if (ret != KS_STATUS_SUCCESS) {
					SET_POINTER(error_p, ret);
					return NULL;
				}
				/* leftover memory was handled in split_block */
				left = 0;
			} else {
				left = free_pnt.mf_size - size;
			}
		}

#ifdef DEBUG
		(void) printf("found a free block at %lx of %lu bytes\n", (long) free_addr, left + size);
#endif

		free_end = (char *) free_addr + size;
	}

	/*
	 * Return any unused tail to the free lists -- but never for a
	 * multi-block chunk, since every user address % page-size must
	 * land on its block start.
	 */
	if (left > 0 && size <= MAX_BLOCK_USER_MEMORY(mp_p)) {
		ret = free_pointer(mp_p, free_end, left);
		if (ret != KS_STATUS_SUCCESS) {
			SET_POINTER(error_p, ret);
			return NULL;
		}
	}

	/* update our bounds */
	if (free_addr > mp_p->mp_bounds_p) {
		mp_p->mp_bounds_p = free_addr;
	} else if (free_addr < mp_p->mp_min_p) {
		mp_p->mp_min_p = free_addr;
	}

	return free_addr;
}
/*
* static void *alloc_mem
*
 *
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool. If NULL then it will do a
* normal malloc.
* pool -> Pointer to the memory pool.
*
* byte_size -> Number of bytes to allocate in the pool. Must be >0.
*
* error_p <- Pointer to ks_status_t which, if not NULL, will be set with
* a ks_pool error code.
*/
static void *alloc_mem(ks_pool_t *mp_p, const unsigned long byte_size, ks_status_t *error_p)
static void *alloc_mem(ks_pool_t *pool, const ks_size_t size, ks_status_t *error_p)
{
unsigned long size, fence, padding = 0;
void *addr;
alloc_prefix_t *prefix;
/* make sure we have enough bytes */
if (byte_size < MIN_ALLOCATION) {
size = MIN_ALLOCATION;
} else {
size = byte_size;
}
fence = FENCE_SIZE;
/* get our free space + the space for the fence post */
addr = get_space(mp_p, size + fence + PREFIX_SIZE, &padding, error_p);
if (addr == NULL) {
/* error_p set in get_space */
return NULL;
}
prefix = (alloc_prefix_t *) addr;
prefix->m1 = PRE_MAGIC1;
prefix->m2 = PRE_MAGIC2;
prefix->size = size + fence + PREFIX_SIZE + padding;
ks_size_t required;
void *start = NULL;
void *addr = NULL;
void *fence = NULL;
ks_pool_prefix_t *prefix = NULL;
ks_assert(pool);
ks_assert(size);
required = KS_POOL_PREFIX_SIZE + size + KS_POOL_FENCE_SIZE;
start = malloc(required);
ks_assert(start);
memset(start, 0, required);
prefix = (ks_pool_prefix_t *)start;
addr = (void *)((ks_byte_t *)start + KS_POOL_PREFIX_SIZE);
fence = (void *)((ks_byte_t *)addr + size);
prefix->magic1 = KS_POOL_PREFIX_MAGIC;
prefix->size = size;
prefix->magic2 = KS_POOL_PREFIX_MAGIC;
prefix->refs = 1;
prefix->padding = padding;
prefix->next = pool->first;
if (pool->first) pool->first->prev = prefix;
pool->first = prefix;
if (!pool->last) pool->last = prefix;
prefix->magic3 = KS_POOL_PREFIX_MAGIC;
prefix->magic4 = KS_POOL_PREFIX_MAGIC;
write_magic((char *) prefix + prefix->size - padding - fence);
write_fence(fence);
if (mp_p->mp_log_func != NULL) {
mp_p->mp_log_func(mp_p, KS_POOL_FUNC_INCREF, prefix->size, prefix->refs, NULL, addr, 0);
if (pool->log_func != NULL) {
pool->log_func(pool, KS_POOL_FUNC_INCREF, prefix->size, prefix->refs, NULL, addr, 0);
}
/* maintain our stats */
mp_p->mp_alloc_c++;
mp_p->mp_user_alloc += prefix->size;
if (mp_p->mp_user_alloc > mp_p->mp_max_alloc) {
mp_p->mp_max_alloc = mp_p->mp_user_alloc;
pool->alloc_c++;
pool->user_alloc += prefix->size;
if (pool->user_alloc > pool->max_alloc) {
pool->max_alloc = pool->user_alloc;
}
SET_POINTER(error_p, KS_STATUS_SUCCESS);
return (uint8_t *)addr + PREFIX_SIZE;
return addr;
}
/*
 *
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool. If NULL then it will do a
* normal free.
* pool -> Pointer to the memory pool.
*
* addr <-> Address to free.
* addr -> Address to free.
*
*/
static int free_mem(ks_pool_t *mp_p, void *addr)
static ks_status_t free_mem(ks_pool_t *pool, void *addr)
{
unsigned long size;
int ret;
ks_pool_block_t *block_p;
alloc_prefix_t *prefix;
ks_status_t ret = KS_STATUS_SUCCESS;
void *start = NULL;
void *fence = NULL;
ks_pool_prefix_t *prefix = NULL;
ks_assert(pool);
ks_assert(addr);
prefix = (alloc_prefix_t *) ((char *) addr - PREFIX_SIZE);
if (!(prefix->m1 == PRE_MAGIC1 && prefix->m2 == PRE_MAGIC2)) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
return KS_STATUS_INVALID_POINTER;
}
start = (void *)((uintptr_t)addr - KS_POOL_PREFIX_SIZE);
prefix = (ks_pool_prefix_t *)start;
if ((ret = check_prefix(prefix)) != KS_STATUS_SUCCESS) return ret;
if (prefix->refs > 0) {
prefix->refs--;
if (pool->log_func != NULL) {
pool->log_func(pool, KS_POOL_FUNC_DECREF, prefix->size, prefix->refs, addr, NULL, 0);
}
}
if (prefix->refs > 0) {
return KS_STATUS_REFS_EXIST;
}
size = prefix->size;
/*
* If the size is larger than a block then the allocation must be at
* the front of the block.
*/
if (size > MAX_BLOCK_USER_MEMORY(mp_p)) {
block_p = (ks_pool_block_t *) ((char *) addr - PREFIX_SIZE - sizeof(ks_pool_block_t));
if (block_p->mb_magic != BLOCK_MAGIC || block_p->mb_magic2 != BLOCK_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
return KS_STATUS_INVALID_POINTER;
}
}
/* find the user's magic numbers */
ret = check_magic(prefix, prefix->size - FENCE_SIZE - prefix->padding);
fence = (void *)((uintptr_t)addr + prefix->size);
ret = check_fence(fence);
perform_pool_cleanup_on_free(mp_p, addr);
perform_pool_cleanup_on_free(pool, prefix);
/* move pointer to actual beginning */
addr = prefix;
if (ret != KS_STATUS_SUCCESS) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
return ret;
if (!prefix->prev && !prefix->next) pool->first = pool->last = NULL;
else if (!prefix->prev) {
pool->first = prefix->next;
pool->first->prev = NULL;
}
ret = free_pointer(mp_p, addr, size);
if (ret != KS_STATUS_SUCCESS) {
return ret;
else if (!prefix->next) {
pool->last = prefix->prev;
pool->last->next = NULL;
} else {
prefix->prev->next = prefix->next;
prefix->next->prev = prefix->prev;
}
mp_p->mp_user_alloc -= size;
pool->alloc_c--;
pool->user_alloc -= prefix->size;
/* adjust our stats */
mp_p->mp_alloc_c--;
free(start);
return KS_STATUS_SUCCESS;
return ret;
}
/***************************** exported routines *****************************/
/*
 * static ks_pool_t *ks_pool_raw_open
 *
 * DESCRIPTION:
 *
 * Open/allocate a new memory pool.
 *
 * RETURNS:
 *
 * Success - Pointer to the new pool.
 *
 * Failure - NULL
 *
 * ARGUMENTS:
 *
 * flags -> Flags to set attributes of the memory pool. See the top
 * of ks_pool.h.
 *
 * error_p <- Pointer to ks_status_t which, if not NULL, will be set with
 * a ks_pool error code.
 */
static ks_pool_t *ks_pool_raw_open(const unsigned int flags, const unsigned int page_size, void *start_addr, ks_status_t *error_p)
static ks_pool_t *ks_pool_raw_open(const ks_size_t flags, ks_status_t *error_p)
{
ks_pool_block_t *block_p;
int page_n, ret;
ks_pool_t mp, *mp_p;
void *free_addr;
if (!enabled_b) {
startup();
}
/* zero our temp struct */
memset(&mp, 0, sizeof(mp));
mp.mp_magic = KS_POOL_MAGIC;
mp.mp_flags = flags;
mp.mp_alloc_c = 0;
mp.mp_user_alloc = 0;
mp.mp_max_alloc = 0;
mp.mp_page_c = 0;
/* mp.mp_page_size set below */
/* mp.mp_blocks_bit_n set below */
/* mp.mp_top set below */
mp.mp_log_func = NULL;
mp.mp_min_p = NULL;
mp.mp_bounds_p = NULL;
mp.mp_first_p = NULL;
mp.mp_last_p = NULL;
mp.mp_magic2 = KS_POOL_MAGIC;
/* get and sanity check our page size */
if (page_size > 0) {
mp.mp_page_size = page_size;
if (mp.mp_page_size % getpagesize() != 0) {
SET_POINTER(error_p, KS_STATUS_ARG_INVALID);
return NULL;
}
} else {
mp.mp_page_size = getpagesize() * DEFAULT_PAGE_MULT;
if (mp.mp_page_size % 1024 != 0) {
SET_POINTER(error_p, KS_STATUS_PAGE_SIZE);
return NULL;
}
}
ks_pool_t *pool;
/* we start at the front of the file */
mp.mp_top = 0;
pool = malloc(sizeof(ks_pool_t));
ks_assert(pool);
memset(pool, 0, sizeof(ks_pool_t));
/*
* Find out how many pages we need for our ks_pool structure.
*
* NOTE: this adds possibly unneeded space for ks_pool_block_t which
* may not be in this block.
*/
page_n = PAGES_IN_SIZE(&mp, sizeof(ks_pool_t));
/* now allocate us space for the actual struct */
mp_p = alloc_pages(&mp, page_n, error_p);
if (mp_p == NULL) {
return NULL;
}
/*
* NOTE: we do not normally free the rest of the block here because
* we want to lesson the chance of an allocation overwriting the
* main structure.
*/
if (BIT_IS_SET(flags, KS_POOL_FLAG_HEAVY_PACKING)) {
/* we add a block header to the front of the block */
block_p = (ks_pool_block_t *) mp_p;
/* init the block header */
block_p->mb_magic = BLOCK_MAGIC;
block_p->mb_bounds_p = (char *) block_p + SIZE_OF_PAGES(&mp, page_n);
block_p->mb_next_p = NULL;
block_p->mb_magic2 = BLOCK_MAGIC;
/* the ks_pool pointer is then the 2nd thing in the block */
mp_p = FIRST_ADDR_IN_BLOCK(block_p);
free_addr = (char *) mp_p + sizeof(ks_pool_t);
/* free the rest of the block */
ret = free_pointer(&mp, free_addr, (unsigned long)((char *) block_p->mb_bounds_p - (char *) free_addr));
if (ret != KS_STATUS_SUCCESS) {
/* NOTE: after this line mp_p will be invalid */
(void) free_pages(block_p, SIZE_OF_PAGES(&mp, page_n));
SET_POINTER(error_p, ret);
return NULL;
}
/*
* NOTE: if we are HEAVY_PACKING then the 1st block with the ks_pool
* header is not on the block linked list.
*/
/* now copy our tmp structure into our new memory area */
memcpy(mp_p, &mp, sizeof(ks_pool_t));
/* we setup min/max to our current address which is as good as any */
mp_p->mp_min_p = block_p;
mp_p->mp_bounds_p = block_p->mb_bounds_p;
} else {
/* now copy our tmp structure into our new memory area */
memcpy(mp_p, &mp, sizeof(ks_pool_t));
/* we setup min/max to our current address which is as good as any */
mp_p->mp_min_p = mp_p;
mp_p->mp_bounds_p = (char *) mp_p + SIZE_OF_PAGES(mp_p, page_n);
}
pool->magic1 = KS_POOL_MAGIC;
pool->flags = flags;
pool->magic2 = KS_POOL_MAGIC;
SET_POINTER(error_p, KS_STATUS_SUCCESS);
return mp_p;
return pool;
}
/*
 * ks_pool_open -- open/create a new memory pool.
 */
/*
 * ks_pool_open -- create a new memory pool, create its mutex, and
 * store it in *poolP.  Returns KS_STATUS_SUCCESS or an error code
 * from ks_pool_raw_open (in which case *poolP is NULL).
 */
KS_DECLARE(ks_status_t) ks_pool_open(ks_pool_t **poolP)
{
	ks_status_t ret = KS_STATUS_SUCCESS;
	ks_pool_t *pool = NULL;

	ks_assert(poolP);

	pool = ks_pool_raw_open(KS_POOL_FLAG_DEFAULT, &ret);

	if (pool) ks_mutex_create(&pool->mutex, KS_MUTEX_FLAG_DEFAULT, NULL);

	*poolP = pool;

	return ret;
}
/*
 *
*
* ARGUMENTS:
*
* mp_p <-> Pointer to our memory pool.
* pool -> Pointer to our memory pool.
*/
/*
 * ks_pool_raw_close -- close a memory pool: clear (free) every
 * outstanding allocation, destroy the pool mutex and free the pool
 * itself.  Returns KS_STATUS_SUCCESS or the error from
 * ks_pool_clear; on failure the pool is left intact.
 */
static ks_status_t ks_pool_raw_close(ks_pool_t *pool)
{
	ks_status_t ret = KS_STATUS_SUCCESS;

	/* releases every allocation and runs cleanup callbacks */
	if ((ret = ks_pool_clear(pool)) != KS_STATUS_SUCCESS) goto done;

	if (pool->log_func != NULL) {
		pool->log_func(pool, KS_POOL_FUNC_CLOSE, 0, 0, NULL, NULL, 0);
	}

	ks_mutex_destroy(&pool->mutex);

	free(pool);

done:
	ks_assert(ret == KS_STATUS_SUCCESS);
	return ret;
}
......@@ -1340,23 +522,19 @@ static ks_status_t ks_pool_raw_close(ks_pool_t *mp_p)
*
* ARGUMENTS:
*
* mp_pp <-> Pointer to pointer of our memory pool.
* error_p <- Pointer to error
* poolP <-> Pointer to pointer of our memory pool.
*/
/*
 * ks_pool_close -- close the pool pointed to by *poolP and, on
 * success, NULL the caller's pointer.  Returns KS_STATUS_SUCCESS or
 * the error from ks_pool_raw_close.
 */
KS_DECLARE(ks_status_t) ks_pool_close(ks_pool_t **poolP)
{
	ks_status_t ret = KS_STATUS_SUCCESS;

	ks_assert(poolP);
	ks_assert(*poolP);

	if ((ret = ks_pool_raw_close(*poolP)) == KS_STATUS_SUCCESS) *poolP = NULL;

	return ret;
}
/*
 *
*
* ARGUMENTS:
*
* mp_p <-> Pointer to our memory pool.
* pool -> Pointer to our memory pool.
*/
KS_DECLARE(ks_status_t) ks_pool_clear(ks_pool_t *mp_p)
KS_DECLARE(ks_status_t) ks_pool_clear(ks_pool_t *pool)
{
ks_pool_block_t *block_p;
int final = KS_STATUS_SUCCESS, bit_n, ret;
void *first_p;
/* special case, just return no-error */
if (mp_p == NULL) {
return KS_STATUS_ARG_NULL;
}
if (mp_p->mp_magic != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
return KS_STATUS_PNT;
}
if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
return KS_STATUS_POOL_OVER;
}
ks_status_t ret = KS_STATUS_SUCCESS;
ks_pool_prefix_t *prefix, *nprefix;
ks_mutex_lock(mp_p->mutex);
if (mp_p->mp_log_func != NULL) {
mp_p->mp_log_func(mp_p, KS_POOL_FUNC_CLEAR, 0, 0, NULL, NULL, 0);
}
ks_assert(pool);
perform_pool_cleanup(mp_p);
if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
/* reset all of our free lists */
for (bit_n = 0; bit_n <= MAX_BITS; bit_n++) {
mp_p->mp_free[bit_n] = NULL;
if (pool->log_func != NULL) {
pool->log_func(pool, KS_POOL_FUNC_CLEAR, 0, 0, NULL, NULL, 0);
}
/* free the blocks */
for (block_p = mp_p->mp_first_p; block_p != NULL; block_p = block_p->mb_next_p) {
if (block_p->mb_magic != BLOCK_MAGIC || block_p->mb_magic2 != BLOCK_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
final = KS_STATUS_POOL_OVER;
break;
}
ks_mutex_lock(pool->mutex);
first_p = FIRST_ADDR_IN_BLOCK(block_p);
perform_pool_cleanup(pool);
/* free the memory */
ret = free_pointer(mp_p, first_p, (unsigned long)MEMORY_IN_BLOCK(block_p));
if (ret != KS_STATUS_SUCCESS) {
final = ret;
for (prefix = pool->first; prefix; prefix = nprefix) {
nprefix = prefix->next;
// @todo check_prefix()? still want to clear out properly if some has been cleared though, not leak memory if there has been corruption
free(prefix);
}
}
ks_mutex_unlock(mp_p->mutex);
return final;
ks_mutex_unlock(pool->mutex);
done:
ks_assert(ret == KS_STATUS_SUCCESS);
return ret;
}
/*
 *
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
*
* pool -> Pointer to the memory pool.
*
* byte_size -> Number of bytes to allocate in the pool. Must be >0.
* size -> Number of bytes to allocate in the pool. Must be >0.
*
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
KS_DECLARE(void *) ks_pool_alloc_ex(ks_pool_t *mp_p, const unsigned long byte_size, ks_status_t *error_p)
KS_DECLARE(void *) ks_pool_alloc_ex(ks_pool_t *pool, const ks_size_t size, ks_status_t *error_p)
{
void *addr;
ks_status_t ret = KS_STATUS_SUCCESS;
void *addr = NULL;
ks_assert(mp_p);
//if (1) return calloc(1, byte_size);
if (mp_p->mp_magic != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
SET_POINTER(error_p, KS_STATUS_PNT);
return NULL;
}
if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
SET_POINTER(error_p, KS_STATUS_POOL_OVER);
return NULL;
}
ks_assert(pool);
ks_assert(size);
if (byte_size == 0) {
SET_POINTER(error_p, KS_STATUS_ARG_INVALID);
return NULL;
}
if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
ks_mutex_lock(mp_p->mutex);
addr = alloc_mem(mp_p, byte_size, error_p);
ks_mutex_unlock(mp_p->mutex);
ks_mutex_lock(pool->mutex);
addr = alloc_mem(pool, size, &ret);
ks_mutex_unlock(pool->mutex);
if (mp_p->mp_log_func != NULL) {
mp_p->mp_log_func(mp_p, KS_POOL_FUNC_ALLOC, byte_size, 0, addr, NULL, 0);
if (pool->log_func != NULL) {
pool->log_func(pool, KS_POOL_FUNC_ALLOC, size, 0, addr, NULL, 0);
}
ks_assert(addr || (mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT));
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ZERO)) {
memset(addr, 0, byte_size);
}
ks_assert(addr);
done:
ks_assert(ret == KS_STATUS_SUCCESS);
return addr;
}
/*
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
* pool -> Pointer to the memory pool.
*
*
* byte_size -> Number of bytes to allocate in the pool. Must be >0.
* size -> Number of bytes to allocate in the pool. Must be >0.
*
*/
/*
 * ks_pool_alloc -- allocate `size` bytes from the pool; convenience
 * wrapper around ks_pool_alloc_ex without error reporting.
 */
KS_DECLARE(void *) ks_pool_alloc(ks_pool_t *pool, const ks_size_t size)
{
	return ks_pool_alloc_ex(pool, size, NULL);
}
/*
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool. If NULL then it will do a
* pool -> Pointer to the memory pool. If NULL then it will do a
* normal calloc.
*
* ele_n -> Number of elements to allocate.
......@@ -1555,52 +684,32 @@ KS_DECLARE(void *) ks_pool_alloc(ks_pool_t *mp_p, const unsigned long byte_size)
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
KS_DECLARE(void *) ks_pool_calloc_ex(ks_pool_t *mp_p, const unsigned long ele_n, const unsigned long ele_size, ks_status_t *error_p)
KS_DECLARE(void *) ks_pool_calloc_ex(ks_pool_t *pool, const ks_size_t ele_n, const ks_size_t ele_size, ks_status_t *error_p)
{
void *addr;
unsigned long byte_size;
ks_assert(mp_p);
ks_status_t ret = KS_STATUS_SUCCESS;
void *addr = NULL;
ks_size_t size;
//if (1) return calloc(ele_n, ele_size);
ks_assert(pool);
ks_assert(ele_n);
ks_assert(ele_size);
if (mp_p->mp_magic != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
SET_POINTER(error_p, KS_STATUS_PNT);
return NULL;
}
if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
SET_POINTER(error_p, KS_STATUS_POOL_OVER);
return NULL;
}
if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
if (ele_n == 0 || ele_size == 0) {
SET_POINTER(error_p, KS_STATUS_ARG_INVALID);
return NULL;
}
size = ele_n * ele_size;
ks_mutex_lock(mp_p->mutex);
byte_size = ele_n * ele_size;
addr = alloc_mem(mp_p, byte_size, error_p);
if (addr != NULL) {
memset(addr, 0, byte_size);
}
ks_mutex_unlock(mp_p->mutex);
ks_mutex_lock(pool->mutex);
addr = alloc_mem(pool, size, &ret);
ks_mutex_unlock(pool->mutex);
if (mp_p->mp_log_func != NULL) {
mp_p->mp_log_func(mp_p, KS_POOL_FUNC_CALLOC, ele_size, ele_n, addr, NULL, 0);
if (pool->log_func != NULL) {
pool->log_func(pool, KS_POOL_FUNC_CALLOC, ele_size, ele_n, addr, NULL, 0);
}
ks_assert(addr || (mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT));
ks_assert(addr);
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ZERO)) {
memset(addr, 0, ele_n * ele_size);
}
done:
ks_assert(ret == KS_STATUS_SUCCESS);
return addr;
}
......@@ -1621,7 +730,7 @@ KS_DECLARE(void *) ks_pool_calloc_ex(ks_pool_t *mp_p, const unsigned long ele_n,
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool. If NULL then it will do a
* pool -> Pointer to the memory pool. If NULL then it will do a
* normal calloc.
*
* ele_n -> Number of elements to allocate.
......@@ -1629,9 +738,9 @@ KS_DECLARE(void *) ks_pool_calloc_ex(ks_pool_t *mp_p, const unsigned long ele_n,
* ele_size -> Number of bytes per element being allocated.
*
*/
KS_DECLARE(void *) ks_pool_calloc(ks_pool_t *mp_p, const unsigned long ele_n, const unsigned long ele_size)
KS_DECLARE(void *) ks_pool_calloc(ks_pool_t *pool, const ks_size_t ele_n, const ks_size_t ele_size)
{
return ks_pool_calloc_ex(mp_p, ele_n, ele_size, NULL);
return ks_pool_calloc_ex(pool, ele_n, ele_size, NULL);
}
/*
......@@ -1649,69 +758,42 @@ KS_DECLARE(void *) ks_pool_calloc(ks_pool_t *mp_p, const unsigned long ele_n, co
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
*
* pool -> Pointer to the memory pool.
*
* addr <-> pointer to pointer of Address to free.
* addr <-> Pointer to pointer of Address to free.
*
*/
KS_DECLARE(ks_status_t) ks_pool_free_ex(ks_pool_t *mp_p, void **addrP)
KS_DECLARE(ks_status_t) ks_pool_free_ex(ks_pool_t *pool, void **addrP)
{
ks_status_t r;
ks_status_t ret = KS_STATUS_SUCCESS;
void *addr;
ks_assert(pool);
ks_assert(addrP);
ks_assert(*addrP);
addr = *addrP;
ks_assert(mp_p);
ks_assert(addr);
//if (1) {
// *addrP = NULL;
// free(addr);
// return KS_STATUS_SUCCESS;
//}
ks_mutex_lock(mp_p->mutex);
if (mp_p->mp_magic != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
r = KS_STATUS_PNT;
goto end;
}
if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
r = KS_STATUS_POOL_OVER;
goto end;
}
ks_mutex_lock(pool->mutex);
if (mp_p->mp_log_func != NULL) {
alloc_prefix_t *prefix = (alloc_prefix_t *)((char *)addr - PREFIX_SIZE);
if (prefix->refs == 1) {
mp_p->mp_log_func(mp_p, KS_POOL_FUNC_FREE, prefix->size, prefix->refs - 1, NULL, addr, 0);
} else {
mp_p->mp_log_func(mp_p, KS_POOL_FUNC_DECREF, prefix->size, prefix->refs - 1, NULL, addr, 0);
}
if (pool->log_func != NULL) {
ks_pool_prefix_t *prefix = (ks_pool_prefix_t *)((uintptr_t)addr - KS_POOL_PREFIX_SIZE);
// @todo check_prefix()?
pool->log_func(pool, prefix->refs == 1 ? KS_POOL_FUNC_FREE : KS_POOL_FUNC_DECREF, prefix->size, prefix->refs - 1, addr, NULL, 0);
}
r = free_mem(mp_p, addr);
end:
ks_mutex_unlock(mp_p->mutex);
ret = free_mem(pool, addr);
ks_mutex_unlock(pool->mutex);
if (r == KS_STATUS_SUCCESS) {
done:
if (ret != KS_STATUS_REFS_EXIST) {
ks_assert(ret == KS_STATUS_SUCCESS);
*addrP = NULL;
}
return r;
return ret;
}
/*
......@@ -1729,51 +811,37 @@ KS_DECLARE(ks_status_t) ks_pool_free_ex(ks_pool_t *mp_p, void **addrP)
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
* pool -> Pointer to the memory pool.
*
* addr -> The addr to ref
*
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
KS_DECLARE(void *) ks_pool_ref_ex(ks_pool_t *mp_p, void *addr, ks_status_t *error_p)
KS_DECLARE(void *) ks_pool_ref_ex(ks_pool_t *pool, void *addr, ks_status_t *error_p)
{
alloc_prefix_t *prefix;
if (mp_p->mp_magic != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
SET_POINTER(error_p, KS_STATUS_PNT);
return NULL;
}
ks_status_t ret = KS_STATUS_SUCCESS;
ks_pool_prefix_t *prefix;
ks_size_t refs;
if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
SET_POINTER(error_p, KS_STATUS_POOL_OVER);
return NULL;
}
ks_assert(pool);
ks_assert(addr);
ks_mutex_lock(mp_p->mutex);
prefix = (alloc_prefix_t *) ((char *) addr - PREFIX_SIZE);
if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
if (!(prefix->m1 == PRE_MAGIC1 && prefix->m2 == PRE_MAGIC2)) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
SET_POINTER(error_p, KS_STATUS_INVALID_POINTER);
return NULL;
}
prefix = (ks_pool_prefix_t *)((uintptr_t)addr - KS_POOL_PREFIX_SIZE);
if ((ret = check_prefix(prefix)) != KS_STATUS_SUCCESS) goto done;
prefix->refs++;
ks_mutex_lock(pool->mutex);
refs = ++prefix->refs;
ks_mutex_unlock(pool->mutex);
if (mp_p->mp_log_func != NULL) {
mp_p->mp_log_func(mp_p, KS_POOL_FUNC_INCREF, prefix->size, prefix->refs, NULL, addr, 0);
if (pool->log_func != NULL) {
pool->log_func(pool, KS_POOL_FUNC_INCREF, prefix->size, refs, addr, NULL, 0);
}
ks_mutex_unlock(mp_p->mutex);
done:
ks_assert(ret == KS_STATUS_SUCCESS);
return addr;
}
......@@ -1793,179 +861,79 @@ KS_DECLARE(void *) ks_pool_ref_ex(ks_pool_t *mp_p, void *addr, ks_status_t *erro
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
* pool -> Pointer to the memory pool.
*
*
* old_addr -> Previously allocated address.
*
* new_byte_size -> New size of the allocation.
* new_size -> New size of the allocation.
*
* error_p <- Pointer to integer which, if not NULL, will be set with
* a ks_pool error code.
*/
KS_DECLARE(void *) ks_pool_resize_ex(ks_pool_t *mp_p, void *old_addr, const unsigned long new_byte_size, ks_status_t *error_p)
KS_DECLARE(void *) ks_pool_resize_ex(ks_pool_t *pool, void *old_addr, const ks_size_t new_size, ks_status_t *error_p)
{
unsigned long copy_size, new_size, old_byte_size;
void *new_addr;
ks_pool_block_t *block_p;
int ret;
alloc_prefix_t *prefix;
void *orig_old_addr = NULL;
ks_assert(mp_p);
//ks_assert(old_addr);
ks_status_t ret = KS_STATUS_SUCCESS;
ks_size_t old_size;
ks_pool_prefix_t *prefix;
void *new_addr = NULL;
ks_size_t required;
//if (1) return realloc(old_addr, new_byte_size);
if (!old_addr) {
return ks_pool_alloc_ex(mp_p, new_byte_size, error_p);
}
ks_assert(pool);
ks_assert(new_size);
if (mp_p->mp_magic != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
SET_POINTER(error_p, KS_STATUS_PNT);
return NULL;
}
if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
SET_POINTER(error_p, KS_STATUS_POOL_OVER);
if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) {
SET_POINTER(error_p, ret);
return NULL;
}
prefix = (alloc_prefix_t *) ((char *) old_addr - PREFIX_SIZE);
if (!(prefix->m1 == PRE_MAGIC1 && prefix->m2 == PRE_MAGIC2)) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
SET_POINTER(error_p, KS_STATUS_INVALID_POINTER);
return NULL;
if (!old_addr) {
return ks_pool_alloc_ex(pool, new_size, error_p);
}
ks_mutex_lock(mp_p->mutex);
if (prefix->refs > 1) {
SET_POINTER(error_p,KS_STATUS_NOT_ALLOWED);
prefix = (ks_pool_prefix_t *)((uintptr_t)old_addr - KS_POOL_PREFIX_SIZE);
if ((ret = check_prefix(prefix)) != KS_STATUS_SUCCESS) {
SET_POINTER(error_p, ret);
return NULL;
}
old_byte_size = prefix->size - PREFIX_SIZE - FENCE_SIZE - prefix->padding;
if (old_byte_size == new_byte_size) {
SET_POINTER(error_p, KS_STATUS_SUCCESS);
new_addr = old_addr;
goto end;
}
ks_mutex_lock(pool->mutex);
if (old_byte_size >= new_byte_size) {
unsigned long diff = old_byte_size - new_byte_size;
//prefix->size -= diff;
prefix->padding += diff;
write_magic((char *)prefix + prefix->size - prefix->padding - FENCE_SIZE);
SET_POINTER(error_p, KS_STATUS_SUCCESS);
new_addr = old_addr;
goto end;
if (prefix->refs > 1) {
ret = KS_STATUS_NOT_ALLOWED;
goto done;
}
{
unsigned long diff = new_byte_size - old_byte_size;
if (prefix->padding >= diff) {
prefix->padding -= diff;
write_magic((char *)prefix + prefix->size - prefix->padding - FENCE_SIZE);
SET_POINTER(error_p, KS_STATUS_SUCCESS);
if (new_size == prefix->size) {
new_addr = old_addr;
goto end;
}
}
/*
* If the size is larger than a block then the allocation must be at
* the front of the block.
*/
if (old_byte_size > MAX_BLOCK_USER_MEMORY(mp_p)) {
block_p = (ks_pool_block_t *) ((char *) old_addr - PREFIX_SIZE - sizeof(ks_pool_block_t));
if (block_p->mb_magic != BLOCK_MAGIC || block_p->mb_magic2 != BLOCK_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
SET_POINTER(error_p, KS_STATUS_POOL_OVER);
new_addr = NULL;
goto end;
goto done;
}
}
if (old_byte_size > 0) {
ret = check_magic(prefix, prefix->size - FENCE_SIZE - prefix->padding);
if (ret != KS_STATUS_SUCCESS) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
SET_POINTER(error_p, ret);
new_addr = NULL;
goto end;
}
}
orig_old_addr = old_addr;
/* move pointer to actual beginning */
old_addr = prefix;
/* make sure we have enough bytes */
if (new_byte_size < MIN_ALLOCATION) {
new_size = MIN_ALLOCATION;
} else {
new_size = new_byte_size;
}
old_size = prefix->size;
/*
* NOTE: we could here see if the size is the same or less and then
* use the current memory and free the space above. This is harder
* than it sounds if we are changing the block size of the
* allocation.
*/
required = KS_POOL_PREFIX_SIZE + new_size + KS_POOL_FENCE_SIZE;
new_addr = realloc((void *)prefix, required);
ks_assert(new_addr);
/* we need to get another address */
new_addr = alloc_mem(mp_p, new_size, error_p);
if (new_addr == NULL) {
/* error_p set in ks_pool_alloc */
new_addr = NULL;
goto end;
}
prefix = (ks_pool_prefix_t *)new_addr;
copy_size = old_byte_size;
memcpy(new_addr, orig_old_addr, copy_size);
prefix->size = new_size;
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ZERO)) {
memset(((unsigned char *)new_addr) + copy_size, 0, new_byte_size - old_byte_size);
}
new_addr = (void *)((uintptr_t)new_addr + KS_POOL_PREFIX_SIZE);
write_fence((void *)((uintptr_t)new_addr + new_size));
/* free the old address */
ret = free_mem(mp_p, (uint8_t *)old_addr + PREFIX_SIZE);
if (ret != KS_STATUS_SUCCESS) {
/* if the old free failed, try and free the new address */
(void) free_mem(mp_p, new_addr);
SET_POINTER(error_p, ret);
new_addr = NULL;
goto end;
}
if (prefix->prev) prefix->prev->next = prefix;
else pool->first = prefix;
if (prefix->next) prefix->next->prev = prefix;
else pool->last = prefix;
if (mp_p->mp_log_func != NULL) {
mp_p->mp_log_func(mp_p, KS_POOL_FUNC_RESIZE, new_byte_size, 0, new_addr, old_addr, old_byte_size);
if (pool->log_func != NULL) {
pool->log_func(pool, KS_POOL_FUNC_RESIZE, new_size, 0, old_addr, new_addr, old_size);
}
SET_POINTER(error_p, KS_STATUS_SUCCESS);
end:
ks_mutex_unlock(mp_p->mutex);
done:
ks_mutex_unlock(pool->mutex);
ks_assert(new_addr || (mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT));
ks_assert(ret == KS_STATUS_SUCCESS);
return new_addr;
}
......@@ -1986,17 +954,17 @@ KS_DECLARE(void *) ks_pool_resize_ex(ks_pool_t *mp_p, void *old_addr, const unsi
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
* pool -> Pointer to the memory pool.
*
*
* old_addr -> Previously allocated address.
*
* new_byte_size -> New size of the allocation.
* new_size -> New size of the allocation.
*
*/
KS_DECLARE(void *) ks_pool_resize(ks_pool_t *mp_p, void *old_addr, const unsigned long new_byte_size)
KS_DECLARE(void *) ks_pool_resize(ks_pool_t *pool, void *old_addr, const ks_size_t new_size)
{
return ks_pool_resize_ex(mp_p, old_addr, new_byte_size, NULL);
return ks_pool_resize_ex(pool, old_addr, new_size, NULL);
}
/*
......@@ -2014,10 +982,7 @@ KS_DECLARE(void *) ks_pool_resize(ks_pool_t *mp_p, void *old_addr, const unsigne
*
* ARGUMENTS:
*
* mp_p -> Pointer to the memory pool.
*
* page_size_p <- Pointer to an unsigned integer which, if not NULL,
* will be set to the page-size of the pool.
* pool -> Pointer to the memory pool.
*
* num_alloced_p <- Pointer to an unsigned long which, if not NULL,
* will be set to the number of pointers currently allocated in pool.
......@@ -2033,32 +998,22 @@ KS_DECLARE(void *) ks_pool_resize(ks_pool_t *mp_p, void *old_addr, const unsigne
* will be set to the total amount of space (including administrative
* overhead) used by the pool.
*/
KS_DECLARE(ks_status_t) ks_pool_stats(const ks_pool_t *mp_p, unsigned int *page_size_p,
unsigned long *num_alloced_p, unsigned long *user_alloced_p, unsigned long *max_alloced_p, unsigned long *tot_alloced_p)
KS_DECLARE(ks_status_t) ks_pool_stats(const ks_pool_t *pool, ks_size_t *num_alloced_p, ks_size_t *user_alloced_p, ks_size_t *max_alloced_p, ks_size_t *tot_alloced_p)
{
if (mp_p == NULL) {
return KS_STATUS_ARG_NULL;
}
if (mp_p->mp_magic != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
return KS_STATUS_PNT;
}
if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
return KS_STATUS_POOL_OVER;
}
ks_status_t ret = KS_STATUS_SUCCESS;
SET_POINTER(page_size_p, mp_p->mp_page_size);
SET_POINTER(num_alloced_p, mp_p->mp_alloc_c);
SET_POINTER(user_alloced_p, mp_p->mp_user_alloc);
SET_POINTER(max_alloced_p, mp_p->mp_max_alloc);
SET_POINTER(tot_alloced_p, SIZE_OF_PAGES(mp_p, mp_p->mp_page_c));
ks_assert(pool);
return KS_STATUS_SUCCESS;
if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
SET_POINTER(num_alloced_p, pool->alloc_c);
SET_POINTER(user_alloced_p, pool->user_alloc);
SET_POINTER(max_alloced_p, pool->max_alloc);
SET_POINTER(tot_alloced_p, pool->user_alloc + (pool->alloc_c * (KS_POOL_PREFIX_SIZE + KS_POOL_FENCE_SIZE)));
done:
ks_assert(ret == KS_STATUS_SUCCESS);
return ret;
}
/*
......@@ -2077,88 +1032,25 @@ KS_DECLARE(ks_status_t) ks_pool_stats(const ks_pool_t *mp_p, unsigned int *page_
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
* pool -> Pointer to the memory pool.
*
* log_func -> Log function (defined in ks_pool.h) which will be called
* with each ks_pool transaction.
*/
KS_DECLARE(ks_status_t) ks_pool_set_log_func(ks_pool_t *mp_p, ks_pool_log_func_t log_func)
KS_DECLARE(ks_status_t) ks_pool_set_log_func(ks_pool_t *pool, ks_pool_log_func_t log_func)
{
if (mp_p == NULL) {
return KS_STATUS_ARG_NULL;
}
if (mp_p->mp_magic != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
return KS_STATUS_PNT;
}
if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
return KS_STATUS_POOL_OVER;
}
mp_p->mp_log_func = log_func;
ks_status_t ret = KS_STATUS_SUCCESS;
return KS_STATUS_SUCCESS;
}
ks_assert(pool);
ks_assert(log_func);
/*
* int ks_pool_set_max_pages
*
* DESCRIPTION:
*
* Set the maximum number of pages that the library will use. Once it
* hits the limit it will return KS_STATUS_NO_PAGES.
*
* NOTE: if the KS_POOL_FLAG_HEAVY_PACKING is set then this max-pages
* value will include the page with the ks_pool header structure in it.
* If the flag is _not_ set then the max-pages will not include this
* first page.
*
* RETURNS:
*
* Success - KS_STATUS_SUCCESS
*
* Failure - ks_status_t error code
*
* ARGUMENTS:
*
* mp_p <-> Pointer to the memory pool.
*
* max_pages -> Maximum number of pages used by the library.
*/
KS_DECLARE(ks_status_t) ks_pool_set_max_pages(ks_pool_t *mp_p, const unsigned int max_pages)
{
if (mp_p == NULL) {
return KS_STATUS_ARG_NULL;
}
if (mp_p->mp_magic != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
return KS_STATUS_PNT;
}
if (mp_p->mp_magic2 != KS_POOL_MAGIC) {
if (!(mp_p->mp_flags & KS_POOL_FLAG_NO_ASSERT)) {
abort();
}
return KS_STATUS_POOL_OVER;
}
if ((ret = check_pool(pool)) != KS_STATUS_SUCCESS) goto done;
if (BIT_IS_SET(mp_p->mp_flags, KS_POOL_FLAG_HEAVY_PACKING)) {
mp_p->mp_max_pages = max_pages;
} else {
/*
* If we are not heavy-packing the pool then we don't count the
* 1st page allocated which holds the ks_pool header structure.
*/
mp_p->mp_max_pages = max_pages + 1;
}
pool->log_func = log_func;
return KS_STATUS_SUCCESS;
done:
ks_assert(ret == KS_STATUS_SUCCESS);
return ret;
}
/*
......@@ -2251,20 +1143,20 @@ KS_DECLARE(const char *) ks_pool_strerror(const ks_status_t error)
KS_DECLARE(char *) ks_pstrdup(ks_pool_t *pool, const char *str)
{
char *result;
unsigned long len;
ks_size_t len;
if (!str) {
return NULL;
}
len = (unsigned long)strlen(str) + 1;
len = (ks_size_t)strlen(str) + 1;
result = ks_pool_alloc(pool, len);
memcpy(result, str, len);
return result;
}
KS_DECLARE(char *) ks_pstrndup(ks_pool_t *pool, const char *str, size_t len)
KS_DECLARE(char *) ks_pstrndup(ks_pool_t *pool, const char *str, ks_size_t len)
{
char *result;
const char *end;
......@@ -2279,14 +1171,14 @@ KS_DECLARE(char *) ks_pstrndup(ks_pool_t *pool, const char *str, size_t len)
len = end - str;
}
result = ks_pool_alloc(pool, (unsigned long)(len + 1));
result = ks_pool_alloc(pool, len + 1);
memcpy(result, str, len);
result[len] = '\0';
return result;
}
KS_DECLARE(char *) ks_pstrmemdup(ks_pool_t *pool, const char *str, size_t len)
KS_DECLARE(char *) ks_pstrmemdup(ks_pool_t *pool, const char *str, ks_size_t len)
{
char *result;
......@@ -2294,14 +1186,14 @@ KS_DECLARE(char *) ks_pstrmemdup(ks_pool_t *pool, const char *str, size_t len)
return NULL;
}
result = ks_pool_alloc(pool, (unsigned long)(len + 1));
result = ks_pool_alloc(pool, len + 1);
memcpy(result, str, len);
result[len] = '\0';
return result;
}
KS_DECLARE(void *) ks_pmemdup(ks_pool_t *pool, const void *buf, size_t len)
KS_DECLARE(void *) ks_pmemdup(ks_pool_t *pool, const void *buf, ks_size_t len)
{
void *result;
......@@ -2309,7 +1201,7 @@ KS_DECLARE(void *) ks_pmemdup(ks_pool_t *pool, const void *buf, size_t len)
return NULL;
}
result = ks_pool_alloc(pool, (unsigned long)len);
result = ks_pool_alloc(pool, len);
memcpy(result, buf, len);
return result;
......@@ -2319,9 +1211,9 @@ KS_DECLARE(char *) ks_pstrcat(ks_pool_t *pool, ...)
{
char *endp, *argp;
char *result;
size_t lengths[10];
ks_size_t lengths[10];
int i = 0;
size_t len = 0;
ks_size_t len = 0;
va_list ap;
va_start(ap, pool);
......@@ -2329,14 +1221,14 @@ KS_DECLARE(char *) ks_pstrcat(ks_pool_t *pool, ...)
/* get lengths so we know what to allocate, cache some so we don't have to double strlen those */
while ((argp = va_arg(ap, char *))) {
size_t arglen = strlen(argp);
ks_size_t arglen = strlen(argp);
if (i < 10) lengths[i++] = arglen;
len += arglen;
}
va_end(ap);
result = (char *) ks_pool_alloc(pool, (unsigned long)(len + 1));
result = (char *) ks_pool_alloc(pool, len + 1);
endp = result;
va_start(ap, pool);
......
......@@ -56,12 +56,12 @@ struct ks_q_s {
uint8_t active;
};
static void ks_q_cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype)
static void ks_q_cleanup(ks_pool_t *mpool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
ks_q_t *q = (ks_q_t *) ptr;
ks_qnode_t *np, *fp;
if (ctype == KS_MPCL_GLOBAL_FREE) {
if (type == KS_MPCL_GLOBAL_FREE) {
return;
}
......@@ -202,7 +202,7 @@ KS_DECLARE(ks_status_t) ks_q_create(ks_q_t **qP, ks_pool_t *pool, ks_size_t maxl
q->maxlen = maxlen;
q->active = 1;
ks_pool_set_cleanup(pool, q, NULL, 0, ks_q_cleanup);
ks_pool_set_cleanup(pool, q, NULL, ks_q_cleanup);
*qP = q;
......
......@@ -79,7 +79,7 @@ void ks_thread_override_default_stacksize(size_t size)
thread_default_stacksize = size;
}
static void ks_thread_cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype)
static void ks_thread_cleanup(ks_pool_t *mpool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
ks_thread_t *thread = (ks_thread_t *) ptr;
......@@ -309,7 +309,7 @@ KS_DECLARE(ks_status_t) ks_thread_create_ex(ks_thread_t **rthread, ks_thread_fun
}
*rthread = thread;
ks_pool_set_cleanup(pool, thread, NULL, 0, ks_thread_cleanup);
ks_pool_set_cleanup(pool, thread, NULL, ks_thread_cleanup);
}
return status;
......
......@@ -22,7 +22,7 @@ int test1(void)
ks_hash_iterator_t *itt;
ks_hash_write_lock(hash);
for (itt = ks_hash_first(hash, KS_UNLOCKED); itt; itt = ks_hash_next(&itt)) {
for (itt = ks_hash_first(hash, KS_UNLOCKED); itt; ) {
const void *key;
void *val;
......@@ -31,6 +31,7 @@ int test1(void)
printf("%s=%s\n", (char *)key, (char *)val);
sum2 += atoi(val);
itt = ks_hash_next(&itt);
ks_hash_remove(hash, (char *)key);
}
ks_hash_write_unlock(hash);
......@@ -95,13 +96,15 @@ int test2(void)
ks_sleep(x * 1000000);
ks_hash_write_lock(hash);
for (itt = ks_hash_first(hash, KS_UNLOCKED); itt; itt = ks_hash_next(&itt)) {
for (itt = ks_hash_first(hash, KS_UNLOCKED); itt; ) {
const void *key;
void *val;
ks_hash_this(itt, &key, NULL, &val);
printf("DEL %s=%s\n", (char *)key, (char *)val);
itt = ks_hash_next(&itt);
ks_hash_remove(hash, (char *)key);
}
ks_hash_write_unlock(hash);
......
......@@ -18,7 +18,7 @@ struct foo {
};
void cleanup(ks_pool_t *mpool, void *ptr, void *arg, int type, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t ctype)
void cleanup(ks_pool_t *mpool, void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
{
struct foo *foo = (struct foo *) ptr;
......@@ -158,7 +158,7 @@ int main(int argc, char **argv)
foo->x = 12;
foo->str = strdup("This is a test 1234 abcd; This will be called on explicit free\n");
ks_pool_set_cleanup(pool, foo, NULL, 0, cleanup);
ks_pool_set_cleanup(pool, foo, NULL, cleanup);
printf("FREE OBJ:\n");
......@@ -184,7 +184,7 @@ int main(int argc, char **argv)
foo->x = 12;
foo->str = strdup("This is a second test 1234 abcd; This will be called on pool clear/destroy\n");
ks_pool_set_cleanup(pool, foo, NULL, 0, cleanup);
ks_pool_set_cleanup(pool, foo, NULL, cleanup);
printf("ALLOC OBJ3: %p\n", (void *)pool);
......@@ -202,7 +202,7 @@ int main(int argc, char **argv)
printf("CLEANUP: %p\n", (void *)pool);
foo->x = 12;
foo->str = strdup("This is a third test 1234 abcd; This will be called on pool clear/destroy\n");
ks_pool_set_cleanup(pool, foo, NULL, 0, cleanup);
ks_pool_set_cleanup(pool, foo, NULL, cleanup);
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论