[PATCH 4/5] Don't re-calculate the slab class id for slabs_free() either.
dormando
dormando at rydia.net
Sun Mar 2 22:37:02 UTC 2008
This and the previous patch slightly reduce user CPU time, especially during heavy evictions. item_free() already knows the item's slab class id, so it now passes that to slabs_free() instead of having do_slabs_free() recompute it from the object size.
---
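
A note on where the cycles go: before this change every free repeated the size-to-class lookup, which is a linear walk over the slab class table; with the id passed down from item_free() that walk is skipped on the free path. A rough, self-contained sketch of that kind of lookup follows (only an illustration, not the actual memcached code; the table setup, growth factor, and names are made up):

#include <stdio.h>

#define MAX_CLASSES 200

/* Illustrative per-class chunk sizes (memcached keeps similar data,
 * but these names and values are invented for this sketch). */
static size_t class_sizes[MAX_CLASSES + 1];
static unsigned int largest_class = 0;

/* The kind of lookup avoided on free: walk the classes until one is
 * large enough to hold an object of the requested size. */
static unsigned int clsid_for_size(const size_t size)
{
    unsigned int res = 1;              /* class 0 is reserved */

    if (size == 0)
        return 0;
    while (size > class_sizes[res]) {
        if (res++ == largest_class)
            return 0;                  /* too big for any class */
    }
    return res;
}

int main(void)
{
    /* Build a fake class table with a 1.25 growth factor. */
    size_t sz = 80;
    while (sz < 1024 * 1024 && largest_class < MAX_CLASSES) {
        class_sizes[++largest_class] = sz;
        sz = sz * 5 / 4;
    }

    printf("a 300-byte item lands in class %u\n", clsid_for_size(300));
    return 0;
}

With the patch applied, the free path simply reuses the class id the item already carried.
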
 server/items.c     |    4 +++-
 server/memcached.h |    6 +++---
 server/slabs.c     |    3 +--
 server/slabs.h     |    2 +-
 server/thread.c    |    4 ++--
 5 files changed, 10 insertions(+), 9 deletions(-)
diff --git a/server/items.c b/server/items.c
index fa6d215..8741df3 100644
--- a/server/items.c
+++ b/server/items.c
@@ -144,16 +144,18 @@ item *do_item_alloc(char *key, const size_t nkey, const int flags, const rel_tim
 
 void item_free(item *it) {
     size_t ntotal = ITEM_ntotal(it);
+    unsigned int clsid;
     assert((it->it_flags & ITEM_LINKED) == 0);
     assert(it != heads[it->slabs_clsid]);
     assert(it != tails[it->slabs_clsid]);
     assert(it->refcount == 0);
 
     /* so slab size changer can tell later if item is already free or not */
+    clsid = it->slabs_clsid;
     it->slabs_clsid = 0;
     it->it_flags |= ITEM_SLABBED;
     DEBUG_REFCNT(it, 'F');
-    slabs_free(it, ntotal);
+    slabs_free(it, ntotal, clsid);
 }
 
 /**
diff --git a/server/memcached.h b/server/memcached.h
index 6c70276..ffbe880 100644
--- a/server/memcached.h
+++ b/server/memcached.h
@@ -292,7 +292,7 @@ void mt_item_unlink(item *it);
 void mt_item_update(item *it);
 void mt_run_deferred_deletes(void);
 void *mt_slabs_alloc(size_t size, unsigned int id);
-void mt_slabs_free(void *ptr, size_t size);
+void mt_slabs_free(void *ptr, size_t size, unsigned int id);
 int mt_slabs_reassign(unsigned char srcid, unsigned char dstid);
 char *mt_slabs_stats(int *buflen);
 void mt_stats_lock(void);
@@ -321,7 +321,7 @@ int mt_store_item(item *item, int comm);
 # define item_unlink(x) mt_item_unlink(x)
 # define run_deferred_deletes() mt_run_deferred_deletes()
 # define slabs_alloc(x,y) mt_slabs_alloc(x,y)
-# define slabs_free(x,y) mt_slabs_free(x,y)
+# define slabs_free(x,y,z) mt_slabs_free(x,y,z)
 # define slabs_reassign(x,y) mt_slabs_reassign(x,y)
 # define slabs_stats(x) mt_slabs_stats(x)
 # define store_item(x,y) mt_store_item(x,y)
@@ -354,7 +354,7 @@ int mt_store_item(item *item, int comm);
 # define item_update(x) do_item_update(x)
 # define run_deferred_deletes() do_run_deferred_deletes()
 # define slabs_alloc(x,y) do_slabs_alloc(x,y)
-# define slabs_free(x,y) do_slabs_free(x,y)
+# define slabs_free(x,y,z) do_slabs_free(x,y,z)
 # define slabs_reassign(x,y) do_slabs_reassign(x,y)
 # define slabs_stats(x) do_slabs_stats(x)
 # define store_item(x,y) do_store_item(x,y)
diff --git a/server/slabs.c b/server/slabs.c
index b61ef84..dbd9740 100644
--- a/server/slabs.c
+++ b/server/slabs.c
@@ -257,8 +257,7 @@ void *do_slabs_alloc(const size_t size, unsigned int id) {
     return NULL; /* shouldn't ever get here */
 }
 
-void do_slabs_free(void *ptr, const size_t size) {
-    unsigned char id = slabs_clsid(size);
+void do_slabs_free(void *ptr, const size_t size, unsigned int id) {
     slabclass_t *p;
 
     assert(((item *)ptr)->slabs_clsid == 0);
diff --git a/server/slabs.h b/server/slabs.h
index 2d62234..3dbccd3 100644
--- a/server/slabs.h
+++ b/server/slabs.h
@@ -20,7 +20,7 @@ unsigned int slabs_clsid(const size_t size);
 void *do_slabs_alloc(const size_t size, unsigned int id);
 
 /** Free previously allocated object */
-void do_slabs_free(void *ptr, size_t size);
+void do_slabs_free(void *ptr, size_t size, unsigned int id);
 
 /** Fill buffer with stats */ /*@null@*/
 char* do_slabs_stats(int *buflen);
diff --git a/server/thread.c b/server/thread.c
index 90fb266..e0303b0 100644
--- a/server/thread.c
+++ b/server/thread.c
@@ -580,9 +580,9 @@ void *mt_slabs_alloc(size_t size, unsigned int id) {
     return ret;
 }
 
-void mt_slabs_free(void *ptr, size_t size) {
+void mt_slabs_free(void *ptr, size_t size, unsigned int id) {
     pthread_mutex_lock(&slabs_lock);
-    do_slabs_free(ptr, size);
+    do_slabs_free(ptr, size, id);
     pthread_mutex_unlock(&slabs_lock);
 }
 
--
1.5.4.2