What does glommer think about kmem cgroup? [message #43767]
From: Glauber Costa
Date: Thu, 13 October 2011 15:50

Hi guys,

So, linuxcon is approaching. To help make our discussions more
productive, I sketched a basic prototype of a kmem cgroup that can
control the size of the dentry cache. I am sending the code here so you
guys can have an idea, but keep in mind this is a *sketch*. This is my
view of how our controller *could be*, not necessarily what it *should
be*. All your input is more than welcome.

Let me first explain a bit of my approach: (there are some comments
inline as well)

* So far it only works with the slab (you will see that something
similar can be done for at least the slub). Since most of us are
concerned mostly with memory abuse (I think), for simplicity I neglected
the initial memory allocated for the arrays. We only bill the pages when
cache_grow is called to allocate more of them (see the sketch right
after this list).

* I avoid resorting to the shrinkers and instead try to free the slab
pages themselves whenever possible.

* We don't limit the size of all caches; they have to register
themselves explicitly (and in this PoC, I am using the dentry cache as
an example).

* The object is billed to whoever touched it first. Other policies are
of course possible.
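
To make the billing point concrete, here is a minimal sketch of the hook
I have in mind in mm/slab.c. The helper name kmem_cache_charge_grow() is
made up for this illustration; mem_cgroup_kmem_charge() is the interface
the patch below actually adds:

/*
 * Sketch only: cache_grow() would call this right before asking the
 * page allocator for a new slab. One slab is 2^gfporder pages, so that
 * is what we bill.
 */
static int kmem_cache_charge_grow(struct kmem_cache *cachep, gfp_t flags)
{
	int nr_pages = 1 << cachep->gfporder;

	/*
	 * A non-zero return means the hard limit would be exceeded;
	 * cache_grow() then returns 0 and the object allocation fails.
	 */
	return mem_cgroup_kmem_charge(cachep, flags, nr_pages);
}

The matching mem_cgroup_kmem_uncharge() call would sit in the slab
destruction path, so pages handed back to the page allocator are
uncharged right away.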

What I am *not* concerned about in this PoC: (left for future work, if
needed)
- unified user/memory kernel memory reclaim
- changes to the shrinkers.
- changes to the limit once it is already in place
- per-cgroup display in /proc/slabinfo
- task movement
- a whole lot of other stuff.

* Hey glommer, do you have numbers?
Yes, I have 8 numbers. And since 8 is also a number, then I have 9 numbers.

So what I did was to type "find /" on a freshly booted system (my
laptop). I just ran each iteration once, so nothing scientific. I halved
the limits until the allocations started to fail, which happened more or
less around the 256k hard limit. find is also not a workload that pins
the dentries in memory for very long; other kinds of workloads will
display different results here...

Hard/Soft limit                                 real        user       sys
Base (non-patched kernel)                       0m16.091s   0m0.567s   0m6.649s
Root cgroup (patched, unlimited; max used
  mem: 22Mb)                                    0m15.853s   0m0.511s   0m6.417s
16Mb/4Mb                                        0m16.596s   0m0.560s   0m6.947s
8Mb/4Mb                                         0m16.975s   0m0.568s   0m7.047s
4Mb/2Mb                                         0m16.713s   0m0.554s   0m7.022s
2Mb/1Mb                                         0m17.001s   0m0.544s   0m7.118s
1Mb/512k                                        0m16.671s   0m0.530s   0m7.067s
512k/256k                                       0m17.395s   0m0.567s   0m7.179s

So, what these initial numbers tell us is that the performance penalty
for the root cgroup is not expected to be that bad. When the limits
start to be hit, a penalty is incurred, but it stays within
expectations.

diff --git a/fs/dcache.c b/fs/dcache.c
index a88948b..cd5d091 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -142,7 +142,7 @@ static void __d_free(struct rcu_head *head)
WARN_ON(!list_empty(&dentry->d_alias));
if (dname_external(dentry))
kfree(dentry->d_name.name);
- kmem_cache_free(dentry_cache, dentry);
+ kmem_cache_free(dentry->dentry_cache, dentry);
}

/*
@@ -1160,6 +1160,8 @@ void shrink_dcache_parent(struct dentry * parent)
}
EXPORT_SYMBOL(shrink_dcache_parent);

+static struct memcg_slab_ctlr dcache_ctlr;
+
/**
* __d_alloc - allocate a dcache entry
* @sb: filesystem it will belong to
@@ -1173,9 +1175,14 @@ EXPORT_SYMBOL(shrink_dcache_parent);
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
struct dentry *dentry;
+ struct kmem_cache *current_dcache;
char *dname;

- dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
+ current_dcache = current_kmem_cache(dcache_ctlr.token);
+ if (unlikely(!current_dcache))
+ current_dcache = dentry_cache;
+
+ dentry = kmem_cache_alloc(current_dcache, GFP_KERNEL);
if (!dentry)
return NULL;

@@ -1210,6 +1217,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
INIT_LIST_HEAD(&dentry->d_alias);
INIT_LIST_HEAD(&dentry->d_u.d_child);
d_set_d_op(dentry, dentry->d_sb->s_d_op);
+ dentry->dentry_cache = current_dcache;

this_cpu_inc(nr_dentry);

@@ -2945,6 +2953,38 @@ static void __init dcache_init_early(void)
INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}

+static struct kmem_cache *dcache_new_kmem(struct mem_cgroup *memcg,
+ struct mem_cgroup *parent)
+{
+ char *name;
+
+ if (!parent)
+ return dentry_cache;
+
+ name = kasprintf(GFP_KERNEL, "memcg-dentry-%p", memcg);
+ /* kasprintf() can fail; don't hand kmem_cache_create() a NULL name */
+ if (!name)
+ return dentry_cache;
+
+ return kmem_cache_create(name, sizeof(struct dentry),
+ __alignof__(struct dentry),
+ SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD, NULL);
+}
+
+static void dcache_destroy_kmem(struct kmem_cache *cachep)
+{
+ if (cachep == dentry_cache)
+ return;
+
+ kfree(cachep->name);
+ kmem_cache_destroy(cachep);
+}
+
+static struct memcg_slab_ctlr dcache_ctlr = {
+ .memcg_create_kmem_cache = dcache_new_kmem,
+ .memcg_destroy_kmem_cache = dcache_destroy_kmem,
+};
+
static void __init dcache_init(void)
{
int loop;
@@ -2957,6 +2997,8 @@
dentry_cache = KMEM_CACHE(dentry,
SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

+ register_memcg_slab_ctlr(&dcache_ctlr);
+
/* Hash may have been set up in dcache_init_early */
if (!hashdist)
return;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 62157c0..a002292 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -142,6 +142,7 @@ struct dentry {
} d_u;
struct list_head d_subdirs; /* our children */
struct list_head d_alias; /* inode alias list */
+ struct kmem_cache *dentry_cache;
};

/*
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 343bd76..1b7ef0c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -158,6 +158,9 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
+
+struct kmem_cache *current_kmem_cache(int token);
+
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

@@ -376,5 +379,33 @@ mem_cgroup_print_bad_page(struct page *page)
}
#endif

+struct memcg_slab_ctlr {
+ struct list_head list;
+ struct kmem_cache *cachep;
+ struct kmem_cache *(*memcg_create_kmem_cache)(struct mem_cgroup *memcg,
+ struct mem_cgroup *parent);
+ void (*memcg_destroy_kmem_cache)(struct kmem_cache *cachep);
+ int token;
+};
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+int mem_cgroup_kmem_charge(struct kmem_cache *cachep, gfp_t flags, int nr_pages);
+void mem_cgroup_kmem_uncharge(struct kmem_cache *cachep, int nr_pages);
+void register_memcg_slab_ctlr(struct memcg_slab_ctlr *ctlr);
+
+#else
+static inline int mem_cgroup_kmem_charge(struct kmem_cache *cachep, gfp_t flags, int nr_pages)
+{
+ return 0;
+}
+static inline void mem_cgroup_kmem_uncharge(struct kmem_cache *cachep, int nr_pages)
+{
+}
+
+static inline void register_memcg_slab_ctlr(struct memcg_slab_ctlr *ctlr)
+{
+}
+#endif
+
#endif /* _LINUX_MEMCONTROL_H */

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 573c809..0bdefe2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -103,6 +103,7 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
+int kmem_cache_shrink_pages(struct kmem_cache *, int nr_pages);
void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *);

diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index d00e0ba..1ec3bc0 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -23,6 +23,8 @@
* manages a cache.
*/

+struct mem_cgroup;
+
struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
unsigned int batchcount;
@@ -55,6 +57,8 @@ struct kmem_cache {
/* 4) cache creation/removal */
const char *name;
struct list_head next;
+ struct mem_cgroup *memcg;
+ struct list_head memcg_list;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
@@ -111,6 +115,15 @@ extern struct cache_sizes malloc_sizes[];
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

+static inline void slab_associate_memcg(struct kmem_cache *kmem, struct mem_cgroup *memcg)
+{
+ kmem->memcg = memcg;
+}
+static inline struct mem_cgroup *slab_cache_memcg(struct kmem_cache *kmem)
+{
+ return kmem->memcg;
+}
+
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(size_t size,
struct kmem_cache *cachep, gfp_t flags);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 71de028..616e9f1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -294,8 +294,16 @@ struct mem_cgroup {
*/
struct mem_cgroup_stat_cpu nocpu_base;
spinlock_t pcp_counter_lock;
+
+ struct kmem_cache *kmem_caches[1024];
+
+// struct kmem_cache *dentry_cache;
};

+static DEFINE_RWLOCK(memcg_slab_lock);
+static LIST_HEAD(memcg_slab_list);
+static int memcg_token = 0;
+
/* Stuffs for move charges at task migration. */
/*
* Types of charges to be moved. "move_charge_at_immitgrate" is treated as a
@@ -2730,6 +2738,84 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
return 0;
}

+/* this header is purely bogus */
+int mem_cgroup_kmem_charge(struct kmem_cache *cachep, gfp_t gfp_mask, int pages)
+{
+ struct mem_cgroup *memcg = slab_cache_memcg(cachep);
+ struct res_counter *dummy;
+ unsigned long long excess;
+ int ret, shrink_attempts;
+
+ if (!memcg)
+ return 0;
+
+ ret = res_counter_charge(&memcg->kmem, PAGE_SIZE * pages, &dummy);
+
+ /* FIXME: This could very well come from charge for free!! */
+ excess = res_counter_soft_limit_excess(&memcg->kmem) >> PAGE_SHIFT;
+
+ /*
+ * If we really have a total kmem cgroup, this one assumes the current
+ * allocator is the one to blame. It might not be ideal in some cases,
+
...
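
The patch gets truncated here. To give an idea of the remaining glue on
the memcontrol.c side, here is a rough sketch of how I see the pieces
tying together; treat it as an illustration of the intent, not as the
missing hunks (memcg_create_kmem_caches() is a made-up name, and I am
assuming the existing mem_cgroup_from_task() helper):

/* Sketch only: hand out tokens and remember registered controllers. */
void register_memcg_slab_ctlr(struct memcg_slab_ctlr *ctlr)
{
	write_lock(&memcg_slab_lock);
	/* the token indexes mem_cgroup->kmem_caches[] */
	ctlr->token = memcg_token++;
	list_add(&ctlr->list, &memcg_slab_list);
	write_unlock(&memcg_slab_lock);
}

/*
 * Sketch only: on cgroup creation, ask every registered controller for
 * a per-cgroup replica of its cache and associate it with the new
 * memcg. Registration happens at boot (e.g. dcache_init()), so I am
 * walking the list unlocked here; kmem_cache_create() can sleep, so a
 * real implementation would need a mutex rather than the rwlock.
 */
static void memcg_create_kmem_caches(struct mem_cgroup *memcg,
				     struct mem_cgroup *parent)
{
	struct memcg_slab_ctlr *ctlr;

	list_for_each_entry(ctlr, &memcg_slab_list, list) {
		struct kmem_cache *cachep;

		cachep = ctlr->memcg_create_kmem_cache(memcg, parent);
		slab_associate_memcg(cachep, memcg);
		memcg->kmem_caches[ctlr->token] = cachep;
	}
}

struct kmem_cache *current_kmem_cache(int token)
{
	struct mem_cgroup *memcg;
	struct kmem_cache *cachep = NULL;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (memcg)
		cachep = memcg->kmem_caches[token];
	rcu_read_unlock();

	return cachep;
}

With something like that in place, __d_alloc() just asks
current_kmem_cache(dcache_ctlr.token) and falls back to the global
dentry_cache when there is no per-cgroup replica, as the dcache hunks
above show.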
