[RFC][PATCH][3/4] Add reclaim support [message #10417 is a reply to message #10413] Mon, 19 February 2007 06:50
Balbir Singh
This patch reclaims pages from a container when the container's limit is hit.
The task is OOM-killed only when the container it is running in is over its
limit and no pages belonging to the container could be reclaimed.

A parameter called pushback controls how much memory is reclaimed when the
limit is hit. It should be easy to expose this knob to user space, but
currently it is hard-coded to 20% of the container's total limit.
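
A rough sketch of that calculation (illustrative only, mirroring
memctlr_check_and_reclaim() in the diff below):

	/* Sketch: derive the reclaim batch size from the container limit */
	static const int pushback = 20;		/* percent of the limit */
	unsigned long nr_pages = (pushback * limit) / 100;
	nr_reclaimed += memctlr_shrink_mapped_memory(nr_pages, cont);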

isolate_lru_pages() has been modified to isolate pages belonging to a
particular container, so that the reclaim code reclaims only that container's
pages. For shared pages, reclaim does not unmap all mappings of the page; it
only unmaps those mappings whose containers are over their limit. This ensures
that other containers are not penalized while shared pages are reclaimed.
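
Roughly, the per-mapping decision looks like this (a sketch only; the real
change lives in the mm/rmap.c hunks, which are not all quoted here):

	/* Sketch: while walking a shared page's reverse mappings */
	if (!memctlr_mm_overlimit(vma->vm_mm, sc_cont))
		continue;	/* owning container is within its limit, skip */
	/* otherwise fall through and unmap the page from this vma */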

Parallel reclaim within a container is not allowed. Each container has a wait
queue that ensures that only one task at a time runs reclaim on that
container.
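
The serialization follows the usual wait-queue pattern, abbreviated here from
memctlr_check_and_reclaim() in the diff below:

	spin_lock(&mem->lock);
	if (mem->reclaim_in_progress) {
		/* someone else is reclaiming for this container, wait */
		spin_unlock(&mem->lock);
		wait_event(mem->wq, !mem->reclaim_in_progress);
	} else {
		mem->reclaim_in_progress = true;
		spin_unlock(&mem->lock);
		memctlr_shrink_mapped_memory(nr_pages, cont);
		spin_lock(&mem->lock);
		mem->reclaim_in_progress = false;
		wake_up_all(&mem->wq);
		spin_unlock(&mem->lock);
	}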


Signed-off-by: Balbir Singh <balbir@in.ibm.com>
---

include/linux/memctlr.h | 8 ++
include/linux/rmap.h | 13 +++-
include/linux/swap.h | 4 +
mm/memctlr.c | 137 ++++++++++++++++++++++++++++++++++++++++++++----
mm/migrate.c | 2
mm/rmap.c | 96 +++++++++++++++++++++++++++++++--
mm/vmscan.c | 94 ++++++++++++++++++++++++++++----
7 files changed, 324 insertions(+), 30 deletions(-)

diff -puN include/linux/memctlr.h~memctlr-reclaim-on-limit include/linux/memctlr.h
--- linux-2.6.20/include/linux/memctlr.h~memctlr-reclaim-on-limit 2007-02-18 23:29:14.000000000 +0530
+++ linux-2.6.20-balbir/include/linux/memctlr.h 2007-02-18 23:29:14.000000000 +0530
@@ -20,6 +20,7 @@ enum {
};

#ifdef CONFIG_CONTAINER_MEMCTLR
+#include <linux/wait.h>

struct res_counter {
atomic_long_t usage; /* The current usage of the resource being */
@@ -33,6 +34,9 @@ extern void memctlr_mm_free(struct mm_st
extern void memctlr_mm_assign_container(struct mm_struct *mm,
struct task_struct *p);
extern int memctlr_update_rss(struct mm_struct *mm, int count, bool check);
+extern int memctlr_mm_overlimit(struct mm_struct *mm, void *sc_cont);
+extern wait_queue_head_t memctlr_reclaim_wq;
+extern bool memctlr_reclaim_in_progress;

#else /* CONFIG_CONTAINER_MEMCTLR */

@@ -56,5 +60,9 @@ static inline int memctlr_update_rss(str
return 0;
}

+static inline int memctlr_mm_overlimit(struct mm_struct *mm, void *sc_cont)
+{
+ return 0;
+}
#endif /* CONFIG_CONTAINER_MEMCTLR */
#endif /* _LINUX_MEMCTLR_H */
diff -puN include/linux/rmap.h~memctlr-reclaim-on-limit include/linux/rmap.h
--- linux-2.6.20/include/linux/rmap.h~memctlr-reclaim-on-limit 2007-02-18 23:29:14.000000000 +0530
+++ linux-2.6.20-balbir/include/linux/rmap.h 2007-02-18 23:29:14.000000000 +0530
@@ -90,7 +90,15 @@ static inline void page_dup_rmap(struct
* Called from mm/vmscan.c to handle paging out
*/
int page_referenced(struct page *, int is_locked);
-int try_to_unmap(struct page *, int ignore_refs);
+int try_to_unmap(struct page *, int ignore_refs, void *container);
+#ifdef CONFIG_CONTAINER_MEMCTLR
+bool page_in_container(struct page *page, struct zone *zone, void *container);
+#else
+static inline bool page_in_container(struct page *page, struct zone *zone, void *container)
+{
+ return true;
+}
+#endif /* CONFIG_CONTAINER_MEMCTLR */

/*
* Called from mm/filemap_xip.c to unmap empty zero page
@@ -118,7 +126,8 @@ int page_mkclean(struct page *);
#define anon_vma_link(vma) do {} while (0)

#define page_referenced(page,l) TestClearPageReferenced(page)
-#define try_to_unmap(page, refs) SWAP_FAIL
+#define try_to_unmap(page, refs, container) SWAP_FAIL
+#define page_in_container(page, zone, container) true

static inline int page_mkclean(struct page *page)
{
diff -puN include/linux/swap.h~memctlr-reclaim-on-limit include/linux/swap.h
--- linux-2.6.20/include/linux/swap.h~memctlr-reclaim-on-limit 2007-02-18 23:29:14.000000000 +0530
+++ linux-2.6.20-balbir/include/linux/swap.h 2007-02-18 23:29:14.000000000 +0530
@@ -188,6 +188,10 @@ extern void swap_setup(void);
/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zone **, gfp_t);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
+#ifdef CONFIG_CONTAINER_MEMCTLR
+extern unsigned long memctlr_shrink_mapped_memory(unsigned long nr_pages,
+ void *container);
+#endif
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern long vm_total_pages;
diff -puN mm/memctlr.c~memctlr-reclaim-on-limit mm/memctlr.c
--- linux-2.6.20/mm/memctlr.c~memctlr-reclaim-on-limit 2007-02-18 23:29:14.000000000 +0530
+++ linux-2.6.20-balbir/mm/memctlr.c 2007-02-18 23:34:51.000000000 +0530
@@ -17,16 +17,26 @@
#include <linux/fs.h>
#include <linux/container.h>
#include <linux/memctlr.h>
+#include <linux/swap.h>

#include <asm/uaccess.h>

-#define RES_USAGE_NO_LIMIT 0
+#define RES_USAGE_NO_LIMIT 0
static const char version[] = "0.1";
+/*
+ * Explore exporting these knobs to user space
+ */
+static const int pushback = 20; /* What percentage of memory to reclaim */
+static const int nr_retries = 5; /* How many times do we try to reclaim */
+
+static atomic_t nr_reclaim;

struct memctlr {
struct container_subsys_state css;
struct res_counter counter;
spinlock_t lock;
+ wait_queue_head_t wq;
+ bool reclaim_in_progress;
};

static struct container_subsys memctlr_subsys;
@@ -42,6 +52,44 @@ static inline struct memctlr *memctlr_fr
return memctlr_from_cont(task_container(p, &memctlr_subsys));
}

+/*
+ * Check whether the mm's container matches the container passed in via the
+ * scan control and, if so, whether that container is over its limit.
+ * Returns 1 if the container is above its limit.
+ */
+int memctlr_mm_overlimit(struct mm_struct *mm, void *sc_cont)
+{
+ struct container *cont;
+ struct memctlr *mem;
+ long usage, limit;
+ int ret = 1;
+
+ if (!sc_cont)
+ return ret;
+
+ read_lock(&mm->container_lock);
+ cont = mm->container;
+
+ /*
+ * Regular reclaim, let it proceed as usual
+ */
+ if (!sc_cont)
+ goto out;
+
+ ret = 0;
+ if (cont != sc_cont)
+ goto out;
+
+ mem = memctlr_from_cont(cont);
+ usage = atomic_long_read(&mem->counter.usage);
+ limit = atomic_long_read(&mem->counter.limit);
+ if (limit && (usage > limit))
+ ret = 1;
+out:
+ read_unlock(&mm->container_lock);
+ return ret;
+}
+
int memctlr_mm_init(struct mm_struct *mm)
{
mm->counter = kmalloc(sizeof(struct res_counter), GFP_KERNEL);
@@ -77,6 +125,46 @@ void memctlr_mm_assign_container(struct
write_unlock(&mm->container_lock);
}

+static int memctlr_check_and_reclaim(struct container *cont, long usage,
+ long limit)
+{
+ unsigned long nr_pages = 0;
+ unsigned long nr_reclaimed = 0;
+ int retries = nr_retries;
+ int ret = 1;
+ struct memctlr *mem;
+
+ mem = memctlr_from_cont(cont);
+ spin_lock(&mem->lock);
+ while ((retries-- > 0) && limit && (usage > limit)) {
+ if (mem->reclaim_in_progress) {
+ spin_unlock(&mem->lock);
+ wait_event(mem->wq, !mem->reclaim_in_progress);
+ spin_lock(&mem->lock);
+ } else {
+ if (!nr_pages)
+ nr_pages = (pushback * limit) / 100;
+ mem->reclaim_in_progress = true;
+ spin_unlock(&mem->lock);
+ nr_reclaimed += memctlr_shrink_mapped_memory(nr_pages,
+ cont);
+ spin_lock(&mem->lock);
+ mem->reclaim_in_progress = false;
+ wake_up_all(&mem->wq);
+ }
+ /*
+ * Resample usage and limit after reclaim
+ */
+ usage = atomic_long_read(&mem->counter.usage);
+ limit = atomic_long_read(&mem->counter.limit);
+ }
+ spin_unlock(&mem->lock);
+
+ if (limit && (usage > limit))
+ ret = 0;
+ return ret;
+}
+
/*
* Update the rss usage counters for the mm_struct and the container it belongs
* to. We do not fail rss for pages shared during fork (see copy_one_pte()).
@@ -99,13 +187,14 @@ int memctlr_update_rss(struct mm_struct
usage = atomic_long_read(&mem->counter.usage);
limit = atomic_long_read(&mem->counter.limit);
usage += count;
- if (check && limit && (usage > limit))
- ret = 0; /* Above limit, fail */
- else {
- atomic_long_add(count, &mem->counter.usage);
- atomic_long_add(count, &mm->counter->usage);
- }

+ if (check) {
+ ret = memctlr_check_and_reclaim(cont, usage, limit);
+ if (!ret)
+ goto done;
+ }
+ atomic_long_add(count, &mem->counter.usage);
+ atomic_long_add(count, &mm->counter->usage);
done:
return ret;
}
@@ -120,6 +209,8 @@ static int memctlr_create(struct contain
cont->subsys[memctlr_subsys.subsys_id] = &mem->css;
atomic_long_set(&mem->counter.usage, 0);
atomic_long_set(&mem->counter.limit, 0);
+ init_waitqueue_head(&mem->wq);
+ mem->reclaim_in_progress = 0;
return 0;
}

@@ -134,8 +225,8 @@ static ssize_t memctlr_write(struct cont
size_t nbytes, loff_t *ppos)
{
char *buffer;
- int ret = 0;
- unsigned long limit;
+ int ret = nbytes;
+ unsigned long cur_limit, limit, usage;
struct memctlr *mem = memctlr_from_cont(cont);

BUG_ON(!mem);
@@ -162,8 +253,16 @@ static ssize_t memctlr_write(struct cont
goto out_unlock;

atomic_long_set(&mem->counter.limit, limit);
+ usage = atomic_long_read(&mem->counter.usage);
+ cur_limit = atomic_long_read(&mem->counter.limit);
+ if (limit && (usage > limit)) {
+ ret = memctlr_check_and_reclaim(cont, usage, cur_limit);
+ if (!ret) {
+ ret = -EAGAIN; /* Try again, later */
+ goto out_unlock;
+ }
+ }

- ret = nbytes;
out_unlock:
container_manage_unlock();
out_err:
@@ -233,6 +332,17 @@ static inline void memctlr_double_unlock
}
}

+int memctlr_can_attach(struct container_subsys *ss, struct container *cont,
+ struct task_struct *p)
+{
+ /*
+ * Allow only the thread grou
...

 