mm: memcontrol: rewrite charge API
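This commit replaces the old one-shot charging calls seen on the "-" lines below (mem_cgroup_charge_anon(), mem_cgroup_uncharge_page(), and the _swapin variants) with a transactional protocol: try_charge reserves the charge up front, commit_charge binds it to the page once the reverse mapping exists, and cancel_charge backs the reservation out on error paths. A condensed sketch of the pattern, assembled from the hunks below (the oom label and the elided pte setup are illustrative, not part of the patch):

	struct mem_cgroup *memcg;

	/* 1. Reserve: may reclaim or OOM, so it runs before any page state changes. */
	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
		goto oom;

	/* ... allocate/initialize the page, set up the pte ... */
	page_add_new_anon_rmap(page, vma, address);

	/*
	 * 2. Commit once the rmap is established. The third argument is true
	 *    only when the page may already be on the LRU, as in the
	 *    swap-cache path of do_swap_page() below; new pages pass false
	 *    and are added to the LRU explicitly afterwards.
	 */
	mem_cgroup_commit_charge(page, memcg, false);
	lru_cache_add_active_or_unevictable(page, vma);

	/* 3. On any failure between steps 1 and 2: mem_cgroup_cancel_charge(page, memcg); */

Note how every converted call site gains a struct mem_cgroup *memcg local to carry the reservation between the three steps.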
diff --git a/mm/memory.c b/mm/memory.c
index dc47261..6d76487 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2049,6 +2049,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *dirty_page = NULL;
 	unsigned long mmun_start = 0;	/* For mmu_notifiers */
 	unsigned long mmun_end = 0;	/* For mmu_notifiers */
+	struct mem_cgroup *memcg;
 
 	old_page = vm_normal_page(vma, address, orig_pte);
 	if (!old_page) {
@@ -2204,7 +2205,7 @@ gotten:
 	}
 	__SetPageUptodate(new_page);
 
-	if (mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))
+	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg))
 		goto oom_free_new;
 
 	mmun_start = address & PAGE_MASK;
@@ -2234,6 +2235,8 @@ gotten:
 		 */
 		ptep_clear_flush(vma, address, page_table);
 		page_add_new_anon_rmap(new_page, vma, address);
+		mem_cgroup_commit_charge(new_page, memcg, false);
+		lru_cache_add_active_or_unevictable(new_page, vma);
 		/*
 		 * We call the notify macro here because, when using secondary
 		 * mmu page tables (such as kvm shadow page tables), we want the
@@ -2271,7 +2274,7 @@ gotten:
 		new_page = old_page;
 		ret |= VM_FAULT_WRITE;
 	} else
-		mem_cgroup_uncharge_page(new_page);
+		mem_cgroup_cancel_charge(new_page, memcg);
 
 	if (new_page)
 		page_cache_release(new_page);
@@ -2410,10 +2413,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	spinlock_t *ptl;
 	struct page *page, *swapcache;
+	struct mem_cgroup *memcg;
 	swp_entry_t entry;
 	pte_t pte;
 	int locked;
-	struct mem_cgroup *ptr;
 	int exclusive = 0;
 	int ret = 0;
 
@@ -2489,7 +2492,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_page;
 	}
 
-	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
+	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) {
 		ret = VM_FAULT_OOM;
 		goto out_page;
 	}
@@ -2514,10 +2517,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * while the page is counted on swap but not yet in mapcount i.e.
 	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
 	 * must be called after the swap_free(), or it will never succeed.
-	 * Because delete_from_swap_page() may be called by reuse_swap_page(),
-	 * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
-	 * in page->private. In this case, a record in swap_cgroup is silently
-	 * discarded at swap_free().
 	 */
 
 	inc_mm_counter_fast(mm, MM_ANONPAGES);
@@ -2533,12 +2532,14 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (pte_swp_soft_dirty(orig_pte))
 		pte = pte_mksoft_dirty(pte);
 	set_pte_at(mm, address, page_table, pte);
-	if (page == swapcache)
+	if (page == swapcache) {
 		do_page_add_anon_rmap(page, vma, address, exclusive);
-	else /* ksm created a completely new copy */
+		mem_cgroup_commit_charge(page, memcg, true);
+	} else { /* ksm created a completely new copy */
 		page_add_new_anon_rmap(page, vma, address);
-	/* It's better to call commit-charge after rmap is established */
-	mem_cgroup_commit_charge_swapin(page, ptr);
+		mem_cgroup_commit_charge(page, memcg, false);
+		lru_cache_add_active_or_unevictable(page, vma);
+	}
 
 	swap_free(entry);
 	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
@@ -2571,7 +2572,7 @@ unlock:
 out:
 	return ret;
 out_nomap:
-	mem_cgroup_cancel_charge_swapin(ptr);
+	mem_cgroup_cancel_charge(page, memcg);
 	pte_unmap_unlock(page_table, ptl);
 out_page:
 	unlock_page(page);
@@ -2627,6 +2628,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
 		unsigned int flags)
 {
+	struct mem_cgroup *memcg;
 	struct page *page;
 	spinlock_t *ptl;
 	pte_t entry;
@@ -2660,7 +2662,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	__SetPageUptodate(page);
 
-	if (mem_cgroup_charge_anon(page, mm, GFP_KERNEL))
+	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
 		goto oom_free_page;
 
 	entry = mk_pte(page, vma->vm_page_prot);
@@ -2673,6 +2675,8 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	inc_mm_counter_fast(mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, address);
+	mem_cgroup_commit_charge(page, memcg, false);
+	lru_cache_add_active_or_unevictable(page, vma);
 setpte:
 	set_pte_at(mm, address, page_table, entry);
 
@@ -2682,7 +2686,7 @@ unlock:
 	pte_unmap_unlock(page_table, ptl);
 	return 0;
 release:
-	mem_cgroup_uncharge_page(page);
+	mem_cgroup_cancel_charge(page, memcg);
 	page_cache_release(page);
 	goto unlock;
 oom_free_page:
@@ -2766,7 +2770,8 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	update_mmu_cache(vma, address, pte);
 }
 
-static unsigned long fault_around_bytes = rounddown_pow_of_two(65536);
+static unsigned long fault_around_bytes __read_mostly =
+	rounddown_pow_of_two(65536);
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
@@ -2918,6 +2923,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
 {
 	struct page *fault_page, *new_page;
+	struct mem_cgroup *memcg;
 	spinlock_t *ptl;
 	pte_t *pte;
 	int ret;
@@ -2929,7 +2935,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!new_page)
 		return VM_FAULT_OOM;
 
-	if (mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)) {
+	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) {
 		page_cache_release(new_page);
 		return VM_FAULT_OOM;
 	}
@@ -2949,12 +2955,14 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto uncharge_out;
 	}
 	do_set_pte(vma, address, new_page, pte, true, true);
+	mem_cgroup_commit_charge(new_page, memcg, false);
+	lru_cache_add_active_or_unevictable(new_page, vma);
 	pte_unmap_unlock(pte, ptl);
 	unlock_page(fault_page);
 	page_cache_release(fault_page);
 	return ret;
 uncharge_out:
-	mem_cgroup_uncharge_page(new_page);
+	mem_cgroup_cancel_charge(new_page, memcg);
 	page_cache_release(new_page);
 	return ret;
 }
@@ -3612,11 +3620,13 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 		ret = get_user_pages(tsk, mm, addr, 1,
 				write, 1, &page, &vma);
 		if (ret <= 0) {
+#ifndef CONFIG_HAVE_IOREMAP_PROT
+			break;
+#else
 			/*
 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
 			 * we can access using slightly different code.
 			 */
-#ifdef CONFIG_HAVE_IOREMAP_PROT
 			vma = find_vma(mm, addr);
 			if (!vma || vma->vm_start > addr)
 				break;
@@ -3624,9 +3634,9 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 			ret = vma->vm_ops->access(vma, addr, buf,
 						  len, write);
 			if (ret <= 0)
-#endif
 				break;
 			bytes = ret;
+#endif
 		} else {
 			bytes = len;
 			offset = addr & (PAGE_SIZE-1);