| Index: fs/proc/task_mmu.c
|
| diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
|
| index 246eae84b13b1be4bf36bdf48fe50b178a9cb407..7967535f7e27791cb2f5eaad4a55b2e9d03b231b 100644
|
| --- a/fs/proc/task_mmu.c
|
| +++ b/fs/proc/task_mmu.c
|
| @@ -747,6 +747,7 @@ enum clear_refs_types {
|
| CLEAR_REFS_ANON,
|
| CLEAR_REFS_MAPPED,
|
| CLEAR_REFS_SOFT_DIRTY,
|
| + CLEAR_REFS_MM_HIWATER_RSS,
|
| CLEAR_REFS_LAST,
|
| };
|
|
|
| @@ -852,59 +853,72 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
|
| if (!task)
|
| return -ESRCH;
|
| mm = get_task_mm(task);
|
| if (mm) {
|
| struct clear_refs_private cp = {
|
| .type = type,
|
| };
|
| struct mm_walk clear_refs_walk = {
|
| .pmd_entry = clear_refs_pte_range,
|
| .mm = mm,
|
| .private = &cp,
|
| };
|
| +
|
| + if (type == CLEAR_REFS_MM_HIWATER_RSS) {
|
| + /*
|
| + * Writing 5 to /proc/pid/clear_refs resets the peak
|
| + * resident set size of this mm.
|
| + */
|
| + down_write(&mm->mmap_sem);
|
| + reset_mm_hiwater_rss(mm);
|
| + up_write(&mm->mmap_sem);
|
| + goto out_mm;
|
| + }
|
| +
|
| down_read(&mm->mmap_sem);
|
| if (type == CLEAR_REFS_SOFT_DIRTY) {
|
| for (vma = mm->mmap; vma; vma = vma->vm_next) {
|
| if (!(vma->vm_flags & VM_SOFTDIRTY))
|
| continue;
|
| up_read(&mm->mmap_sem);
|
| down_write(&mm->mmap_sem);
|
| for (vma = mm->mmap; vma; vma = vma->vm_next) {
|
| vma->vm_flags &= ~VM_SOFTDIRTY;
|
| vma_set_page_prot(vma);
|
| }
|
| downgrade_write(&mm->mmap_sem);
|
| break;
|
| }
|
| mmu_notifier_invalidate_range_start(mm, 0, -1);
|
| }
|
| for (vma = mm->mmap; vma; vma = vma->vm_next) {
|
| cp.vma = vma;
|
| if (is_vm_hugetlb_page(vma))
|
| continue;
|
| /*
|
| * Writing 1 to /proc/pid/clear_refs affects all pages.
|
| *
|
| * Writing 2 to /proc/pid/clear_refs only affects
|
| * Anonymous pages.
|
| *
|
| * Writing 3 to /proc/pid/clear_refs only affects file
|
| * mapped pages.
|
| *
|
| * Writing 4 to /proc/pid/clear_refs affects all pages.
|
| */
|
| if (type == CLEAR_REFS_ANON && vma->vm_file)
|
| continue;
|
| if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
|
| continue;
|
| walk_page_range(vma->vm_start, vma->vm_end,
|
| &clear_refs_walk);
|
| }
|
| if (type == CLEAR_REFS_SOFT_DIRTY)
|
| mmu_notifier_invalidate_range_end(mm, 0, -1);
|
| flush_tlb_mm(mm);
|
| up_read(&mm->mmap_sem);
|
| +out_mm:
|
| mmput(mm);
|
| }
|
| put_task_struct(task);
|
|
|
| return count;
|