xref: /linux-tools/drivers/mremap/kernel-mremap-pid-patch-6.0.0.diff (revision 74ce4ce33d5b8318cee71b38976a25818e666ff3)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8bbcccbc5565..797cc64fe182 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2721,6 +2721,7 @@ unsigned long randomize_stack_top(unsigned long stack_top);
 unsigned long randomize_page(unsigned long start, unsigned long range);
 
 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+extern unsigned long get_unmapped_area2(struct mm_struct *mm, struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
 	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
@@ -3474,5 +3475,7 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
  * default, the flag is not set.
  */
 #define  ZAP_FLAG_DROP_MARKER        ((__force zap_flags_t) BIT(0))
+unsigned long mremap_task(struct task_struct *current_task, unsigned long addr,
+    unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr);
 
 #endif /* _LINUX_MM_H */
diff --git a/mm/mmap.c b/mm/mmap.c
index 6e447544f07d..b34fbb6a6776 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1798,6 +1798,50 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 
 EXPORT_SYMBOL(get_unmapped_area);
 
+unsigned long
+get_unmapped_area2(struct mm_struct *mm, struct file *file, unsigned long addr, unsigned long len,
+		unsigned long pgoff, unsigned long flags)
+{
+	unsigned long (*get_area)(struct file *, unsigned long,
+				  unsigned long, unsigned long, unsigned long);
+
+	unsigned long error = arch_mmap_check(addr, len, flags);
+	if (error)
+		return error;
+
+	/* Careful about overflows.. */
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	get_area = mm->get_unmapped_area;
+	if (file) {
+		if (file->f_op->get_unmapped_area)
+			get_area = file->f_op->get_unmapped_area;
+	} else if (flags & MAP_SHARED) {
+		/*
+		 * mmap_region() will call shmem_zero_setup() to create a file,
+		 * so use shmem's get_unmapped_area in case it can be huge.
+		 * do_mmap() will clear pgoff, so match alignment.
+		 */
+		pgoff = 0;
+		get_area = shmem_get_unmapped_area;
+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+		/* Ensures that larger anonymous mappings are THP aligned. */
+		get_area = thp_get_unmapped_area;
+	}
+
+	addr = get_area(file, addr, len, pgoff, flags);
+	if (IS_ERR_VALUE(addr))
+		return addr;
+
+	if (addr > TASK_SIZE - len)
+		return -ENOMEM;
+	if (offset_in_page(addr))
+		return -EINVAL;
+
+	error = security_mmap_addr(addr);
+	return error ? error : addr;
+}
 /**
  * find_vma_intersection() - Look up the first VMA which intersects the interval
  * @mm: The process address space.
@@ -3308,8 +3352,8 @@ static int special_mapping_mremap(struct vm_area_struct *new_vma)
 {
 	struct vm_special_mapping *sm = new_vma->vm_private_data;
 
-	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
-		return -EFAULT;
+	// if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
+	//	return -EFAULT;
 
 	if (sm->mremap)
 		return sm->mremap(sm, new_vma);
diff --git a/mm/mremap.c b/mm/mremap.c
index e465ffe279bb..01bba4bf97f6 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -724,10 +724,11 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	return new_addr;
 }
 
-static struct vm_area_struct *vma_to_resize(unsigned long addr,
+static struct vm_area_struct *vma_to_resize(struct mm_struct *mm,
+        unsigned long addr,
 	unsigned long old_len, unsigned long new_len, unsigned long flags)
 {
-	struct mm_struct *mm = current->mm;
+	// struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long pgoff;
 
@@ -778,13 +779,14 @@ static unsigned long vma_to_resize(unsigned long addr,
 	return vma;
 }
 
-static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
+static unsigned long mremap_to(struct mm_struct *mm,
+        unsigned long addr, unsigned long old_len,
 		unsigned long new_addr, unsigned long new_len, bool *locked,
 		unsigned long flags, struct vm_userfaultfd_ctx *uf,
 		struct list_head *uf_unmap_early,
 		struct list_head *uf_unmap)
 {
-	struct mm_struct *mm = current->mm;
+	// struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long ret = -EINVAL;
 	unsigned long map_flags = 0;
@@ -829,7 +831,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 		old_len = new_len;
 	}
 
-	vma = vma_to_resize(addr, old_len, new_len, flags);
+	vma = vma_to_resize(mm, addr, old_len, new_len, flags);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto out;
@@ -848,7 +850,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 	if (vma->vm_flags & VM_MAYSHARE)
 		map_flags |= MAP_SHARED;
 
-	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
+	ret = get_unmapped_area2(mm, vma->vm_file, new_addr, new_len, vma->vm_pgoff +
 				((addr - vma->vm_start) >> PAGE_SHIFT),
 				map_flags);
 	if (IS_ERR_VALUE(ret))
@@ -890,7 +892,13 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 		unsigned long, new_len, unsigned long, flags,
 		unsigned long, new_addr)
 {
-	struct mm_struct *mm = current->mm;
+    return mremap_task(current, addr, old_len, new_len, flags, new_addr);
+}
+
+unsigned long mremap_task(struct task_struct *current_task, unsigned long addr,
+    unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr)
+{
+	struct mm_struct *mm = current_task->mm;
 	struct vm_area_struct *vma;
 	unsigned long ret = -EINVAL;
 	bool locked = false;
@@ -940,7 +948,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	if (!new_len)
 		return ret;
 
-	if (mmap_write_lock_killable(current->mm))
+	if (mmap_write_lock_killable(current_task->mm))
 		return -EINTR;
 	vma = vma_lookup(mm, addr);
 	if (!vma) {
@@ -969,7 +977,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	}
 
 	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
-		ret = mremap_to(addr, old_len, new_addr, new_len,
+		ret = mremap_to(mm, addr, old_len, new_addr, new_len,
 				&locked, flags, &uf, &uf_unmap_early,
 				&uf_unmap);
 		goto out;
@@ -1002,7 +1010,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	/*
 	 * Ok, we need to grow..
 	 */
-	vma = vma_to_resize(addr, old_len, new_len, flags);
+	vma = vma_to_resize(mm, addr, old_len, new_len, flags);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto out;
@@ -1079,9 +1087,9 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	if (offset_in_page(ret))
 		locked = false;
 	if (downgraded)
-		mmap_read_unlock(current->mm);
+		mmap_read_unlock(current_task->mm);
 	else
-		mmap_write_unlock(current->mm);
+		mmap_write_unlock(current_task->mm);
 	if (locked && new_len > old_len)
 		mm_populate(new_addr + old_len, new_len - old_len);
 	userfaultfd_unmap_complete(mm, &uf_unmap_early);
@@ -1089,3 +1097,4 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	userfaultfd_unmap_complete(mm, &uf_unmap);
 	return ret;
 }
+EXPORT_SYMBOL(mremap_task);
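Usage sketch (not part of the patch): with mremap_task() exported via EXPORT_SYMBOL, an out-of-tree module could drive an mremap on behalf of another process identified by PID. The helper below, remap_in_task(), is hypothetical and only illustrates the calling convention under stated assumptions: the patch above is applied, MREMAP_MAYMOVE is passed with new_addr == 0 so the kernel picks the destination, and the target mm is pinned with get_task_mm() because mremap_task() dereferences current_task->mm and takes its mmap write lock internally, exactly as the original mremap() syscall path did.

/* Hypothetical caller of mremap_task(); a sketch, not part of the patch above. */
#include <linux/errno.h>
#include <linux/mm.h>		/* mremap_task() declaration added by the patch */
#include <linux/mman.h>		/* MREMAP_MAYMOVE */
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>	/* get_task_mm(), mmput() */
#include <linux/sched/task.h>	/* put_task_struct() */

static unsigned long remap_in_task(pid_t nr, unsigned long addr,
				   unsigned long old_len, unsigned long new_len)
{
	struct pid *pid;
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned long ret;

	/* Resolve the PID and take a reference on the target task. */
	pid = find_get_pid(nr);
	task = get_pid_task(pid, PIDTYPE_PID);
	put_pid(pid);
	if (!task)
		return -ESRCH;

	/* Pin the target mm; NULL means a kernel thread or an exiting task. */
	mm = get_task_mm(task);
	if (!mm) {
		put_task_struct(task);
		return -EINVAL;
	}

	/* Let the kernel choose the new address; mremap_task() locks mm itself. */
	ret = mremap_task(task, addr, old_len, new_len, MREMAP_MAYMOVE, 0);

	mmput(mm);
	put_task_struct(task);
	return ret;
}

As with the syscall path, the return value is either the new mapping address or a negative errno encoded in an unsigned long, so callers should test it with IS_ERR_VALUE().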