xref: /linux-tools/drivers/mremap/kernel-mremap-pid-patch-6.0.0.diff (revision 74ce4ce33d5b8318cee71b38976a25818e666ff3)
1*74ce4ce3SDavid Wangdiff --git a/include/linux/mm.h b/include/linux/mm.h
2*74ce4ce3SDavid Wangindex 8bbcccbc5565..797cc64fe182 100644
3*74ce4ce3SDavid Wang--- a/include/linux/mm.h
4*74ce4ce3SDavid Wang+++ b/include/linux/mm.h
5*74ce4ce3SDavid Wang@@ -2721,6 +2721,7 @@ unsigned long randomize_stack_top(unsigned long stack_top);
6*74ce4ce3SDavid Wang unsigned long randomize_page(unsigned long start, unsigned long range);
7*74ce4ce3SDavid Wang
8*74ce4ce3SDavid Wang extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
9*74ce4ce3SDavid Wang+extern unsigned long get_unmapped_area2(struct mm_struct *mm, struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
10*74ce4ce3SDavid Wang
11*74ce4ce3SDavid Wang extern unsigned long mmap_region(struct file *file, unsigned long addr,
12*74ce4ce3SDavid Wang 	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
13*74ce4ce3SDavid Wang@@ -3474,5 +3475,7 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
14*74ce4ce3SDavid Wang  * default, the flag is not set.
15*74ce4ce3SDavid Wang  */
16*74ce4ce3SDavid Wang #define  ZAP_FLAG_DROP_MARKER        ((__force zap_flags_t) BIT(0))
17*74ce4ce3SDavid Wang+unsigned long mremap_task(struct task_struct *current_task, unsigned long addr,
18*74ce4ce3SDavid Wang+	unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr);
19*74ce4ce3SDavid Wang
20*74ce4ce3SDavid Wang #endif /* _LINUX_MM_H */
21*74ce4ce3SDavid Wangdiff --git a/mm/mmap.c b/mm/mmap.c
22*74ce4ce3SDavid Wangindex 6e447544f07d..b34fbb6a6776 100644
23*74ce4ce3SDavid Wang--- a/mm/mmap.c
24*74ce4ce3SDavid Wang+++ b/mm/mmap.c
25*74ce4ce3SDavid Wang@@ -1798,6 +1798,50 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
26*74ce4ce3SDavid Wang
27*74ce4ce3SDavid Wang EXPORT_SYMBOL(get_unmapped_area);
28*74ce4ce3SDavid Wang
29*74ce4ce3SDavid Wang+unsigned long
30*74ce4ce3SDavid Wang+get_unmapped_area2(struct mm_struct *mm, struct file *file, unsigned long addr, unsigned long len,
31*74ce4ce3SDavid Wang+		unsigned long pgoff, unsigned long flags)
32*74ce4ce3SDavid Wang+{
33*74ce4ce3SDavid Wang+	unsigned long (*get_area)(struct file *, unsigned long,
34*74ce4ce3SDavid Wang+				  unsigned long, unsigned long, unsigned long);
35*74ce4ce3SDavid Wang+
36*74ce4ce3SDavid Wang+	unsigned long error = arch_mmap_check(addr, len, flags);
37*74ce4ce3SDavid Wang+	if (error)
38*74ce4ce3SDavid Wang+		return error;
39*74ce4ce3SDavid Wang+
40*74ce4ce3SDavid Wang+	/* Careful about overflows.. */
41*74ce4ce3SDavid Wang+	if (len > TASK_SIZE)
42*74ce4ce3SDavid Wang+		return -ENOMEM;
43*74ce4ce3SDavid Wang+
44*74ce4ce3SDavid Wang+	get_area = mm->get_unmapped_area;
45*74ce4ce3SDavid Wang+	if (file) {
46*74ce4ce3SDavid Wang+		if (file->f_op->get_unmapped_area)
47*74ce4ce3SDavid Wang+			get_area = file->f_op->get_unmapped_area;
48*74ce4ce3SDavid Wang+	} else if (flags & MAP_SHARED) {
49*74ce4ce3SDavid Wang+		/*
50*74ce4ce3SDavid Wang+		 * mmap_region() will call shmem_zero_setup() to create a file,
51*74ce4ce3SDavid Wang+		 * so use shmem's get_unmapped_area in case it can be huge.
52*74ce4ce3SDavid Wang+		 * do_mmap() will clear pgoff, so match alignment.
53*74ce4ce3SDavid Wang+		 */
54*74ce4ce3SDavid Wang+		pgoff = 0;
55*74ce4ce3SDavid Wang+		get_area = shmem_get_unmapped_area;
56*74ce4ce3SDavid Wang+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
57*74ce4ce3SDavid Wang+		/* Ensures that larger anonymous mappings are THP aligned. */
58*74ce4ce3SDavid Wang+		get_area = thp_get_unmapped_area;
59*74ce4ce3SDavid Wang+	}
60*74ce4ce3SDavid Wang+
61*74ce4ce3SDavid Wang+	addr = get_area(file, addr, len, pgoff, flags);
62*74ce4ce3SDavid Wang+	if (IS_ERR_VALUE(addr))
63*74ce4ce3SDavid Wang+		return addr;
64*74ce4ce3SDavid Wang+
65*74ce4ce3SDavid Wang+	if (addr > TASK_SIZE - len)
66*74ce4ce3SDavid Wang+		return -ENOMEM;
67*74ce4ce3SDavid Wang+	if (offset_in_page(addr))
68*74ce4ce3SDavid Wang+		return -EINVAL;
69*74ce4ce3SDavid Wang+
70*74ce4ce3SDavid Wang+	error = security_mmap_addr(addr);
71*74ce4ce3SDavid Wang+	return error ? error : addr;
72*74ce4ce3SDavid Wang+}
73*74ce4ce3SDavid Wang /**
74*74ce4ce3SDavid Wang  * find_vma_intersection() - Look up the first VMA which intersects the interval
75*74ce4ce3SDavid Wang  * @mm: The process address space.
76*74ce4ce3SDavid Wang@@ -3308,8 +3352,8 @@ static int special_mapping_mremap(struct vm_area_struct *new_vma)
77*74ce4ce3SDavid Wang {
78*74ce4ce3SDavid Wang 	struct vm_special_mapping *sm = new_vma->vm_private_data;
79*74ce4ce3SDavid Wang
80*74ce4ce3SDavid Wang-	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
81*74ce4ce3SDavid Wang-		return -EFAULT;
82*74ce4ce3SDavid Wang+	// if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
83*74ce4ce3SDavid Wang+	//	return -EFAULT;
84*74ce4ce3SDavid Wang
85*74ce4ce3SDavid Wang 	if (sm->mremap)
86*74ce4ce3SDavid Wang 		return sm->mremap(sm, new_vma);
87*74ce4ce3SDavid Wangdiff --git a/mm/mremap.c b/mm/mremap.c
88*74ce4ce3SDavid Wangindex e465ffe279bb..01bba4bf97f6 100644
89*74ce4ce3SDavid Wang--- a/mm/mremap.c
90*74ce4ce3SDavid Wang+++ b/mm/mremap.c
91*74ce4ce3SDavid Wang@@ -724,10 +724,11 @@ static unsigned long move_vma(struct vm_area_struct *vma,
92*74ce4ce3SDavid Wang 	return new_addr;
93*74ce4ce3SDavid Wang }
94*74ce4ce3SDavid Wang
95*74ce4ce3SDavid Wang-static struct vm_area_struct *vma_to_resize(unsigned long addr,
96*74ce4ce3SDavid Wang+static struct vm_area_struct *vma_to_resize(struct mm_struct *mm,
97*74ce4ce3SDavid Wang+	unsigned long addr,
98*74ce4ce3SDavid Wang 	unsigned long old_len, unsigned long new_len, unsigned long flags)
99*74ce4ce3SDavid Wang {
100*74ce4ce3SDavid Wang-	struct mm_struct *mm = current->mm;
101*74ce4ce3SDavid Wang+	// struct mm_struct *mm = current->mm;
102*74ce4ce3SDavid Wang 	struct vm_area_struct *vma;
103*74ce4ce3SDavid Wang 	unsigned long pgoff;
104*74ce4ce3SDavid Wang
105*74ce4ce3SDavid Wang@@ -778,13 +779,14 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
106*74ce4ce3SDavid Wang 	return vma;
107*74ce4ce3SDavid Wang }
108*74ce4ce3SDavid Wang
109*74ce4ce3SDavid Wang-static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
110*74ce4ce3SDavid Wang+static unsigned long mremap_to(struct mm_struct *mm,
111*74ce4ce3SDavid Wang+		unsigned long addr, unsigned long old_len,
112*74ce4ce3SDavid Wang 		unsigned long new_addr, unsigned long new_len, bool *locked,
113*74ce4ce3SDavid Wang 		unsigned long flags, struct vm_userfaultfd_ctx *uf,
114*74ce4ce3SDavid Wang 		struct list_head *uf_unmap_early,
115*74ce4ce3SDavid Wang 		struct list_head *uf_unmap)
116*74ce4ce3SDavid Wang {
117*74ce4ce3SDavid Wang-	struct mm_struct *mm = current->mm;
118*74ce4ce3SDavid Wang+	// struct mm_struct *mm = current->mm;
119*74ce4ce3SDavid Wang 	struct vm_area_struct *vma;
120*74ce4ce3SDavid Wang 	unsigned long ret = -EINVAL;
121*74ce4ce3SDavid Wang 	unsigned long map_flags = 0;
122*74ce4ce3SDavid Wang@@ -829,7 +831,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
123*74ce4ce3SDavid Wang 		old_len = new_len;
124*74ce4ce3SDavid Wang 	}
125*74ce4ce3SDavid Wang
126*74ce4ce3SDavid Wang-	vma = vma_to_resize(addr, old_len, new_len, flags);
127*74ce4ce3SDavid Wang+	vma = vma_to_resize(mm, addr, old_len, new_len, flags);
128*74ce4ce3SDavid Wang 	if (IS_ERR(vma)) {
129*74ce4ce3SDavid Wang 		ret = PTR_ERR(vma);
130*74ce4ce3SDavid Wang 		goto out;
131*74ce4ce3SDavid Wang@@ -848,7 +850,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
132*74ce4ce3SDavid Wang 	if (vma->vm_flags & VM_MAYSHARE)
133*74ce4ce3SDavid Wang 		map_flags |= MAP_SHARED;
134*74ce4ce3SDavid Wang
135*74ce4ce3SDavid Wang-	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
136*74ce4ce3SDavid Wang+	ret = get_unmapped_area2(mm, vma->vm_file, new_addr, new_len, vma->vm_pgoff +
137*74ce4ce3SDavid Wang 				((addr - vma->vm_start) >> PAGE_SHIFT),
138*74ce4ce3SDavid Wang 				map_flags);
139*74ce4ce3SDavid Wang 	if (IS_ERR_VALUE(ret))
140*74ce4ce3SDavid Wang@@ -890,7 +892,13 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
141*74ce4ce3SDavid Wang 		unsigned long, new_len, unsigned long, flags,
142*74ce4ce3SDavid Wang 		unsigned long, new_addr)
143*74ce4ce3SDavid Wang {
144*74ce4ce3SDavid Wang-	struct mm_struct *mm = current->mm;
145*74ce4ce3SDavid Wang+	return mremap_task(current, addr, old_len, new_len, flags, new_addr);
146*74ce4ce3SDavid Wang+}
147*74ce4ce3SDavid Wang+
148*74ce4ce3SDavid Wang+unsigned long mremap_task(struct task_struct *current_task, unsigned long addr,
149*74ce4ce3SDavid Wang+	unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr);
150*74ce4ce3SDavid Wang+{
151*74ce4ce3SDavid Wang+	struct mm_struct *mm = current_task->mm;
152*74ce4ce3SDavid Wang 	struct vm_area_struct *vma;
153*74ce4ce3SDavid Wang 	unsigned long ret = -EINVAL;
154*74ce4ce3SDavid Wang 	bool locked = false;
155*74ce4ce3SDavid Wang@@ -940,7 +948,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
156*74ce4ce3SDavid Wang 	if (!new_len)
157*74ce4ce3SDavid Wang 		return ret;
158*74ce4ce3SDavid Wang
159*74ce4ce3SDavid Wang-	if (mmap_write_lock_killable(current->mm))
160*74ce4ce3SDavid Wang+	if (mmap_write_lock_killable(current_task->mm))
161*74ce4ce3SDavid Wang 		return -EINTR;
162*74ce4ce3SDavid Wang 	vma = vma_lookup(mm, addr);
163*74ce4ce3SDavid Wang 	if (!vma) {
164*74ce4ce3SDavid Wang@@ -969,7 +977,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
165*74ce4ce3SDavid Wang 	}
166*74ce4ce3SDavid Wang
167*74ce4ce3SDavid Wang 	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
168*74ce4ce3SDavid Wang-		ret = mremap_to(addr, old_len, new_addr, new_len,
169*74ce4ce3SDavid Wang+		ret = mremap_to(mm, addr, old_len, new_addr, new_len,
170*74ce4ce3SDavid Wang 				&locked, flags, &uf, &uf_unmap_early,
171*74ce4ce3SDavid Wang 				&uf_unmap);
172*74ce4ce3SDavid Wang 		goto out;
173*74ce4ce3SDavid Wang@@ -1002,7 +1010,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
174*74ce4ce3SDavid Wang 	/*
175*74ce4ce3SDavid Wang 	 * Ok, we need to grow..
176*74ce4ce3SDavid Wang 	 */
177*74ce4ce3SDavid Wang-	vma = vma_to_resize(addr, old_len, new_len, flags);
178*74ce4ce3SDavid Wang+	vma = vma_to_resize(mm, addr, old_len, new_len, flags);
179*74ce4ce3SDavid Wang 	if (IS_ERR(vma)) {
180*74ce4ce3SDavid Wang 		ret = PTR_ERR(vma);
181*74ce4ce3SDavid Wang 		goto out;
182*74ce4ce3SDavid Wang@@ -1079,9 +1087,9 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
183*74ce4ce3SDavid Wang 	if (offset_in_page(ret))
184*74ce4ce3SDavid Wang 		locked = false;
185*74ce4ce3SDavid Wang 	if (downgraded)
186*74ce4ce3SDavid Wang-		mmap_read_unlock(current->mm);
187*74ce4ce3SDavid Wang+		mmap_read_unlock(current_task->mm);
188*74ce4ce3SDavid Wang 	else
189*74ce4ce3SDavid Wang-		mmap_write_unlock(current->mm);
190*74ce4ce3SDavid Wang+		mmap_write_unlock(current_task->mm);
191*74ce4ce3SDavid Wang 	if (locked && new_len > old_len)
192*74ce4ce3SDavid Wang 		mm_populate(new_addr + old_len, new_len - old_len);
193*74ce4ce3SDavid Wang 	userfaultfd_unmap_complete(mm, &uf_unmap_early);
194*74ce4ce3SDavid Wang@@ -1089,3 +1097,4 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
195*74ce4ce3SDavid Wang 	userfaultfd_unmap_complete(mm, &uf_unmap);
196*74ce4ce3SDavid Wang 	return ret;
197*74ce4ce3SDavid Wang }
198*74ce4ce3SDavid Wang+EXPORT_SYMBOL(mremap_task);
199