#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}
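
/*
 * The iterate_* helpers above evaluate the STEP expression once per
 * contiguous segment, with v describing that segment; for user iovecs the
 * expression yields the number of bytes *not* processed, which is how short
 * copies propagate.  A minimal sketch of the calling pattern (mirroring
 * _copy_to_iter() further down; the function name is illustrative only):
 *
 *	static size_t example_copy_to_iter(const void *addr, size_t bytes,
 *					   struct iov_iter *i)
 *	{
 *		const char *from = addr;
 *		iterate_and_advance(i, bytes, v,
 *			copyout(v.iov_base, (from += v.iov_len) - v.iov_len,
 *				v.iov_len),
 *			memcpy_to_page(v.bv_page, v.bv_offset,
 *				(from += v.bv_len) - v.bv_len, v.bv_len),
 *			memcpy(v.iov_base, (from += v.iov_len) - v.iov_len,
 *				v.iov_len)
 *		)
 *		return bytes;
 *	}
 */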

static int copyout(void __user *to, const void *from, size_t n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (access_ok(VERIFY_READ, from, n)) {
		kasan_check_write(to, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	buf->flags = 0;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (iter_is_iovec(i)) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
			0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
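
/*
 * Typical use (a sketch, modelled on the generic write path): pre-fault the
 * user pages before taking locks under which a page fault must not be
 * serviced, then do the copy with pagefaults disabled and retry if it comes
 * up short.  Names beyond the two functions from this file are illustrative:
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes)))
 *		return -EFAULT;
 *	...lock the page...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 */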

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (uaccess_kernel()) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
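
/*
 * Example (illustrative): wrapping a single user buffer for a read.  This
 * is essentially what import_single_range() at the bottom of this file does
 * for its callers:
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 */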

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].flags = 0;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = copy_to_user_mcsafe((__force void *) to, from, n);
	}
	return n;
}

static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
		const char *from, size_t len)
{
	unsigned long ret;
	char *to;

	to = kmap_atomic(page);
	ret = memcpy_mcsafe(to + offset, from, len);
	kunmap_atomic(to);

	return ret;
}

static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off, xfer = 0;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		unsigned long rem;

		rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
				chunk);
		i->idx = idx;
		i->iov_offset = off + chunk - rem;
		xfer += chunk - rem;
		if (rem)
			break;
		n -= chunk;
		addr += chunk;
	}
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @iter: destination iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_to_iter() for protecting read/write to persistent memory.
 * Unless / until an architecture can guarantee identical performance
 * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
 * performance regression to switch more users to the mcsafe version.
 *
 * Otherwise, the main differences between this and the typical
 * _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * See MCSAFE_TEST for self-test.
 */
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter_mcsafe(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		({
		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
				v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
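
/*
 * Sketch of the intended caller, per the comment above: a dax/pmem
 * ->copy_to_iter() implementation funnels reads of persistent memory
 * through the mcsafe variant so that consuming poison is survivable.
 * The wrapper below is hypothetical, for illustration only:
 *
 *	static size_t pmem_copy_to_iter(void *pmem_addr, size_t bytes,
 *					struct iov_iter *i)
 *	{
 *		return _copy_to_iter_mcsafe(pmem_addr, bytes, i);
 *	}
 *
 * A short return means a source-side machine check was consumed; the
 * caller must not retry the poisoned range byte-by-byte.
 */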
#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	if (iter_is_iovec(i))
		might_fault();
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @iter: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
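
/*
 * As with the mcsafe variant above, the expected consumer is a dax/pmem
 * ->copy_from_iter() hook; a minimal sketch (hypothetical wrapper):
 *
 *	static size_t pmem_copy_from_iter(void *pmem_addr, size_t bytes,
 *					  struct iov_iter *i)
 *	{
 *		return _copy_from_iter_flushcache(pmem_addr, bytes, i);
 *	}
 */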
#endif

bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs the page order to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
		return true;
	WARN_ON(1);
	return false;
}
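
/*
 * Worked example for the check above: for an order-2 compound page (four
 * subpages), a copy of n = 2 * PAGE_SIZE starting at offset 0 of subpage 1
 * gives
 *
 *	v = n + offset + ((page - head) << PAGE_SHIFT) = 3 * PAGE_SIZE
 *
 * which fits within PAGE_SIZE << compound_order(head) = 4 * PAGE_SIZE, so
 * the copy is sane; the same copy starting in subpage 3 would overrun the
 * compound page and trip the WARN_ON.
 */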

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		int idx = i->idx;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
				break;
			left -= buf->len;
			idx = next_idx(idx, pipe);
		}
		i->idx = idx;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		int idx = i->idx;
		size_t off = i->iov_offset;
		while (1) {
			size_t n = off - pipe->bufs[idx].offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && idx == i->start_idx) {
				off = 0;
				break;
			}
			if (!idx--)
				idx = pipe->buffers - 1;
			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
		}
		i->iov_offset = off;
		i->idx = idx;
		pipe_truncate(i);
		return;
	}
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (i->type & ITER_BVEC) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
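
/*
 * Usage sketch: callers that consume an iterator optimistically and then
 * hit a failure part-way can rewind by the amount already consumed, e.g.
 * (error condition illustrative):
 *
 *	size_t copied = copy_to_iter(buf, len, iter);
 *	if (something_went_wrong) {
 *		iov_iter_revert(iter, copied);
 *		return -EIO;
 *	}
 */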

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
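
/*
 * Example (illustrative): pointing an iterator at kernel memory, e.g. for
 * kernel_read()/kernel_write() style I/O:
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ | ITER_KVEC, &kv, 1, len);
 *
 * Note that the direction must carry the ITER_KVEC (or ITER_BVEC) flag, as
 * the BUG_ON()s above enforce.
 */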

void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
	i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
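
/*
 * Usage sketch (illustrative): pinning up to one page's worth of the
 * iterator for zero-copy I/O.  The offset into the first page comes back
 * in *start, and the pages must be dropped with put_page() when done:
 *
 *	struct page *pages[1];
 *	size_t start;
 *	ssize_t n = iov_iter_get_pages(i, pages, PAGE_SIZE, 1, &start);
 *
 *	if (n > 0) {
 *		... use pages[0] at offset start for n bytes ...
 *		put_page(pages[0]);
 *	}
 */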

static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	ssize_t n;
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
				  struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
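
/*
 * Example (illustrative), showing the ownership rule from the comment
 * above: *iov may be replaced with a heap allocation, and kfree() is
 * always safe afterwards:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvector, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	... use iter ...
 *	kfree(iov);
 */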

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
			unsigned nr_segs, unsigned fast_segs,
			struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
			struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context)
{
	struct kvec w;
	int err = -EINVAL;
	if (!bytes)
		return 0;

	iterate_all_kinds(i, bytes, v, -EINVAL, ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		kunmap(v.bv_page);
		err;}), ({
		w = v;
		err = f(&w, context);})
	)
	return err;
}
EXPORT_SYMBOL(iov_iter_for_each_range);
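
/*
 * Usage sketch (callback is hypothetical): the callback sees each range as
 * a kernel-addressable kvec, with bvec pages temporarily kmap()ed for it.
 * As the -EINVAL step above suggests, user-backed iovec iterators are not
 * supported here.
 *
 *	static int sum_range(struct kvec *vec, void *context)
 *	{
 *		*(size_t *)context += vec->iov_len;
 *		return 0;
 *	}
 *
 *	size_t total = 0;
 *	err = iov_iter_for_each_range(i, bytes, sum_range, &total);
 */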