/*
 * Berkeley style UIO structures - Alan Cox 1994.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum {
	ITER_IOVEC = 0,
	ITER_KVEC = 2,
	ITER_BVEC = 4,
	ITER_PIPE = 8,
};

struct iov_iter {
	int type;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			int idx;
			int start_idx;
		};
	};
};
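
/*
 * Illustrative sketch, not part of this header: a typical way to build
 * an iov_iter over a single kernel buffer is via iov_iter_kvec(),
 * declared below.  The buffer and length names here are made up.
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_KVEC | READ, &kv, 1, len);
 *	// iter.count now tracks how many of the 'len' bytes remain
 *	// as the iterator is advanced by the copy helpers below
 */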

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths
 * can overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
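
/*
 * Worked example of the overflow hazard noted above: on a 32-bit kernel,
 * two segments of 0x80000000 and 0x80000001 bytes sum to 0x100000001,
 * which wraps to 1 in a 32-bit size_t, so iov_length() would report one
 * byte for ~4GiB of described memory.  Validate each segment (as
 * import_iovec() below does) before trusting the sum.
 */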

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

#define iov_for_each(iov, iter, start)				\
	if (!((start).type & (ITER_BVEC | ITER_PIPE)))		\
	for (iter = (start);					\
	     (iter).count &&					\
	     ((iov = iov_iter_iovec(&(iter))), 1);		\
	     iov_iter_advance(&(iter), (iov).iov_len))
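
/*
 * Illustrative use of iov_for_each() in a hypothetical caller (the names
 * are invented): walk the user-visible segments of an iovec-backed
 * iterator without disturbing the caller's copy, since the macro
 * advances its own iterator.
 *
 *	struct iovec iov;
 *	struct iov_iter i;
 *
 *	iov_for_each(iov, i, *src) {
 *		if (iov.iov_len > FOO_MAX_SEG)
 *			return -EINVAL;
 *	}
 */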

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full_nocache(addr, bytes, i);
}
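
/*
 * Illustrative sketch of a hypothetical write-path caller (the command
 * struct and names are invented): when the whole payload is needed,
 * prefer the _full variant, which copies all-or-nothing and does not
 * advance the iterator on failure.
 *
 *	struct foo_cmd cmd;
 *
 *	if (!copy_from_iter_full(&cmd, sizeof(cmd), from))
 *		return -EFAULT;
 *	// 'from' has advanced by sizeof(cmd) and cmd is fully populated
 */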

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on copy_from_iter_flushcache() having
 * stricter semantics than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
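
/*
 * Illustrative check for the note above, in a hypothetical pmem-style
 * caller: when flushed-on-return semantics are required for correctness,
 * gate on the config symbol instead of assuming the nocache fallback
 * flushes.
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		return -ENXIO;	// or flush the destination explicitly
 *	rem = copy_from_iter_flushcache(dst, bytes, i);
 */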

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_to_iter_mcsafe _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter_mcsafe(addr, bytes, i);
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, int direction, struct pipe_inode_info *pipe,
		   size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			   size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
				 size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE));
}

/*
 * Get one of READ or WRITE out of iter->type without any other flags OR'd in
 * with it.
 *
 * The ?: is just for type safety.
 */
#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE))
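
/*
 * Illustrative use in a hypothetical caller: branch on the data
 * direction without caring which ITER_* backing is in use.
 *
 *	if (iov_iter_rw(iter) == WRITE)
 *		ret = foo_send(dev, iter);	// data flows out of iter
 *	else
 *		ret = foo_recv(dev, iter);	// data flows into iter
 */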

/*
 * Cap the iov_iter by given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
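
/*
 * Illustrative pairing of the two helpers above, as a hypothetical
 * filesystem-style caller might use them: clamp the iterator for one
 * pass, do the I/O, then hand back the bytes that truncation hid.
 *
 *	size_t old = iov_iter_count(iter);
 *	size_t hidden;
 *
 *	iov_iter_truncate(iter, max_this_pass);
 *	hidden = old - iov_iter_count(iter);
 *	ret = foo_do_io(iter);		// consumes from the front
 *	iov_iter_reexpand(iter, iov_iter_count(iter) + hidden);
 */
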
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);

#ifdef CONFIG_COMPAT
struct compat_iovec;
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		unsigned nr_segs, unsigned fast_segs,
		struct iovec **iov, struct iov_iter *i);
#endif

int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);
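
/*
 * Illustrative syscall-style use of import_iovec() (hypothetical caller):
 * pull in and validate a userspace iovec array, with on-stack storage
 * for the common small case.  import_iovec() NULLs *iov when the fast
 * array was used, so the kfree() is always safe.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = foo_do_read(&iter);
 *	kfree(iov);
 */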

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context);

#endif