// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

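/*
 * Translate kernel open flags into the CEPH_O_* values used on the
 * wire; bits with no wire equivalent are logged via dout() and dropped.
 */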
static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 * - buffered uses the generic_file_aio_{read,write} helpers
 *
 * - synchronous is used when there is multi-client read/write
 *   sharing, avoids the page cache, and synchronously waits for an
 *   ack from the OSD.
 *
 * - direct io takes the variant of the sync path that references
 *   user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * How many pages to get in one call to iov_iter_get_pages(). This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64

static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
					   ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		iov_iter_advance(iter, bytes);
		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given. For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}

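/*
 * Release the pages pinned by __iter_get_bvecs(), optionally marking
 * them dirty first, and free the bio_vec array itself.
 */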
static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}

/*
 * Prepare an open request. Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

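/*
 * Allocate and set up file->private_data: a ceph_dir_file_info for
 * directories, a plain ceph_file_info otherwise. On allocation
 * failure the fmode reference taken by the caller is dropped.
 */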
static int ceph_init_file_info(struct inode *inode, struct file *file,
			       int fmode, bool isdir)
{
	struct ceph_file_info *fi;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
	     inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = fi;
	}

	fi->fmode = fmode;
	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);

	return 0;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
		/* fall through */
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
					  S_ISDIR(inode->i_mode));
		if (ret)
			return ret;
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * Try to renew caps after the session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_fmode = -1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS). We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY; /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read). Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}


/*
 * Do a lookup + open with a single request. If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	/*
	 * Do not truncate the file, since atomic_open is called before the
	 * permission check. The caller will do the truncation afterward.
	 */
	flags &= ~O_TRUNC;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE &&
		    req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			file->f_mode |= FMODE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}

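/*
 * Free the private data allocated by ceph_init_file_info(), drop the
 * matching fmode reference on the inode, and wake any cap waiters.
 */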
int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_put_fmode(ci, fi->fmode);
		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

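/*
 * retry_op states used by ceph_read_iter(): whether a short read
 * needs an EOF re-check, an inline-data fallback, or was already
 * retried once.
 */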
enum {
	HAVE_RETRIED = 1,
	CHECK_EOF = 2,
	READ_INLINE = 3,
};

/*
 * Read a range of bytes striped over one or more objects. Iterate over
 * objects we stripe over. (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 pos, u64 len,
			struct page **pages, int num_pages,
			int page_align, int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 this_len;
	loff_t i_size;
	int page_idx;
	int ret, read = 0;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads. not atomic, unfortunately.
	 */
more:
	this_len = len;
	page_idx = (page_align + read) >> PAGE_SHIFT;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  pages + page_idx, num_pages - page_idx,
				  ((page_align + read) & ~PAGE_MASK));
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < len;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	i_size = i_size_read(inode);
	if (ret >= 0) {
		if (was_short && (pos + ret < i_size)) {
			int zlen = min(this_len - ret, i_size - pos - ret);
			int zoff = page_align + read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		read += ret;
		pos += ret;
		len -= ret;

		/* hit a stripe boundary and need to continue */
		if (len && hit_stripe && pos < i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + len > i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}

/*
 * Completely synchronous read and write methods. Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages;
	ssize_t ret;
	size_t len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range. this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
					   off + len);
	if (ret < 0)
		return ret;

	if (unlikely(to->type & ITER_PIPE)) {
		size_t page_off;
		ret = iov_iter_get_pages_alloc(to, &pages, len,
					       &page_off);
		if (ret <= 0)
			return -ENOMEM;
		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);

		ret = striped_read(inode, off, ret, pages, num_pages,
				   page_off, checkeof);
		if (ret > 0) {
			iov_iter_advance(to, ret);
			off += ret;
		} else {
			iov_iter_advance(to, 0);
		}
		ceph_put_page_vector(pages, num_pages, false);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		ret = striped_read(inode, off, len, pages, num_pages,
				   (off & ~PAGE_MASK), checkeof);
		if (ret > 0) {
			int l, k = 0;
			size_t left = ret;

			while (left) {
				size_t page_off = off & ~PAGE_MASK;
				size_t copy = min_t(size_t, left,
						    PAGE_SIZE - page_off);
				l = copy_page_to_iter(pages[k++], page_off,
						      copy, to);
				off += l;
				left -= l;
				if (l < copy)
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd\n", ret);
	return ret;
}

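/*
 * Context for a direct-I/O AIO that has been split into several OSD
 * requests; the request that drops pending_reqs to zero completes
 * the original iocb.
 */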
struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

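/*
 * Drop one pending request on @aio_req; whoever drops the last one
 * updates size/dirty caps for writes and completes the original iocb.
 */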
static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
					        CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

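/*
 * Completion callback for one OSD request within an AIO: requeue a
 * write that raced with a snapshot (-EOLDSNAPC), zero-fill the tail
 * of a short read, then unpin the pages and record the result.
 */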
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n",
	     inode, rc, osd_data->bvec_pos.iter.bi_size);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
			struct iov_iter i;
			int zlen = osd_data->bvec_pos.iter.bi_size - rc;

			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF. Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, ITER_BVEC, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs,
				      osd_data->bvec_pos.iter.bi_size);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}

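/*
 * Workqueue handler: rebuild an OSD write that returned -EOLDSNAPC
 * with the most recent snap context and resend it.
 */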
static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
				      false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

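/*
 * Common direct-I/O path for reads and writes: pin the pages backing
 * @iter and issue one OSD request per object extent, either waiting
 * synchronously or (for async iocbs that stay within i_size) tracking
 * the requests as AIO via a ceph_aio_request.
 */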
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO only when the IO is
		 * within i_size or can be satisfied by a single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   (pos + len) | (PAGE_SIZE - 1));

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, ITER_BVEC, bvecs, num_pages,
					      len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes. (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}

	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}


/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC. In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	u32 map_flags;
	u64 pool_flags;
	loff_t pos;
	loff_t limit = max(i_size_read(inode), fsc->max_file_size);

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

retry_snap:
	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	if (unlikely(pos >= limit)) {
		err = -EFBIG;
		goto out;
	} else {
		iov_iter_truncate(from, limit - pos);
	}

	count = iov_iter_count(from);
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
		err = -EDQUOT;
		goto out;
	}

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

	down_read(&osdc->lock);
	map_flags = osdc->osdmap->flags;
	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
	up_read(&osdc->lock);
	if ((map_flags & CEPH_OSDMAP_FULL) ||
	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex. Because
		 * the MDS revokes Fwb caps before sending truncate
		 * message to us. We can't get Fwb cap while there
		 * are pending vmtruncate. So write and vmtruncate
		 * cannot run at the same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

	if (written >= 0) {
		if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
		    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;

out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * llseek. be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation. Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));

out:
	inode_unlock(inode);
	return ret;
}

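/*
 * Zero part of a single page if it is present in the page cache;
 * pages that are not cached are left untouched.
 */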
static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

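/*
 * Zero cached data in [offset, offset+length): partial pages at the
 * edges are zeroed in place, whole pages are dropped from the cache.
 */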
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}

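/*
 * Zero a byte range within a single RADOS object. A NULL @length
 * means the whole object: it is deleted, or truncated when @offset
 * is 0.
 */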
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode),
				    offset, length,
				    0, 1, op,
				    CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

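/*
 * Zero a file range that may span many objects: partial objects at
 * the head and tail are zeroed in place, while fully covered object
 * sets in the middle are dropped via ceph_zero_partial_object(..., NULL).
 */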
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}

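/*
 * fallocate() support is limited to punching holes within i_size
 * (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE) on regular files.
 */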
static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);

	/* Are we punching a hole beyond EOF? */
	if (offset >= size)
		goto unlock;
	if ((offset + length) > size)
		length = size - offset;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	ceph_zero_pagecache_range(inode, offset, length);
	ret = ceph_zero_objects(inode, offset, length);

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.setlease = simple_nosetlease,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};