/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/slab.h>
#include "internal.h"

#define CACHEFILES_KEYBUF_SIZE 512

/*
 * dump debugging info about an object
 */
static noinline
void __cachefiles_printk_object(struct cachefiles_object *object,
				const char *prefix)
{
	struct fscache_cookie *cookie;
	const u8 *k;
	unsigned loop;

	pr_err("%sobject: OBJ%x\n", prefix, object->fscache.debug_id);
	pr_err("%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
	       prefix, object->fscache.state->name,
	       object->fscache.flags, work_busy(&object->fscache.work),
	       object->fscache.events, object->fscache.event_mask);
	pr_err("%sops=%u inp=%u exc=%u\n",
	       prefix, object->fscache.n_ops, object->fscache.n_in_progress,
	       object->fscache.n_exclusive);
	pr_err("%sparent=%p\n",
	       prefix, object->fscache.parent);

	spin_lock(&object->fscache.lock);
	cookie = object->fscache.cookie;
	if (cookie) {
		pr_err("%scookie=%p [pr=%p nd=%p fl=%lx]\n",
		       prefix,
		       object->fscache.cookie,
		       object->fscache.cookie->parent,
		       object->fscache.cookie->netfs_data,
		       object->fscache.cookie->flags);
		pr_err("%skey=[%u] '", prefix, cookie->key_len);
		k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
			cookie->inline_key : cookie->key;
		for (loop = 0; loop < cookie->key_len; loop++)
			pr_cont("%02x", k[loop]);
		pr_cont("'\n");
	} else {
		pr_err("%scookie=NULL\n", prefix);
	}
	spin_unlock(&object->fscache.lock);
}

/*
 * dump debugging info about a pair of objects
 */
static noinline void cachefiles_printk_object(struct cachefiles_object *object,
					      struct cachefiles_object *xobject)
{
	if (object)
		__cachefiles_printk_object(object, "");
	if (xobject)
		__cachefiles_printk_object(xobject, "x");
}

/*
 * mark the owner of a dentry, if there is one, to indicate that that dentry
 * has been preemptively deleted
 * - the caller must hold the i_mutex on the dentry's parent as required to
 *   call vfs_unlink(), vfs_rmdir() or vfs_rename()
 */
static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
					  struct dentry *dentry,
					  enum fscache_why_object_killed why)
{
	struct cachefiles_object *object;
	struct rb_node *p;

	_enter(",'%pd'", dentry);

	write_lock(&cache->active_lock);

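	/* the active-object tree is keyed on the backing dentry pointer, so
	 * walk it comparing raw pointer values */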
	p = cache->active_nodes.rb_node;
	while (p) {
		object = rb_entry(p, struct cachefiles_object, active_node);
		if (object->dentry > dentry)
			p = p->rb_left;
		else if (object->dentry < dentry)
			p = p->rb_right;
		else
			goto found_dentry;
	}

	write_unlock(&cache->active_lock);
	trace_cachefiles_mark_buried(NULL, dentry, why);
	_leave(" [no owner]");
	return;

	/* found the object that owns this dentry */
found_dentry:
	kdebug("preemptive burial: OBJ%x [%s] %p",
	       object->fscache.debug_id,
	       object->fscache.state->name,
	       dentry);

	trace_cachefiles_mark_buried(object, dentry, why);

	if (fscache_object_is_live(&object->fscache)) {
		pr_err("\n");
		pr_err("Error: Can't preemptively bury live object\n");
		cachefiles_printk_object(object, NULL);
	} else {
		if (why != FSCACHE_OBJECT_IS_STALE)
			fscache_object_mark_killed(&object->fscache, why);
	}

	write_unlock(&cache->active_lock);
	_leave(" [owner marked]");
}

/*
 * record the fact that an object is now active
 */
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
					 struct cachefiles_object *object)
{
	struct cachefiles_object *xobject;
	struct rb_node **_p, *_parent = NULL;
	struct dentry *dentry;

	_enter(",%p", object);

try_again:
	write_lock(&cache->active_lock);

	dentry = object->dentry;
	trace_cachefiles_mark_active(object, dentry);

	if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
		pr_err("Error: Object already active\n");
		cachefiles_printk_object(object, NULL);
		BUG();
	}

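	/* insert into the active-object tree, keyed by backing dentry; a
	 * collision means an object from a previous incarnation of this file
	 * is still lingering */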
	_p = &cache->active_nodes.rb_node;
	while (*_p) {
		_parent = *_p;
		xobject = rb_entry(_parent,
				   struct cachefiles_object, active_node);

		ASSERT(xobject != object);

		if (xobject->dentry > dentry)
			_p = &(*_p)->rb_left;
		else if (xobject->dentry < dentry)
			_p = &(*_p)->rb_right;
		else
			goto wait_for_old_object;
	}

	rb_link_node(&object->active_node, _parent, _p);
	rb_insert_color(&object->active_node, &cache->active_nodes);

	write_unlock(&cache->active_lock);
	_leave(" = 0");
	return 0;

	/* an old object from a previous incarnation is hogging the slot - we
	 * need to wait for it to be destroyed */
wait_for_old_object:
	trace_cachefiles_wait_active(object, dentry, xobject);
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);

	if (fscache_object_is_live(&xobject->fscache)) {
		pr_err("\n");
		pr_err("Error: Unexpected object collision\n");
		cachefiles_printk_object(object, xobject);
	}
	atomic_inc(&xobject->usage);
	write_unlock(&cache->active_lock);

	if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
		wait_queue_head_t *wq;

		signed long timeout = 60 * HZ;
		wait_queue_entry_t wait;
		bool requeue;

		/* if the object we're waiting for is queued for processing,
		 * then just put ourselves on the queue behind it */
		if (work_pending(&xobject->fscache.work)) {
			_debug("queue OBJ%x behind OBJ%x immediately",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		/* otherwise we sleep until either the object we're waiting for
		 * is done, or the fscache_object is congested */
		wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
		init_wait(&wait);
		requeue = false;
		do {
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
				break;

			requeue = fscache_object_sleep_till_congested(&timeout);
		} while (timeout > 0 && !requeue);
		finish_wait(wq, &wait);

		if (requeue &&
		    test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
			_debug("queue OBJ%x behind OBJ%x after wait",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		if (timeout <= 0) {
			pr_err("\n");
			pr_err("Error: Overlong wait for old active object to go away\n");
			cachefiles_printk_object(object, xobject);
			goto requeue;
		}
	}

	ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));

	cache->cache.ops->put_object(&xobject->fscache,
		(enum fscache_obj_ref_trace)cachefiles_obj_put_wait_retry);
	goto try_again;

requeue:
	cache->cache.ops->put_object(&xobject->fscache,
		(enum fscache_obj_ref_trace)cachefiles_obj_put_wait_timeo);
	_leave(" = -ETIMEDOUT");
	return -ETIMEDOUT;
}

/*
 * Mark an object as being inactive.
 */
void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
				     struct cachefiles_object *object,
				     blkcnt_t i_blocks)
{
	struct dentry *dentry = object->dentry;
	struct inode *inode = d_backing_inode(dentry);

	trace_cachefiles_mark_inactive(object, dentry, inode);

	write_lock(&cache->active_lock);
	rb_erase(&object->active_node, &cache->active_nodes);
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	write_unlock(&cache->active_lock);

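	/* wake anyone waiting on this bit in cachefiles_mark_object_active() */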
	wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);

	/* This object can now be culled, so we need to let the daemon know
	 * that there is something it can remove if it needs to.
	 */
	atomic_long_add(i_blocks, &cache->b_released);
	if (atomic_inc_return(&cache->f_released))
		cachefiles_state_changed(cache);
}

/*
 * delete an object representation from the cache
 * - file backed objects are unlinked
 * - directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 * - unlocks the directory mutex
 */
static int cachefiles_bury_object(struct cachefiles_cache *cache,
				  struct cachefiles_object *object,
				  struct dentry *dir,
				  struct dentry *rep,
				  bool preemptive,
				  enum fscache_why_object_killed why)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	_debug("remove %p from %p", rep, dir);

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		_debug("unlink stale object");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_unlink(&path, rep);
		if (ret < 0) {
			cachefiles_io_error(cache, "Unlink security error");
		} else {
			trace_cachefiles_unlink(object, rep, why);
			ret = vfs_unlink(d_inode(dir), rep, NULL);

			if (preemptive)
				cachefiles_mark_object_buried(cache, rep, why);
		}

		inode_unlock(d_inode(dir));

		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");

		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	inode_unlock(d_inode(dir));

try_again:
	/* first step is to make up a grave dentry in the graveyard */
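	/* the grave name combines the current time in seconds and a rolling
	 * counter, both in hex, so concurrent burials get distinct names */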
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) get_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic */
	trap = lock_rename(cache->graveyard, dir);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!d_can_lookup(cache->graveyard)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld",
				    PTR_ERR(grave));
		return -EIO;
	}

	if (d_is_positive(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		trace_cachefiles_rename(object, rep, grave, why);
		ret = vfs_rename(d_inode(dir), rep,
				 d_inode(cache->graveyard), grave, NULL, 0);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);

		if (preemptive)
			cachefiles_mark_object_buried(cache, rep, why);
	}

	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}

/*
 * delete an object representation from the cache
 */
int cachefiles_delete_object(struct cachefiles_cache *cache,
			     struct cachefiles_object *object)
{
	struct dentry *dir;
	int ret;

	_enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);

	ASSERT(object->dentry);
	ASSERT(d_backing_inode(object->dentry));
	ASSERT(object->dentry->d_parent);

	dir = dget_parent(object->dentry);

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	if (test_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->fscache.flags)) {
		/* object allocation for the same key preemptively deleted this
		 * object's file so that it could create its own file */
		_debug("object preemptively buried");
		inode_unlock(d_inode(dir));
		ret = 0;
	} else {
		/* we need to check that our parent is _still_ our parent - it
		 * may have been renamed */
		if (dir == object->dentry->d_parent) {
			ret = cachefiles_bury_object(cache, object, dir,
						     object->dentry, false,
						     FSCACHE_OBJECT_WAS_RETIRED);
		} else {
			/* it got moved, presumably by cachefilesd culling it,
			 * so it's no longer in the key path and we can ignore
			 * it */
			inode_unlock(d_inode(dir));
			ret = 0;
		}
	}

	dput(dir);
	_leave(" = %d", ret);
	return ret;
}

/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 */
int cachefiles_walk_to_object(struct cachefiles_object *parent,
			      struct cachefiles_object *object,
			      const char *key,
			      struct cachefiles_xattr *auxdata)
{
	struct cachefiles_cache *cache;
	struct dentry *dir, *next = NULL;
	struct inode *inode;
	struct path path;
	unsigned long start;
	const char *name;
	int ret, nlen;

	_enter("OBJ%x{%p},OBJ%x,%s,",
	       parent->fscache.debug_id, parent->dentry,
	       object->fscache.debug_id, key);

	cache = container_of(parent->fscache.cache,
			     struct cachefiles_cache, cache);
	path.mnt = cache->mnt;

	ASSERT(parent->dentry);
	ASSERT(d_backing_inode(parent->dentry));

	if (!(d_is_dir(parent->dentry))) {
		// TODO: convert file to dir
		_leave("looking up in none directory");
		return -ENOBUFS;
	}

	dir = dget(parent->dentry);

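	/* the key is a sequence of NUL-separated path elements, terminated by
	 * a double NUL; each pass of the loop below looks up or creates one
	 * element */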
advance:
	/* attempt to transit the first directory component */
	name = key;
	nlen = strlen(key);

	/* key ends in a double NUL */
	key = key + nlen + 1;
	if (!*key)
		key = NULL;

lookup_again:
	/* search the current directory for the element name */
	_debug("lookup '%s'", name);

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	start = jiffies;
	next = lookup_one_len(name, dir, nlen);
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(next)) {
		trace_cachefiles_lookup(object, next, NULL);
		goto lookup_error;
	}

	inode = d_backing_inode(next);
	trace_cachefiles_lookup(object, next, inode);
	_debug("next -> %p %s", next, inode ? "positive" : "negative");

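	/* once the last key element has been consumed, a negative terminal
	 * dentry means we're creating this object afresh */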
	if (!key)
		object->new = !inode;

	/* if this element of the path doesn't exist, then the lookup phase
	 * failed, and we can release any readers in the certain knowledge that
	 * there's nothing for them to actually read */
	if (d_is_negative(next))
		fscache_object_lookup_negative(&object->fscache);

	/* we need to create the object if it's negative */
	if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
		/* index objects and intervening tree levels must be subdirs */
		if (d_is_negative(next)) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto no_space_error;

			path.dentry = dir;
			ret = security_path_mkdir(&path, next, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_mkdir(d_inode(dir), next, 0);
			cachefiles_hist(cachefiles_mkdir_histogram, start);
			if (!key)
				trace_cachefiles_mkdir(object, next, ret);
			if (ret < 0)
				goto create_error;

			if (unlikely(d_unhashed(next))) {
				dput(next);
				inode_unlock(d_inode(dir));
				goto lookup_again;
			}
			ASSERT(d_backing_inode(next));

			_debug("mkdir -> %p{%p{ino=%lu}}",
			       next, d_backing_inode(next), d_backing_inode(next)->i_ino);

		} else if (!d_can_lookup(next)) {
			pr_err("inode %lu is not a directory\n",
			       d_backing_inode(next)->i_ino);
			ret = -ENOBUFS;
			goto error;
		}

	} else {
		/* non-index objects start out life as files */
		if (d_is_negative(next)) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto no_space_error;

			path.dentry = dir;
			ret = security_path_mknod(&path, next, S_IFREG, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_create(d_inode(dir), next, S_IFREG, true);
			cachefiles_hist(cachefiles_create_histogram, start);
			trace_cachefiles_create(object, next, ret);
			if (ret < 0)
				goto create_error;

			ASSERT(d_backing_inode(next));

			_debug("create -> %p{%p{ino=%lu}}",
			       next, d_backing_inode(next), d_backing_inode(next)->i_ino);

		} else if (!d_can_lookup(next) &&
			   !d_is_reg(next)
			   ) {
			pr_err("inode %lu is not a file or directory\n",
			       d_backing_inode(next)->i_ino);
			ret = -ENOBUFS;
			goto error;
		}
	}

	/* process the next component */
	if (key) {
		_debug("advance");
		inode_unlock(d_inode(dir));
		dput(dir);
		dir = next;
		next = NULL;
		goto advance;
	}

	/* we've found the object we were looking for */
	object->dentry = next;

	/* if we've found that the terminal object exists, then we need to
	 * check its attributes and delete it if it's out of date */
	if (!object->new) {
		_debug("validate '%pd'", next);

		ret = cachefiles_check_object_xattr(object, auxdata);
		if (ret == -ESTALE) {
			/* delete the object (the deleter drops the directory
			 * mutex) */
			object->dentry = NULL;

			ret = cachefiles_bury_object(cache, object, dir, next,
						     true,
						     FSCACHE_OBJECT_IS_STALE);
			dput(next);
			next = NULL;

			if (ret < 0)
				goto delete_error;

			_debug("redo lookup");
			fscache_object_retrying_stale(&object->fscache);
			goto lookup_again;
		}
	}

	/* note that we're now using this object */
	ret = cachefiles_mark_object_active(cache, object);

	inode_unlock(d_inode(dir));
	dput(dir);
	dir = NULL;

	if (ret == -ETIMEDOUT)
		goto mark_active_timed_out;

	_debug("=== OBTAINED_OBJECT ===");

	if (object->new) {
		/* attach data to a newly constructed terminal object */
		ret = cachefiles_set_object_xattr(object, auxdata);
		if (ret < 0)
			goto check_error;
	} else {
		/* always update the atime on an object we've just looked up
		 * (this is used to keep track of culling, and atimes are only
		 * updated by read, write and readdir but not lookup or
		 * open) */
		path.dentry = next;
		touch_atime(&path);
	}

	/* open a file interface onto a data file */
	if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
		if (d_is_reg(object->dentry)) {
			const struct address_space_operations *aops;

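			/* the backing file must provide bmap() (used to detect
			 * holes in the cache file) and its block size must not
			 * exceed the page size */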
			ret = -EPERM;
			aops = d_backing_inode(object->dentry)->i_mapping->a_ops;
			if (!aops->bmap)
				goto check_error;
			if (object->dentry->d_sb->s_blocksize > PAGE_SIZE)
				goto check_error;

			object->backer = object->dentry;
		} else {
			BUG(); // TODO: open file in data-class subdir
		}
	}

	object->new = 0;
	fscache_obtained_object(&object->fscache);

	_leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino);
	return 0;

no_space_error:
	fscache_object_mark_killed(&object->fscache, FSCACHE_OBJECT_NO_SPACE);
create_error:
	_debug("create error %d", ret);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Create/mkdir failed");
	goto error;

mark_active_timed_out:
	_debug("mark active timed out");
	goto release_dentry;

check_error:
	_debug("check error %d", ret);
	cachefiles_mark_object_inactive(
		cache, object, d_backing_inode(object->dentry)->i_blocks);
release_dentry:
	dput(object->dentry);
	object->dentry = NULL;
	goto error_out;

delete_error:
	_debug("delete error %d", ret);
	goto error_out2;

lookup_error:
	_debug("lookup error %ld", PTR_ERR(next));
	ret = PTR_ERR(next);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Lookup failed");
	next = NULL;
error:
	inode_unlock(d_inode(dir));
	dput(next);
error_out2:
	dput(dir);
error_out:
	_leave(" = error %d", -ret);
	return ret;
}

/*
 * get a subdirectory
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname)
{
	struct dentry *subdir;
	unsigned long start;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	inode_lock(d_inode(dir));

retry:
	start = jiffies;
	subdir = lookup_one_len(dirname, dir, strlen(dirname));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(subdir)) {
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %p %s",
	       subdir, d_backing_inode(subdir) ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (d_is_negative(subdir)) {
		ret = cachefiles_has_space(cache, 1, 0);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = vfs_mkdir(d_inode(dir), subdir, 0700);
		if (ret < 0)
			goto mkdir_error;

		if (unlikely(d_unhashed(subdir))) {
			dput(subdir);
			goto retry;
		}
		ASSERT(d_backing_inode(subdir));

		_debug("mkdir -> %p{%p{ino=%lu}}",
		       subdir,
		       d_backing_inode(subdir),
		       d_backing_inode(subdir)->i_ino);
	}

	inode_unlock(d_inode(dir));

	/* we need to make sure the subdir is a directory */
	ASSERT(d_backing_inode(subdir));

	if (!d_can_lookup(subdir)) {
		pr_err("%s is not a directory\n", dirname);
		ret = -EIO;
		goto check_error;
	}

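	/* the backing filesystem must support xattrs and the full set of
	 * directory operations that CacheFiles relies on */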
	ret = -EPERM;
	if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
	    !d_backing_inode(subdir)->i_op->lookup ||
	    !d_backing_inode(subdir)->i_op->mkdir ||
	    !d_backing_inode(subdir)->i_op->create ||
	    !d_backing_inode(subdir)->i_op->rename ||
	    !d_backing_inode(subdir)->i_op->rmdir ||
	    !d_backing_inode(subdir)->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
	return subdir;

check_error:
	dput(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mkdir_error:
	inode_unlock(d_inode(dir));
	dput(subdir);
	pr_err("mkdir %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(subdir);
	pr_err("Lookup %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	inode_unlock(d_inode(dir));
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}

/*
 * find out if an object is in use or not
 * - if it finds the victim and it's not in use:
 *   - returns a pointer to the victim dentry with a reference held
 *   - returns with the directory locked
 */
static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
					      struct dentry *dir,
					      char *filename)
{
	struct cachefiles_object *object;
	struct rb_node *_n;
	struct dentry *victim;
	unsigned long start;
	int ret;

	//_enter(",%pd/,%s",
	//       dir, filename);

	/* look up the victim */
	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	start = jiffies;
	victim = lookup_one_len(filename, dir, strlen(filename));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(victim))
		goto lookup_error;

	//_debug("victim -> %p %s",
	//       victim, d_backing_inode(victim) ? "positive" : "negative");

	/* if the object is no longer there then we probably retired the object
	 * at the netfs's request whilst the cull was in progress
	 */
	if (d_is_negative(victim)) {
		inode_unlock(d_inode(dir));
		dput(victim);
		_leave(" = -ENOENT [absent]");
		return ERR_PTR(-ENOENT);
	}

	/* check to see if we're using this object */
	read_lock(&cache->active_lock);

	_n = cache->active_nodes.rb_node;

	while (_n) {
		object = rb_entry(_n, struct cachefiles_object, active_node);

		if (object->dentry > victim)
			_n = _n->rb_left;
		else if (object->dentry < victim)
			_n = _n->rb_right;
		else
			goto object_in_use;
	}

	read_unlock(&cache->active_lock);

	//_leave(" = %p", victim);
	return victim;

object_in_use:
	read_unlock(&cache->active_lock);
	inode_unlock(d_inode(dir));
	dput(victim);
	//_leave(" = -EBUSY [in use]");
	return ERR_PTR(-EBUSY);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(victim);
	if (ret == -ENOENT) {
		/* file or dir now absent - probably retired by netfs */
		_leave(" = -ESTALE [absent]");
		return ERR_PTR(-ESTALE);
	}

	if (ret == -EIO) {
		cachefiles_io_error(cache, "Lookup failed");
	} else if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * cull an object if it's not in use
 * - called only by cache manager daemon
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
		    char *filename)
{
	struct dentry *victim;
	int ret;

	_enter(",%pd/,%s", dir, filename);

	victim = cachefiles_check_active(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	_debug("victim -> %p %s",
	       victim, d_backing_inode(victim) ? "positive" : "negative");

	/* okay... the victim is not being used so we can cull it
	 * - start by marking it as stale
	 */
	_debug("victim is cullable");

	ret = cachefiles_remove_object_xattr(cache, victim);
	if (ret < 0)
		goto error_unlock;

	/* actually remove the victim (drops the dir mutex) */
	_debug("bury");

	ret = cachefiles_bury_object(cache, NULL, dir, victim, false,
				     FSCACHE_OBJECT_WAS_CULLED);
	if (ret < 0)
		goto error;

	dput(victim);
	_leave(" = 0");
	return 0;

error_unlock:
	inode_unlock(d_inode(dir));
error:
	dput(victim);
	if (ret == -ENOENT) {
		/* file or dir now absent - probably retired by netfs */
		_leave(" = -ESTALE [absent]");
		return -ESTALE;
	}

	if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * find out if an object is in use or not
 * - called only by cache manager daemon
 * - returns -EBUSY or 0 to indicate whether an object is in use or not
 */
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
			    char *filename)
{
	struct dentry *victim;

	//_enter(",%pd/,%s",
	//       dir, filename);

	victim = cachefiles_check_active(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	inode_unlock(d_inode(dir));
	dput(victim);
	//_leave(" = 0");
	return 0;
}