/* AFS server record management
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include "afs_fs.h"
#include "internal.h"

static unsigned afs_server_gc_delay = 10;	/* Server record timeout in seconds */
static unsigned afs_server_update_delay = 30;	/* Time till VLDB recheck in secs */

static void afs_inc_servers_outstanding(struct afs_net *net)
{
	atomic_inc(&net->servers_outstanding);
}

static void afs_dec_servers_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->servers_outstanding))
		wake_up_var(&net->servers_outstanding);
}

/*
 * Find a server by one of its addresses.
 */
struct afs_server *afs_find_server(struct afs_net *net,
				   const struct sockaddr_rxrpc *srx)
{
	const struct afs_addr_list *alist;
	struct afs_server *server = NULL;
	unsigned int i;
	int seq = 0, diff;

	rcu_read_lock();

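	/* The lookup may have to be repeated: if fs_addr_lock shows that the
	 * address lists changed whilst they were being walked, any
	 * provisional ref taken on a candidate server is dropped and the
	 * search is retried.
	 */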
	do {
		if (server)
			afs_put_server(net, server);
		server = NULL;
		read_seqbegin_or_lock(&net->fs_addr_lock, &seq);

		if (srx->transport.family == AF_INET6) {
			const struct sockaddr_in6 *a = &srx->transport.sin6, *b;
			hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) {
				alist = rcu_dereference(server->addresses);
				for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
					b = &alist->addrs[i].transport.sin6;
					diff = ((u16 __force)a->sin6_port -
						(u16 __force)b->sin6_port);
					if (diff == 0)
						diff = memcmp(&a->sin6_addr,
							      &b->sin6_addr,
							      sizeof(struct in6_addr));
					if (diff == 0)
						goto found;
				}
			}
		} else {
			const struct sockaddr_in *a = &srx->transport.sin, *b;
			hlist_for_each_entry_rcu(server, &net->fs_addresses4, addr4_link) {
				alist = rcu_dereference(server->addresses);
				for (i = 0; i < alist->nr_ipv4; i++) {
					b = &alist->addrs[i].transport.sin;
					diff = ((u16 __force)a->sin_port -
						(u16 __force)b->sin_port);
					if (diff == 0)
						diff = ((u32 __force)a->sin_addr.s_addr -
							(u32 __force)b->sin_addr.s_addr);
					if (diff == 0)
						goto found;
				}
			}
		}

		server = NULL;
	found:
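		/* The usage count may already have hit zero if the server is
		 * queued for garbage collection, in which case it can't be
		 * reused and the lookup must behave as if nothing was found.
		 */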
		if (server && !atomic_inc_not_zero(&server->usage))
			server = NULL;

	} while (need_seqretry(&net->fs_addr_lock, seq));

	done_seqretry(&net->fs_addr_lock, seq);

	rcu_read_unlock();
	return server;
}

/*
 * Look up a server by its UUID
 */
struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uuid)
{
	struct afs_server *server = NULL;
	struct rb_node *p;
	int diff, seq = 0;

	_enter("%pU", uuid);

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		if (server)
			afs_put_server(net, server);
		server = NULL;

		read_seqbegin_or_lock(&net->fs_lock, &seq);

		p = net->fs_servers.rb_node;
		while (p) {
			server = rb_entry(p, struct afs_server, uuid_rb);

			diff = memcmp(uuid, &server->uuid, sizeof(*uuid));
			if (diff < 0) {
				p = p->rb_left;
			} else if (diff > 0) {
				p = p->rb_right;
			} else {
				afs_get_server(server);
				break;
			}

			server = NULL;
		}
	} while (need_seqretry(&net->fs_lock, seq));

	done_seqretry(&net->fs_lock, seq);

	_leave(" = %p", server);
	return server;
}

/*
 * Install a server record in the namespace tree
 */
static struct afs_server *afs_install_server(struct afs_net *net,
					     struct afs_server *candidate)
{
	const struct afs_addr_list *alist;
	struct afs_server *server;
	struct rb_node **pp, *p;
	int ret = -EEXIST, diff;

	_enter("%p", candidate);

	write_seqlock(&net->fs_lock);

	/* Firstly install the server in the UUID lookup tree */
	pp = &net->fs_servers.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		_debug("- consider %p", p);
		server = rb_entry(p, struct afs_server, uuid_rb);
		diff = memcmp(&candidate->uuid, &server->uuid, sizeof(uuid_t));
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto exists;
	}

	server = candidate;
	rb_link_node(&server->uuid_rb, p, pp);
	rb_insert_color(&server->uuid_rb, &net->fs_servers);
	hlist_add_head_rcu(&server->proc_link, &net->fs_proc);

	write_seqlock(&net->fs_addr_lock);
	alist = rcu_dereference_protected(server->addresses,
					  lockdep_is_held(&net->fs_addr_lock.lock));

	/* Secondly, if the server has any IPv4 and/or IPv6 addresses, install
	 * it in the IPv4 and/or IPv6 reverse-map lists.
	 *
	 * TODO: For speed we want to use something other than a flat list
	 * here; even sorting the list in terms of lowest address would help a
	 * bit, but anything we might want to do gets messy and memory
	 * intensive.
	 */
	if (alist->nr_ipv4 > 0)
		hlist_add_head_rcu(&server->addr4_link, &net->fs_addresses4);
	if (alist->nr_addrs > alist->nr_ipv4)
		hlist_add_head_rcu(&server->addr6_link, &net->fs_addresses6);

	write_sequnlock(&net->fs_addr_lock);
	ret = 0;

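	/* Whether the candidate was installed or a matching record already
	 * existed, a ref is taken on the record that is returned; a rejected
	 * candidate is left for the caller to discard.
	 */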
exists:
	afs_get_server(server);
	write_sequnlock(&net->fs_lock);
	return server;
}

/*
 * Allocate a new server record.
 */
static struct afs_server *afs_alloc_server(struct afs_net *net,
					   const uuid_t *uuid,
					   struct afs_addr_list *alist)
{
	struct afs_server *server;

	_enter("");

	server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
	if (!server)
		goto enomem;

	atomic_set(&server->usage, 1);
	RCU_INIT_POINTER(server->addresses, alist);
	server->addr_version = alist->version;
	server->uuid = *uuid;
	server->flags = (1UL << AFS_SERVER_FL_NEW);
	server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
	rwlock_init(&server->fs_lock);
	INIT_HLIST_HEAD(&server->cb_volumes);
	rwlock_init(&server->cb_break_lock);

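	/* The caller gets the initial ref.  The record also counts towards
	 * net->servers_outstanding, which isn't dropped until the record is
	 * destroyed, so that afs_purge_servers() can wait for all records to
	 * go away.
	 */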
	afs_inc_servers_outstanding(net);
	_leave(" = %p", server);
	return server;

enomem:
	_leave(" = NULL [nomem]");
	return NULL;
}

/*
 * Look up an address record for a server
 */
static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
						 struct key *key, const uuid_t *uuid)
{
	struct afs_addr_cursor ac;
	struct afs_addr_list *alist;
	int ret;

	ret = afs_set_vl_cursor(&ac, cell);
	if (ret < 0)
		return ERR_PTR(ret);

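	/* Try each VL server address in turn.  YFS-capable servers are asked
	 * with YFSVL.GetEndpoints, others with VL.GetAddrsU; hard failures
	 * abort the lookup, whereas unreachable or refusing addresses just
	 * move the cursor on to the next address.
	 */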
	while (afs_iterate_addresses(&ac)) {
		if (test_bit(ac.index, &ac.alist->yfs))
			alist = afs_yfsvl_get_endpoints(cell->net, &ac, key, uuid);
		else
			alist = afs_vl_get_addrs_u(cell->net, &ac, key, uuid);
		switch (ac.error) {
		case 0:
			afs_end_cursor(&ac);
			return alist;
		case -ECONNABORTED:
			ac.error = afs_abort_to_error(ac.abort_code);
			goto error;
		case -ENOMEM:
		case -ENONET:
			goto error;
		case -ENETUNREACH:
		case -EHOSTUNREACH:
		case -ECONNREFUSED:
			break;
		default:
			ac.error = -EIO;
			goto error;
		}
	}

error:
	return ERR_PTR(afs_end_cursor(&ac));
}

/*
 * Get or create a fileserver record.
 */
struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key,
				     const uuid_t *uuid)
{
	struct afs_addr_list *alist;
	struct afs_server *server, *candidate;

	_enter("%p,%pU", cell->net, uuid);

	server = afs_find_server_by_uuid(cell->net, uuid);
	if (server)
		return server;

	alist = afs_vl_lookup_addrs(cell, key, uuid);
	if (IS_ERR(alist))
		return ERR_CAST(alist);

	candidate = afs_alloc_server(cell->net, uuid, alist);
	if (!candidate) {
		afs_put_addrlist(alist);
		return ERR_PTR(-ENOMEM);
	}

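	/* Another thread may have installed a record for this UUID since the
	 * lookup above; if so, the existing record is returned and the
	 * candidate and its address list are discarded.
	 */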
	server = afs_install_server(cell->net, candidate);
	if (server != candidate) {
		afs_put_addrlist(alist);
		kfree(candidate);
	}

	_leave(" = %p{%d}", server, atomic_read(&server->usage));
	return server;
}

/*
 * Set the server timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 */
static void afs_set_server_timer(struct afs_net *net, time64_t delay)
{
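	/* A pending timer holds one count on net->servers_outstanding.  If
	 * timer_reduce() reports that the timer was already pending, that
	 * count is already held, so the increment taken here is dropped
	 * again.
	 */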
	if (net->live) {
		afs_inc_servers_outstanding(net);
		if (timer_reduce(&net->fs_timer, jiffies + delay * HZ))
			afs_dec_servers_outstanding(net);
	}
}

/*
 * Server management timer.  We have an increment on net->servers_outstanding
 * that we need to pass along to the work item.
 */
void afs_servers_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, fs_timer);

	_enter("");
	if (!queue_work(afs_wq, &net->fs_manager))
		afs_dec_servers_outstanding(net);
}

/*
 * Release a reference on a server record.
 */
void afs_put_server(struct afs_net *net, struct afs_server *server)
{
	unsigned int usage;

	if (!server)
		return;

	server->put_time = ktime_get_real_seconds();

	usage = atomic_dec_return(&server->usage);

	_enter("{%u}", usage);

	if (likely(usage > 0))
		return;

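	/* The last ref has gone, but the record isn't freed here: the GC
	 * timer is armed instead and afs_manage_servers() will reap the
	 * record once afs_server_gc_delay has elapsed since put_time.
	 */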
	afs_set_server_timer(net, afs_server_gc_delay);
}

static void afs_server_rcu(struct rcu_head *rcu)
{
	struct afs_server *server = container_of(rcu, struct afs_server, rcu);

	afs_put_addrlist(rcu_access_pointer(server->addresses));
	kfree(server);
}

/*
 * Destroy a dead server.
 */
static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
{
	struct afs_addr_list *alist = rcu_access_pointer(server->addresses);
	struct afs_addr_cursor ac = {
		.alist	= alist,
		.start	= alist->index,
		.index	= 0,
		.addr	= &alist->addrs[alist->index],
		.error	= 0,
	};
	_enter("%p", server);

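	/* Make a best-effort attempt to tell the server to discard any
	 * callback promises it made to us before the record goes away; the
	 * actual freeing is deferred to an RCU grace period as lockless
	 * lookups may still be examining the record.
	 */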
	if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
		afs_fs_give_up_all_callbacks(net, server, &ac, NULL);

	call_rcu(&server->rcu, afs_server_rcu);
	afs_dec_servers_outstanding(net);
}

/*
 * Garbage collect any expired servers.
 */
static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
{
	struct afs_server *server;
	bool deleted;
	int usage;

	while ((server = gc_list)) {
		gc_list = server->gc_next;

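		/* The record is only deleted if its usage count is still
		 * exactly 1 when checked under fs_lock; if someone
		 * resurrected it in the meantime, the cmpxchg fails and the
		 * record is left alone.
		 */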
		write_seqlock(&net->fs_lock);
		usage = 1;
		deleted = atomic_try_cmpxchg(&server->usage, &usage, 0);
		if (deleted) {
			rb_erase(&server->uuid_rb, &net->fs_servers);
			hlist_del_rcu(&server->proc_link);
		}
		write_sequnlock(&net->fs_lock);

		if (deleted) {
			write_seqlock(&net->fs_addr_lock);
			if (!hlist_unhashed(&server->addr4_link))
				hlist_del_rcu(&server->addr4_link);
			if (!hlist_unhashed(&server->addr6_link))
				hlist_del_rcu(&server->addr6_link);
			write_sequnlock(&net->fs_addr_lock);
			afs_destroy_server(net, server);
		}
	}
}

/*
 * Manage the records of servers known to be within a network namespace.  This
 * includes garbage collecting unused servers.
 *
 * Note also that we were given an increment on net->servers_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_servers(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, fs_manager);
	struct afs_server *gc_list = NULL;
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the server list looking for servers that have expired from
	 * lack of use.
	 */
	read_seqlock_excl(&net->fs_lock);

	for (cursor = rb_first(&net->fs_servers); cursor; cursor = rb_next(cursor)) {
		struct afs_server *server =
			rb_entry(cursor, struct afs_server, uuid_rb);
		int usage = atomic_read(&server->usage);

		_debug("manage %pU %u", &server->uuid, usage);

		ASSERTCMP(usage, >=, 1);
		ASSERTIFCMP(purging, usage, ==, 1);

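		/* A server down to just the tree's ref expires
		 * afs_server_gc_delay seconds after it was last put, or
		 * immediately if its VL lookup failed or it wasn't found.
		 * When purging, every such server is reaped regardless;
		 * otherwise unexpired servers set when the next pass runs.
		 */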
		if (usage == 1) {
			time64_t expire_at = server->put_time;

			if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
			    !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
				expire_at += afs_server_gc_delay;
			if (purging || expire_at <= now) {
				server->gc_next = gc_list;
				gc_list = server;
			} else if (expire_at < next_manage) {
				next_manage = expire_at;
			}
		}
	}

	read_sequnlock_excl(&net->fs_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * servers_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->fs_manager))
				afs_inc_servers_outstanding(net);
		} else {
			afs_set_server_timer(net, next_manage - now);
		}
	}

	afs_gc_servers(net, gc_list);

	afs_dec_servers_outstanding(net);
	_leave(" [%d]", atomic_read(&net->servers_outstanding));
}

static void afs_queue_server_manager(struct afs_net *net)
{
	afs_inc_servers_outstanding(net);
	if (!queue_work(afs_wq, &net->fs_manager))
		afs_dec_servers_outstanding(net);
}

/*
 * Purge list of servers.
 */
void afs_purge_servers(struct afs_net *net)
{
	_enter("");

	if (del_timer_sync(&net->fs_timer))
		atomic_dec(&net->servers_outstanding);

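	/* Queue the manager for a final pass (net->live should be false by
	 * now, so every remaining server gets queued for destruction), then
	 * wait for the outstanding timer, work and server counts to drain
	 * away.
	 */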
	afs_queue_server_manager(net);

	_debug("wait");
	wait_var_event(&net->servers_outstanding,
		       !atomic_read(&net->servers_outstanding));
	_leave("");
}

/*
 * Probe a fileserver to find its capabilities.
 *
 * TODO: Try service upgrade.
 */
static bool afs_do_probe_fileserver(struct afs_fs_cursor *fc)
{
	_enter("");

	fc->ac.addr = NULL;
	fc->ac.start = READ_ONCE(fc->ac.alist->index);
	fc->ac.index = fc->ac.start;
	fc->ac.error = 0;
	fc->ac.begun = false;

	while (afs_iterate_addresses(&fc->ac)) {
		afs_fs_get_capabilities(afs_v2net(fc->vnode), fc->cbi->server,
					&fc->ac, fc->key);
		switch (fc->ac.error) {
		case 0:
			afs_end_cursor(&fc->ac);
			set_bit(AFS_SERVER_FL_PROBED, &fc->cbi->server->flags);
			return true;
		case -ECONNABORTED:
			fc->ac.error = afs_abort_to_error(fc->ac.abort_code);
			goto error;
		case -ENOMEM:
		case -ENONET:
			goto error;
		case -ENETUNREACH:
		case -EHOSTUNREACH:
		case -ECONNREFUSED:
		case -ETIMEDOUT:
		case -ETIME:
			break;
		default:
			fc->ac.error = -EIO;
			goto error;
		}
	}

error:
	afs_end_cursor(&fc->ac);
	return false;
}

/*
 * If we haven't already, try probing the fileserver to get its capabilities.
 * We try not to instigate parallel probes, but it's possible that the parallel
 * probes will fail due to authentication failure when ours would succeed.
 *
 * TODO: Try sending an anonymous probe if an authenticated probe fails.
 */
bool afs_probe_fileserver(struct afs_fs_cursor *fc)
{
	bool success;
	int ret, retries = 0;

	_enter("");

retry:
	if (test_bit(AFS_SERVER_FL_PROBED, &fc->cbi->server->flags)) {
		_leave(" = t");
		return true;
	}

	if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &fc->cbi->server->flags)) {
		success = afs_do_probe_fileserver(fc);
		clear_bit_unlock(AFS_SERVER_FL_PROBING, &fc->cbi->server->flags);
		wake_up_bit(&fc->cbi->server->flags, AFS_SERVER_FL_PROBING);
		_leave(" = %d", success);
		return success;
	}

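	/* Another thread is already probing this server.  Wait for it to
	 * finish and then recheck, since its probe may have failed for
	 * authentication reasons that wouldn't apply to our key; give up
	 * after a few attempts.
	 */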
	_debug("wait");
	ret = wait_on_bit(&fc->cbi->server->flags, AFS_SERVER_FL_PROBING,
			  TASK_INTERRUPTIBLE);
	if (ret == -ERESTARTSYS) {
		fc->ac.error = ret;
		_leave(" = f [%d]", ret);
		return false;
	}

	retries++;
	if (retries == 4) {
		fc->ac.error = -ESTALE;
		_leave(" = f [stale]");
		return false;
	}
	_debug("retry");
	goto retry;
}

/*
 * Get an update for a server's address list.
 */
static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct afs_server *server)
{
	struct afs_addr_list *alist, *discard;

	_enter("");

	alist = afs_vl_lookup_addrs(fc->vnode->volume->cell, fc->key,
				    &server->uuid);
	if (IS_ERR(alist)) {
		fc->ac.error = PTR_ERR(alist);
		_leave(" = f [%d]", fc->ac.error);
		return false;
	}

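	/* Only swap in the new address list if the VL server reported a
	 * different version; either way, exactly one list (the superseded
	 * one or the redundant new one) gets discarded.
	 */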
	discard = alist;
	if (server->addr_version != alist->version) {
		write_lock(&server->fs_lock);
		discard = rcu_dereference_protected(server->addresses,
						    lockdep_is_held(&server->fs_lock));
		rcu_assign_pointer(server->addresses, alist);
		server->addr_version = alist->version;
		write_unlock(&server->fs_lock);
	}

	server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
	afs_put_addrlist(discard);
	_leave(" = t");
	return true;
}

/*
 * See if a server's address list needs updating.
 */
bool afs_check_server_record(struct afs_fs_cursor *fc, struct afs_server *server)
{
	time64_t now = ktime_get_real_seconds();
	long diff;
	bool success;
	int ret, retries = 0;

	_enter("");

	ASSERT(server);

retry:
	diff = READ_ONCE(server->update_at) - now;
	if (diff > 0) {
		_leave(" = t [not now %ld]", diff);
		return true;
	}

	if (!test_and_set_bit_lock(AFS_SERVER_FL_UPDATING, &server->flags)) {
		success = afs_update_server_record(fc, server);
		clear_bit_unlock(AFS_SERVER_FL_UPDATING, &server->flags);
		wake_up_bit(&server->flags, AFS_SERVER_FL_UPDATING);
		_leave(" = %d", success);
		return success;
	}

	ret = wait_on_bit(&server->flags, AFS_SERVER_FL_UPDATING,
			  TASK_INTERRUPTIBLE);
	if (ret == -ERESTARTSYS) {
		fc->ac.error = ret;
		_leave(" = f [intr]");
		return false;
	}

	retries++;
	if (retries == 4) {
		_leave(" = f [stale]");
		fc->ac.error = -ESTALE;
		return false;
	}
	goto retry;
}