1 /******************************************************************************
2 *******************************************************************************
3 **
4 **  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
5 **
6 **  This copyrighted material is made available to anyone wishing to use,
7 **  modify, copy, or redistribute it subject to the terms and conditions
8 **  of the GNU General Public License v.2.
9 **
10 *******************************************************************************
11 ******************************************************************************/
12 
13 /* Central locking logic has four stages:
14 
15    dlm_lock()
16    dlm_unlock()
17 
18    request_lock(ls, lkb)
19    convert_lock(ls, lkb)
20    unlock_lock(ls, lkb)
21    cancel_lock(ls, lkb)
22 
23    _request_lock(r, lkb)
24    _convert_lock(r, lkb)
25    _unlock_lock(r, lkb)
26    _cancel_lock(r, lkb)
27 
28    do_request(r, lkb)
29    do_convert(r, lkb)
30    do_unlock(r, lkb)
31    do_cancel(r, lkb)
32 
33    Stage 1 (lock, unlock) is mainly about checking input args and
34    splitting into one of the four main operations:
35 
36        dlm_lock          = request_lock
37        dlm_lock+CONVERT  = convert_lock
38        dlm_unlock        = unlock_lock
39        dlm_unlock+CANCEL = cancel_lock
40 
41    Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
42    provided to the next stage.
43 
44    Stage 3, _xxxx_lock(), determines if the operation is local or remote.
45    When remote, it calls send_xxxx(), when local it calls do_xxxx().
46 
47    Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
48    given rsb and lkb and queues callbacks.
49 
50    For remote operations, send_xxxx() results in the corresponding do_xxxx()
51    function being executed on the remote node.  The connecting send/receive
52    calls on local (L) and remote (R) nodes:
53 
54    L: send_xxxx()              ->  R: receive_xxxx()
55                                    R: do_xxxx()
56    L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
57 */
58 #include <linux/types.h>
59 #include <linux/rbtree.h>
60 #include <linux/slab.h>
61 #include "dlm_internal.h"
62 #include <linux/dlm_device.h>
63 #include "memory.h"
64 #include "lowcomms.h"
65 #include "requestqueue.h"
66 #include "util.h"
67 #include "dir.h"
68 #include "member.h"
69 #include "lockspace.h"
70 #include "ast.h"
71 #include "lock.h"
72 #include "rcom.h"
73 #include "recover.h"
74 #include "lvb_table.h"
75 #include "user.h"
76 #include "config.h"
77 
78 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
79 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
80 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
81 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
82 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
83 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
84 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
85 static int send_remove(struct dlm_rsb *r);
86 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
87 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
88 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
89 				    struct dlm_message *ms);
90 static int receive_extralen(struct dlm_message *ms);
91 static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
92 static void del_timeout(struct dlm_lkb *lkb);
93 static void toss_rsb(struct kref *kref);
94 
95 /*
96  * Lock compatibilty matrix - thanks Steve
97  * UN = Unlocked state. Not really a state, used as a flag
98  * PD = Padding. Used to make the matrix a nice power of two in size
99  * Other states are the same as the VMS DLM.
100  * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
101  */
102 
103 static const int __dlm_compat_matrix[8][8] = {
104       /* UN NL CR CW PR PW EX PD */
105         {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
106         {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
107         {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
108         {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
109         {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
110         {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
111         {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
112         {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
113 };
114 
115 /*
116  * This defines the direction of transfer of LVB data.
117  * Granted mode is the row; requested mode is the column.
118  * Usage: matrix[grmode+1][rqmode+1]
119  * 1 = LVB is returned to the caller
120  * 0 = LVB is written to the resource
121  * -1 = nothing happens to the LVB
122  */
123 
124 const int dlm_lvb_operations[8][8] = {
125         /* UN   NL  CR  CW  PR  PW  EX  PD*/
126         {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
127         {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
128         {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
129         {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
130         {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
131         {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
132         {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
133         {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
134 };
135 
136 #define modes_compat(gr, rq) \
137 	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
138 
dlm_modes_compat(int mode1,int mode2)139 int dlm_modes_compat(int mode1, int mode2)
140 {
141 	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
142 }
143 
144 /*
145  * Compatibility matrix for conversions with QUECVT set.
146  * Granted mode is the row; requested mode is the column.
147  * Usage: matrix[grmode+1][rqmode+1]
148  */
149 
150 static const int __quecvt_compat_matrix[8][8] = {
151       /* UN NL CR CW PR PW EX PD */
152         {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
153         {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
154         {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
155         {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
156         {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
157         {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
158         {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
159         {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
160 };
161 
dlm_print_lkb(struct dlm_lkb * lkb)162 void dlm_print_lkb(struct dlm_lkb *lkb)
163 {
164 	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
165 	       "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
166 	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
167 	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
168 	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
169 	       (unsigned long long)lkb->lkb_recover_seq);
170 }
171 
dlm_print_rsb(struct dlm_rsb * r)172 static void dlm_print_rsb(struct dlm_rsb *r)
173 {
174 	printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
175 	       "rlc %d name %s\n",
176 	       r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
177 	       r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
178 	       r->res_name);
179 }
180 
dlm_dump_rsb(struct dlm_rsb * r)181 void dlm_dump_rsb(struct dlm_rsb *r)
182 {
183 	struct dlm_lkb *lkb;
184 
185 	dlm_print_rsb(r);
186 
187 	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
188 	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
189 	printk(KERN_ERR "rsb lookup list\n");
190 	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
191 		dlm_print_lkb(lkb);
192 	printk(KERN_ERR "rsb grant queue:\n");
193 	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
194 		dlm_print_lkb(lkb);
195 	printk(KERN_ERR "rsb convert queue:\n");
196 	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
197 		dlm_print_lkb(lkb);
198 	printk(KERN_ERR "rsb wait queue:\n");
199 	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
200 		dlm_print_lkb(lkb);
201 }
202 
203 /* Threads cannot use the lockspace while it's being recovered */
204 
dlm_lock_recovery(struct dlm_ls * ls)205 static inline void dlm_lock_recovery(struct dlm_ls *ls)
206 {
207 	down_read(&ls->ls_in_recovery);
208 }
209 
dlm_unlock_recovery(struct dlm_ls * ls)210 void dlm_unlock_recovery(struct dlm_ls *ls)
211 {
212 	up_read(&ls->ls_in_recovery);
213 }
214 
dlm_lock_recovery_try(struct dlm_ls * ls)215 int dlm_lock_recovery_try(struct dlm_ls *ls)
216 {
217 	return down_read_trylock(&ls->ls_in_recovery);
218 }
219 
can_be_queued(struct dlm_lkb * lkb)220 static inline int can_be_queued(struct dlm_lkb *lkb)
221 {
222 	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
223 }
224 
force_blocking_asts(struct dlm_lkb * lkb)225 static inline int force_blocking_asts(struct dlm_lkb *lkb)
226 {
227 	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
228 }
229 
is_demoted(struct dlm_lkb * lkb)230 static inline int is_demoted(struct dlm_lkb *lkb)
231 {
232 	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
233 }
234 
is_altmode(struct dlm_lkb * lkb)235 static inline int is_altmode(struct dlm_lkb *lkb)
236 {
237 	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
238 }
239 
is_granted(struct dlm_lkb * lkb)240 static inline int is_granted(struct dlm_lkb *lkb)
241 {
242 	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
243 }
244 
is_remote(struct dlm_rsb * r)245 static inline int is_remote(struct dlm_rsb *r)
246 {
247 	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
248 	return !!r->res_nodeid;
249 }
250 
is_process_copy(struct dlm_lkb * lkb)251 static inline int is_process_copy(struct dlm_lkb *lkb)
252 {
253 	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
254 }
255 
is_master_copy(struct dlm_lkb * lkb)256 static inline int is_master_copy(struct dlm_lkb *lkb)
257 {
258 	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
259 }
260 
middle_conversion(struct dlm_lkb * lkb)261 static inline int middle_conversion(struct dlm_lkb *lkb)
262 {
263 	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
264 	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
265 		return 1;
266 	return 0;
267 }
268 
down_conversion(struct dlm_lkb * lkb)269 static inline int down_conversion(struct dlm_lkb *lkb)
270 {
271 	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
272 }
273 
is_overlap_unlock(struct dlm_lkb * lkb)274 static inline int is_overlap_unlock(struct dlm_lkb *lkb)
275 {
276 	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
277 }
278 
is_overlap_cancel(struct dlm_lkb * lkb)279 static inline int is_overlap_cancel(struct dlm_lkb *lkb)
280 {
281 	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
282 }
283 
is_overlap(struct dlm_lkb * lkb)284 static inline int is_overlap(struct dlm_lkb *lkb)
285 {
286 	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
287 				  DLM_IFL_OVERLAP_CANCEL));
288 }
289 
queue_cast(struct dlm_rsb * r,struct dlm_lkb * lkb,int rv)290 static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
291 {
292 	if (is_master_copy(lkb))
293 		return;
294 
295 	del_timeout(lkb);
296 
297 	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
298 
299 	/* if the operation was a cancel, then return -DLM_ECANCEL, if a
300 	   timeout caused the cancel then return -ETIMEDOUT */
301 	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
302 		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
303 		rv = -ETIMEDOUT;
304 	}
305 
306 	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
307 		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
308 		rv = -EDEADLK;
309 	}
310 
311 	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
312 }
313 
queue_cast_overlap(struct dlm_rsb * r,struct dlm_lkb * lkb)314 static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
315 {
316 	queue_cast(r, lkb,
317 		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
318 }
319 
queue_bast(struct dlm_rsb * r,struct dlm_lkb * lkb,int rqmode)320 static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
321 {
322 	if (is_master_copy(lkb)) {
323 		send_bast(r, lkb, rqmode);
324 	} else {
325 		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
326 	}
327 }
328 
329 /*
330  * Basic operations on rsb's and lkb's
331  */
332 
333 /* This is only called to add a reference when the code already holds
334    a valid reference to the rsb, so there's no need for locking. */
335 
hold_rsb(struct dlm_rsb * r)336 static inline void hold_rsb(struct dlm_rsb *r)
337 {
338 	kref_get(&r->res_ref);
339 }
340 
dlm_hold_rsb(struct dlm_rsb * r)341 void dlm_hold_rsb(struct dlm_rsb *r)
342 {
343 	hold_rsb(r);
344 }
345 
346 /* When all references to the rsb are gone it's transferred to
347    the tossed list for later disposal. */
348 
put_rsb(struct dlm_rsb * r)349 static void put_rsb(struct dlm_rsb *r)
350 {
351 	struct dlm_ls *ls = r->res_ls;
352 	uint32_t bucket = r->res_bucket;
353 
354 	spin_lock(&ls->ls_rsbtbl[bucket].lock);
355 	kref_put(&r->res_ref, toss_rsb);
356 	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
357 }
358 
dlm_put_rsb(struct dlm_rsb * r)359 void dlm_put_rsb(struct dlm_rsb *r)
360 {
361 	put_rsb(r);
362 }
363 
pre_rsb_struct(struct dlm_ls * ls)364 static int pre_rsb_struct(struct dlm_ls *ls)
365 {
366 	struct dlm_rsb *r1, *r2;
367 	int count = 0;
368 
369 	spin_lock(&ls->ls_new_rsb_spin);
370 	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
371 		spin_unlock(&ls->ls_new_rsb_spin);
372 		return 0;
373 	}
374 	spin_unlock(&ls->ls_new_rsb_spin);
375 
376 	r1 = dlm_allocate_rsb(ls);
377 	r2 = dlm_allocate_rsb(ls);
378 
379 	spin_lock(&ls->ls_new_rsb_spin);
380 	if (r1) {
381 		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
382 		ls->ls_new_rsb_count++;
383 	}
384 	if (r2) {
385 		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
386 		ls->ls_new_rsb_count++;
387 	}
388 	count = ls->ls_new_rsb_count;
389 	spin_unlock(&ls->ls_new_rsb_spin);
390 
391 	if (!count)
392 		return -ENOMEM;
393 	return 0;
394 }
395 
396 /* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
397    unlock any spinlocks, go back and call pre_rsb_struct again.
398    Otherwise, take an rsb off the list and return it. */
399 
get_rsb_struct(struct dlm_ls * ls,char * name,int len,struct dlm_rsb ** r_ret)400 static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
401 			  struct dlm_rsb **r_ret)
402 {
403 	struct dlm_rsb *r;
404 	int count;
405 
406 	spin_lock(&ls->ls_new_rsb_spin);
407 	if (list_empty(&ls->ls_new_rsb)) {
408 		count = ls->ls_new_rsb_count;
409 		spin_unlock(&ls->ls_new_rsb_spin);
410 		log_debug(ls, "find_rsb retry %d %d %s",
411 			  count, dlm_config.ci_new_rsb_count, name);
412 		return -EAGAIN;
413 	}
414 
415 	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
416 	list_del(&r->res_hashchain);
417 	/* Convert the empty list_head to a NULL rb_node for tree usage: */
418 	memset(&r->res_hashnode, 0, sizeof(struct rb_node));
419 	ls->ls_new_rsb_count--;
420 	spin_unlock(&ls->ls_new_rsb_spin);
421 
422 	r->res_ls = ls;
423 	r->res_length = len;
424 	memcpy(r->res_name, name, len);
425 	mutex_init(&r->res_mutex);
426 
427 	INIT_LIST_HEAD(&r->res_lookup);
428 	INIT_LIST_HEAD(&r->res_grantqueue);
429 	INIT_LIST_HEAD(&r->res_convertqueue);
430 	INIT_LIST_HEAD(&r->res_waitqueue);
431 	INIT_LIST_HEAD(&r->res_root_list);
432 	INIT_LIST_HEAD(&r->res_recover_list);
433 
434 	*r_ret = r;
435 	return 0;
436 }
437 
rsb_cmp(struct dlm_rsb * r,const char * name,int nlen)438 static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
439 {
440 	char maxname[DLM_RESNAME_MAXLEN];
441 
442 	memset(maxname, 0, DLM_RESNAME_MAXLEN);
443 	memcpy(maxname, name, nlen);
444 	return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
445 }
446 
dlm_search_rsb_tree(struct rb_root * tree,char * name,int len,struct dlm_rsb ** r_ret)447 int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
448 			struct dlm_rsb **r_ret)
449 {
450 	struct rb_node *node = tree->rb_node;
451 	struct dlm_rsb *r;
452 	int rc;
453 
454 	while (node) {
455 		r = rb_entry(node, struct dlm_rsb, res_hashnode);
456 		rc = rsb_cmp(r, name, len);
457 		if (rc < 0)
458 			node = node->rb_left;
459 		else if (rc > 0)
460 			node = node->rb_right;
461 		else
462 			goto found;
463 	}
464 	*r_ret = NULL;
465 	return -EBADR;
466 
467  found:
468 	*r_ret = r;
469 	return 0;
470 }
471 
rsb_insert(struct dlm_rsb * rsb,struct rb_root * tree)472 static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
473 {
474 	struct rb_node **newn = &tree->rb_node;
475 	struct rb_node *parent = NULL;
476 	int rc;
477 
478 	while (*newn) {
479 		struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
480 					       res_hashnode);
481 
482 		parent = *newn;
483 		rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
484 		if (rc < 0)
485 			newn = &parent->rb_left;
486 		else if (rc > 0)
487 			newn = &parent->rb_right;
488 		else {
489 			log_print("rsb_insert match");
490 			dlm_dump_rsb(rsb);
491 			dlm_dump_rsb(cur);
492 			return -EEXIST;
493 		}
494 	}
495 
496 	rb_link_node(&rsb->res_hashnode, parent, newn);
497 	rb_insert_color(&rsb->res_hashnode, tree);
498 	return 0;
499 }
500 
501 /*
502  * Find rsb in rsbtbl and potentially create/add one
503  *
504  * Delaying the release of rsb's has a similar benefit to applications keeping
505  * NL locks on an rsb, but without the guarantee that the cached master value
506  * will still be valid when the rsb is reused.  Apps aren't always smart enough
507  * to keep NL locks on an rsb that they may lock again shortly; this can lead
508  * to excessive master lookups and removals if we don't delay the release.
509  *
510  * Searching for an rsb means looking through both the normal list and toss
511  * list.  When found on the toss list the rsb is moved to the normal list with
512  * ref count of 1; when found on normal list the ref count is incremented.
513  *
514  * rsb's on the keep list are being used locally and refcounted.
515  * rsb's on the toss list are not being used locally, and are not refcounted.
516  *
517  * The toss list rsb's were either
518  * - previously used locally but not any more (were on keep list, then
519  *   moved to toss list when last refcount dropped)
520  * - created and put on toss list as a directory record for a lookup
521  *   (we are the dir node for the res, but are not using the res right now,
522  *   but some other node is)
523  *
524  * The purpose of find_rsb() is to return a refcounted rsb for local use.
525  * So, if the given rsb is on the toss list, it is moved to the keep list
526  * before being returned.
527  *
528  * toss_rsb() happens when all local usage of the rsb is done, i.e. no
529  * more refcounts exist, so the rsb is moved from the keep list to the
530  * toss list.
531  *
532  * rsb's on both keep and toss lists are used for doing a name to master
533  * lookups.  rsb's that are in use locally (and being refcounted) are on
534  * the keep list, rsb's that are not in use locally (not refcounted) and
535  * only exist for name/master lookups are on the toss list.
536  *
537  * rsb's on the toss list who's dir_nodeid is not local can have stale
538  * name/master mappings.  So, remote requests on such rsb's can potentially
539  * return with an error, which means the mapping is stale and needs to
540  * be updated with a new lookup.  (The idea behind MASTER UNCERTAIN and
541  * first_lkid is to keep only a single outstanding request on an rsb
542  * while that rsb has a potentially stale master.)
543  */
544 
find_rsb_dir(struct dlm_ls * ls,char * name,int len,uint32_t hash,uint32_t b,int dir_nodeid,int from_nodeid,unsigned int flags,struct dlm_rsb ** r_ret)545 static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
546 			uint32_t hash, uint32_t b,
547 			int dir_nodeid, int from_nodeid,
548 			unsigned int flags, struct dlm_rsb **r_ret)
549 {
550 	struct dlm_rsb *r = NULL;
551 	int our_nodeid = dlm_our_nodeid();
552 	int from_local = 0;
553 	int from_other = 0;
554 	int from_dir = 0;
555 	int create = 0;
556 	int error;
557 
558 	if (flags & R_RECEIVE_REQUEST) {
559 		if (from_nodeid == dir_nodeid)
560 			from_dir = 1;
561 		else
562 			from_other = 1;
563 	} else if (flags & R_REQUEST) {
564 		from_local = 1;
565 	}
566 
567 	/*
568 	 * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
569 	 * from_nodeid has sent us a lock in dlm_recover_locks, believing
570 	 * we're the new master.  Our local recovery may not have set
571 	 * res_master_nodeid to our_nodeid yet, so allow either.  Don't
572 	 * create the rsb; dlm_recover_process_copy() will handle EBADR
573 	 * by resending.
574 	 *
575 	 * If someone sends us a request, we are the dir node, and we do
576 	 * not find the rsb anywhere, then recreate it.  This happens if
577 	 * someone sends us a request after we have removed/freed an rsb
578 	 * from our toss list.  (They sent a request instead of lookup
579 	 * because they are using an rsb from their toss list.)
580 	 */
581 
582 	if (from_local || from_dir ||
583 	    (from_other && (dir_nodeid == our_nodeid))) {
584 		create = 1;
585 	}
586 
587  retry:
588 	if (create) {
589 		error = pre_rsb_struct(ls);
590 		if (error < 0)
591 			goto out;
592 	}
593 
594 	spin_lock(&ls->ls_rsbtbl[b].lock);
595 
596 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
597 	if (error)
598 		goto do_toss;
599 
600 	/*
601 	 * rsb is active, so we can't check master_nodeid without lock_rsb.
602 	 */
603 
604 	kref_get(&r->res_ref);
605 	error = 0;
606 	goto out_unlock;
607 
608 
609  do_toss:
610 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
611 	if (error)
612 		goto do_new;
613 
614 	/*
615 	 * rsb found inactive (master_nodeid may be out of date unless
616 	 * we are the dir_nodeid or were the master)  No other thread
617 	 * is using this rsb because it's on the toss list, so we can
618 	 * look at or update res_master_nodeid without lock_rsb.
619 	 */
620 
621 	if ((r->res_master_nodeid != our_nodeid) && from_other) {
622 		/* our rsb was not master, and another node (not the dir node)
623 		   has sent us a request */
624 		log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
625 			  from_nodeid, r->res_master_nodeid, dir_nodeid,
626 			  r->res_name);
627 		error = -ENOTBLK;
628 		goto out_unlock;
629 	}
630 
631 	if ((r->res_master_nodeid != our_nodeid) && from_dir) {
632 		/* don't think this should ever happen */
633 		log_error(ls, "find_rsb toss from_dir %d master %d",
634 			  from_nodeid, r->res_master_nodeid);
635 		dlm_print_rsb(r);
636 		/* fix it and go on */
637 		r->res_master_nodeid = our_nodeid;
638 		r->res_nodeid = 0;
639 		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
640 		r->res_first_lkid = 0;
641 	}
642 
643 	if (from_local && (r->res_master_nodeid != our_nodeid)) {
644 		/* Because we have held no locks on this rsb,
645 		   res_master_nodeid could have become stale. */
646 		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
647 		r->res_first_lkid = 0;
648 	}
649 
650 	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
651 	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
652 	goto out_unlock;
653 
654 
655  do_new:
656 	/*
657 	 * rsb not found
658 	 */
659 
660 	if (error == -EBADR && !create)
661 		goto out_unlock;
662 
663 	error = get_rsb_struct(ls, name, len, &r);
664 	if (error == -EAGAIN) {
665 		spin_unlock(&ls->ls_rsbtbl[b].lock);
666 		goto retry;
667 	}
668 	if (error)
669 		goto out_unlock;
670 
671 	r->res_hash = hash;
672 	r->res_bucket = b;
673 	r->res_dir_nodeid = dir_nodeid;
674 	kref_init(&r->res_ref);
675 
676 	if (from_dir) {
677 		/* want to see how often this happens */
678 		log_debug(ls, "find_rsb new from_dir %d recreate %s",
679 			  from_nodeid, r->res_name);
680 		r->res_master_nodeid = our_nodeid;
681 		r->res_nodeid = 0;
682 		goto out_add;
683 	}
684 
685 	if (from_other && (dir_nodeid != our_nodeid)) {
686 		/* should never happen */
687 		log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
688 			  from_nodeid, dir_nodeid, our_nodeid, r->res_name);
689 		dlm_free_rsb(r);
690 		r = NULL;
691 		error = -ENOTBLK;
692 		goto out_unlock;
693 	}
694 
695 	if (from_other) {
696 		log_debug(ls, "find_rsb new from_other %d dir %d %s",
697 			  from_nodeid, dir_nodeid, r->res_name);
698 	}
699 
700 	if (dir_nodeid == our_nodeid) {
701 		/* When we are the dir nodeid, we can set the master
702 		   node immediately */
703 		r->res_master_nodeid = our_nodeid;
704 		r->res_nodeid = 0;
705 	} else {
706 		/* set_master will send_lookup to dir_nodeid */
707 		r->res_master_nodeid = 0;
708 		r->res_nodeid = -1;
709 	}
710 
711  out_add:
712 	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
713  out_unlock:
714 	spin_unlock(&ls->ls_rsbtbl[b].lock);
715  out:
716 	*r_ret = r;
717 	return error;
718 }
719 
720 /* During recovery, other nodes can send us new MSTCPY locks (from
721    dlm_recover_locks) before we've made ourself master (in
722    dlm_recover_masters). */
723 
find_rsb_nodir(struct dlm_ls * ls,char * name,int len,uint32_t hash,uint32_t b,int dir_nodeid,int from_nodeid,unsigned int flags,struct dlm_rsb ** r_ret)724 static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
725 			  uint32_t hash, uint32_t b,
726 			  int dir_nodeid, int from_nodeid,
727 			  unsigned int flags, struct dlm_rsb **r_ret)
728 {
729 	struct dlm_rsb *r = NULL;
730 	int our_nodeid = dlm_our_nodeid();
731 	int recover = (flags & R_RECEIVE_RECOVER);
732 	int error;
733 
734  retry:
735 	error = pre_rsb_struct(ls);
736 	if (error < 0)
737 		goto out;
738 
739 	spin_lock(&ls->ls_rsbtbl[b].lock);
740 
741 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
742 	if (error)
743 		goto do_toss;
744 
745 	/*
746 	 * rsb is active, so we can't check master_nodeid without lock_rsb.
747 	 */
748 
749 	kref_get(&r->res_ref);
750 	goto out_unlock;
751 
752 
753  do_toss:
754 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
755 	if (error)
756 		goto do_new;
757 
758 	/*
759 	 * rsb found inactive. No other thread is using this rsb because
760 	 * it's on the toss list, so we can look at or update
761 	 * res_master_nodeid without lock_rsb.
762 	 */
763 
764 	if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
765 		/* our rsb is not master, and another node has sent us a
766 		   request; this should never happen */
767 		log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
768 			  from_nodeid, r->res_master_nodeid, dir_nodeid);
769 		dlm_print_rsb(r);
770 		error = -ENOTBLK;
771 		goto out_unlock;
772 	}
773 
774 	if (!recover && (r->res_master_nodeid != our_nodeid) &&
775 	    (dir_nodeid == our_nodeid)) {
776 		/* our rsb is not master, and we are dir; may as well fix it;
777 		   this should never happen */
778 		log_error(ls, "find_rsb toss our %d master %d dir %d",
779 			  our_nodeid, r->res_master_nodeid, dir_nodeid);
780 		dlm_print_rsb(r);
781 		r->res_master_nodeid = our_nodeid;
782 		r->res_nodeid = 0;
783 	}
784 
785 	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
786 	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
787 	goto out_unlock;
788 
789 
790  do_new:
791 	/*
792 	 * rsb not found
793 	 */
794 
795 	error = get_rsb_struct(ls, name, len, &r);
796 	if (error == -EAGAIN) {
797 		spin_unlock(&ls->ls_rsbtbl[b].lock);
798 		goto retry;
799 	}
800 	if (error)
801 		goto out_unlock;
802 
803 	r->res_hash = hash;
804 	r->res_bucket = b;
805 	r->res_dir_nodeid = dir_nodeid;
806 	r->res_master_nodeid = dir_nodeid;
807 	r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
808 	kref_init(&r->res_ref);
809 
810 	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
811  out_unlock:
812 	spin_unlock(&ls->ls_rsbtbl[b].lock);
813  out:
814 	*r_ret = r;
815 	return error;
816 }
817 
find_rsb(struct dlm_ls * ls,char * name,int len,int from_nodeid,unsigned int flags,struct dlm_rsb ** r_ret)818 static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid,
819 		    unsigned int flags, struct dlm_rsb **r_ret)
820 {
821 	uint32_t hash, b;
822 	int dir_nodeid;
823 
824 	if (len > DLM_RESNAME_MAXLEN)
825 		return -EINVAL;
826 
827 	hash = jhash(name, len, 0);
828 	b = hash & (ls->ls_rsbtbl_size - 1);
829 
830 	dir_nodeid = dlm_hash2nodeid(ls, hash);
831 
832 	if (dlm_no_directory(ls))
833 		return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
834 				      from_nodeid, flags, r_ret);
835 	else
836 		return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
837 				      from_nodeid, flags, r_ret);
838 }
839 
840 /* we have received a request and found that res_master_nodeid != our_nodeid,
841    so we need to return an error or make ourself the master */
842 
validate_master_nodeid(struct dlm_ls * ls,struct dlm_rsb * r,int from_nodeid)843 static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
844 				  int from_nodeid)
845 {
846 	if (dlm_no_directory(ls)) {
847 		log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
848 			  from_nodeid, r->res_master_nodeid,
849 			  r->res_dir_nodeid);
850 		dlm_print_rsb(r);
851 		return -ENOTBLK;
852 	}
853 
854 	if (from_nodeid != r->res_dir_nodeid) {
855 		/* our rsb is not master, and another node (not the dir node)
856 	   	   has sent us a request.  this is much more common when our
857 	   	   master_nodeid is zero, so limit debug to non-zero.  */
858 
859 		if (r->res_master_nodeid) {
860 			log_debug(ls, "validate master from_other %d master %d "
861 				  "dir %d first %x %s", from_nodeid,
862 				  r->res_master_nodeid, r->res_dir_nodeid,
863 				  r->res_first_lkid, r->res_name);
864 		}
865 		return -ENOTBLK;
866 	} else {
867 		/* our rsb is not master, but the dir nodeid has sent us a
868 	   	   request; this could happen with master 0 / res_nodeid -1 */
869 
870 		if (r->res_master_nodeid) {
871 			log_error(ls, "validate master from_dir %d master %d "
872 				  "first %x %s",
873 				  from_nodeid, r->res_master_nodeid,
874 				  r->res_first_lkid, r->res_name);
875 		}
876 
877 		r->res_master_nodeid = dlm_our_nodeid();
878 		r->res_nodeid = 0;
879 		return 0;
880 	}
881 }
882 
883 /*
884  * We're the dir node for this res and another node wants to know the
885  * master nodeid.  During normal operation (non recovery) this is only
886  * called from receive_lookup(); master lookups when the local node is
887  * the dir node are done by find_rsb().
888  *
889  * normal operation, we are the dir node for a resource
890  * . _request_lock
891  * . set_master
892  * . send_lookup
893  * . receive_lookup
894  * . dlm_master_lookup flags 0
895  *
896  * recover directory, we are rebuilding dir for all resources
897  * . dlm_recover_directory
898  * . dlm_rcom_names
899  *   remote node sends back the rsb names it is master of and we are dir of
900  * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
901  *   we either create new rsb setting remote node as master, or find existing
902  *   rsb and set master to be the remote node.
903  *
904  * recover masters, we are finding the new master for resources
905  * . dlm_recover_masters
906  * . recover_master
907  * . dlm_send_rcom_lookup
908  * . receive_rcom_lookup
909  * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
910  */
911 
dlm_master_lookup(struct dlm_ls * ls,int from_nodeid,char * name,int len,unsigned int flags,int * r_nodeid,int * result)912 int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
913 		      unsigned int flags, int *r_nodeid, int *result)
914 {
915 	struct dlm_rsb *r = NULL;
916 	uint32_t hash, b;
917 	int from_master = (flags & DLM_LU_RECOVER_DIR);
918 	int fix_master = (flags & DLM_LU_RECOVER_MASTER);
919 	int our_nodeid = dlm_our_nodeid();
920 	int dir_nodeid, error, toss_list = 0;
921 
922 	if (len > DLM_RESNAME_MAXLEN)
923 		return -EINVAL;
924 
925 	if (from_nodeid == our_nodeid) {
926 		log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
927 			  our_nodeid, flags);
928 		return -EINVAL;
929 	}
930 
931 	hash = jhash(name, len, 0);
932 	b = hash & (ls->ls_rsbtbl_size - 1);
933 
934 	dir_nodeid = dlm_hash2nodeid(ls, hash);
935 	if (dir_nodeid != our_nodeid) {
936 		log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
937 			  from_nodeid, dir_nodeid, our_nodeid, hash,
938 			  ls->ls_num_nodes);
939 		*r_nodeid = -1;
940 		return -EINVAL;
941 	}
942 
943  retry:
944 	error = pre_rsb_struct(ls);
945 	if (error < 0)
946 		return error;
947 
948 	spin_lock(&ls->ls_rsbtbl[b].lock);
949 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
950 	if (!error) {
951 		/* because the rsb is active, we need to lock_rsb before
952 		   checking/changing re_master_nodeid */
953 
954 		hold_rsb(r);
955 		spin_unlock(&ls->ls_rsbtbl[b].lock);
956 		lock_rsb(r);
957 		goto found;
958 	}
959 
960 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
961 	if (error)
962 		goto not_found;
963 
964 	/* because the rsb is inactive (on toss list), it's not refcounted
965 	   and lock_rsb is not used, but is protected by the rsbtbl lock */
966 
967 	toss_list = 1;
968  found:
969 	if (r->res_dir_nodeid != our_nodeid) {
970 		/* should not happen, but may as well fix it and carry on */
971 		log_error(ls, "dlm_master_lookup res_dir %d our %d %s",
972 			  r->res_dir_nodeid, our_nodeid, r->res_name);
973 		r->res_dir_nodeid = our_nodeid;
974 	}
975 
976 	if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
977 		/* Recovery uses this function to set a new master when
978 		   the previous master failed.  Setting NEW_MASTER will
979 		   force dlm_recover_masters to call recover_master on this
980 		   rsb even though the res_nodeid is no longer removed. */
981 
982 		r->res_master_nodeid = from_nodeid;
983 		r->res_nodeid = from_nodeid;
984 		rsb_set_flag(r, RSB_NEW_MASTER);
985 
986 		if (toss_list) {
987 			/* I don't think we should ever find it on toss list. */
988 			log_error(ls, "dlm_master_lookup fix_master on toss");
989 			dlm_dump_rsb(r);
990 		}
991 	}
992 
993 	if (from_master && (r->res_master_nodeid != from_nodeid)) {
994 		/* this will happen if from_nodeid became master during
995 		   a previous recovery cycle, and we aborted the previous
996 		   cycle before recovering this master value */
997 
998 		log_limit(ls, "dlm_master_lookup from_master %d "
999 			  "master_nodeid %d res_nodeid %d first %x %s",
1000 			  from_nodeid, r->res_master_nodeid, r->res_nodeid,
1001 			  r->res_first_lkid, r->res_name);
1002 
1003 		if (r->res_master_nodeid == our_nodeid) {
1004 			log_error(ls, "from_master %d our_master", from_nodeid);
1005 			dlm_dump_rsb(r);
1006 			goto out_found;
1007 		}
1008 
1009 		r->res_master_nodeid = from_nodeid;
1010 		r->res_nodeid = from_nodeid;
1011 		rsb_set_flag(r, RSB_NEW_MASTER);
1012 	}
1013 
1014 	if (!r->res_master_nodeid) {
1015 		/* this will happen if recovery happens while we're looking
1016 		   up the master for this rsb */
1017 
1018 		log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s",
1019 			  from_nodeid, r->res_first_lkid, r->res_name);
1020 		r->res_master_nodeid = from_nodeid;
1021 		r->res_nodeid = from_nodeid;
1022 	}
1023 
1024 	if (!from_master && !fix_master &&
1025 	    (r->res_master_nodeid == from_nodeid)) {
1026 		/* this can happen when the master sends remove, the dir node
1027 		   finds the rsb on the keep list and ignores the remove,
1028 		   and the former master sends a lookup */
1029 
1030 		log_limit(ls, "dlm_master_lookup from master %d flags %x "
1031 			  "first %x %s", from_nodeid, flags,
1032 			  r->res_first_lkid, r->res_name);
1033 	}
1034 
1035  out_found:
1036 	*r_nodeid = r->res_master_nodeid;
1037 	if (result)
1038 		*result = DLM_LU_MATCH;
1039 
1040 	if (toss_list) {
1041 		r->res_toss_time = jiffies;
1042 		/* the rsb was inactive (on toss list) */
1043 		spin_unlock(&ls->ls_rsbtbl[b].lock);
1044 	} else {
1045 		/* the rsb was active */
1046 		unlock_rsb(r);
1047 		put_rsb(r);
1048 	}
1049 	return 0;
1050 
1051  not_found:
1052 	error = get_rsb_struct(ls, name, len, &r);
1053 	if (error == -EAGAIN) {
1054 		spin_unlock(&ls->ls_rsbtbl[b].lock);
1055 		goto retry;
1056 	}
1057 	if (error)
1058 		goto out_unlock;
1059 
1060 	r->res_hash = hash;
1061 	r->res_bucket = b;
1062 	r->res_dir_nodeid = our_nodeid;
1063 	r->res_master_nodeid = from_nodeid;
1064 	r->res_nodeid = from_nodeid;
1065 	kref_init(&r->res_ref);
1066 	r->res_toss_time = jiffies;
1067 
1068 	error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
1069 	if (error) {
1070 		/* should never happen */
1071 		dlm_free_rsb(r);
1072 		spin_unlock(&ls->ls_rsbtbl[b].lock);
1073 		goto retry;
1074 	}
1075 
1076 	if (result)
1077 		*result = DLM_LU_ADD;
1078 	*r_nodeid = from_nodeid;
1079 	error = 0;
1080  out_unlock:
1081 	spin_unlock(&ls->ls_rsbtbl[b].lock);
1082 	return error;
1083 }
1084 
dlm_dump_rsb_hash(struct dlm_ls * ls,uint32_t hash)1085 static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
1086 {
1087 	struct rb_node *n;
1088 	struct dlm_rsb *r;
1089 	int i;
1090 
1091 	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1092 		spin_lock(&ls->ls_rsbtbl[i].lock);
1093 		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
1094 			r = rb_entry(n, struct dlm_rsb, res_hashnode);
1095 			if (r->res_hash == hash)
1096 				dlm_dump_rsb(r);
1097 		}
1098 		spin_unlock(&ls->ls_rsbtbl[i].lock);
1099 	}
1100 }
1101 
dlm_dump_rsb_name(struct dlm_ls * ls,char * name,int len)1102 void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
1103 {
1104 	struct dlm_rsb *r = NULL;
1105 	uint32_t hash, b;
1106 	int error;
1107 
1108 	hash = jhash(name, len, 0);
1109 	b = hash & (ls->ls_rsbtbl_size - 1);
1110 
1111 	spin_lock(&ls->ls_rsbtbl[b].lock);
1112 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
1113 	if (!error)
1114 		goto out_dump;
1115 
1116 	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
1117 	if (error)
1118 		goto out;
1119  out_dump:
1120 	dlm_dump_rsb(r);
1121  out:
1122 	spin_unlock(&ls->ls_rsbtbl[b].lock);
1123 }
1124 
toss_rsb(struct kref * kref)1125 static void toss_rsb(struct kref *kref)
1126 {
1127 	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
1128 	struct dlm_ls *ls = r->res_ls;
1129 
1130 	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
1131 	kref_init(&r->res_ref);
1132 	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
1133 	rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
1134 	r->res_toss_time = jiffies;
1135 	ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
1136 	if (r->res_lvbptr) {
1137 		dlm_free_lvb(r->res_lvbptr);
1138 		r->res_lvbptr = NULL;
1139 	}
1140 }
1141 
1142 /* See comment for unhold_lkb */
1143 
unhold_rsb(struct dlm_rsb * r)1144 static void unhold_rsb(struct dlm_rsb *r)
1145 {
1146 	int rv;
1147 	rv = kref_put(&r->res_ref, toss_rsb);
1148 	DLM_ASSERT(!rv, dlm_dump_rsb(r););
1149 }
1150 
kill_rsb(struct kref * kref)1151 static void kill_rsb(struct kref *kref)
1152 {
1153 	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
1154 
1155 	/* All work is done after the return from kref_put() so we
1156 	   can release the write_lock before the remove and free. */
1157 
1158 	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
1159 	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
1160 	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
1161 	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
1162 	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
1163 	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
1164 }
1165 
1166 /* Attaching/detaching lkb's from rsb's is for rsb reference counting.
1167    The rsb must exist as long as any lkb's for it do. */
1168 
attach_lkb(struct dlm_rsb * r,struct dlm_lkb * lkb)1169 static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
1170 {
1171 	hold_rsb(r);
1172 	lkb->lkb_resource = r;
1173 }
1174 
detach_lkb(struct dlm_lkb * lkb)1175 static void detach_lkb(struct dlm_lkb *lkb)
1176 {
1177 	if (lkb->lkb_resource) {
1178 		put_rsb(lkb->lkb_resource);
1179 		lkb->lkb_resource = NULL;
1180 	}
1181 }
1182 
create_lkb(struct dlm_ls * ls,struct dlm_lkb ** lkb_ret)1183 static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
1184 {
1185 	struct dlm_lkb *lkb;
1186 	int rv;
1187 
1188 	lkb = dlm_allocate_lkb(ls);
1189 	if (!lkb)
1190 		return -ENOMEM;
1191 
1192 	lkb->lkb_nodeid = -1;
1193 	lkb->lkb_grmode = DLM_LOCK_IV;
1194 	kref_init(&lkb->lkb_ref);
1195 	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
1196 	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
1197 	INIT_LIST_HEAD(&lkb->lkb_time_list);
1198 	INIT_LIST_HEAD(&lkb->lkb_cb_list);
1199 	mutex_init(&lkb->lkb_cb_mutex);
1200 	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
1201 
1202 	idr_preload(GFP_NOFS);
1203 	spin_lock(&ls->ls_lkbidr_spin);
1204 	rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
1205 	if (rv >= 0)
1206 		lkb->lkb_id = rv;
1207 	spin_unlock(&ls->ls_lkbidr_spin);
1208 	idr_preload_end();
1209 
1210 	if (rv < 0) {
1211 		log_error(ls, "create_lkb idr error %d", rv);
1212 		dlm_free_lkb(lkb);
1213 		return rv;
1214 	}
1215 
1216 	*lkb_ret = lkb;
1217 	return 0;
1218 }
1219 
find_lkb(struct dlm_ls * ls,uint32_t lkid,struct dlm_lkb ** lkb_ret)1220 static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
1221 {
1222 	struct dlm_lkb *lkb;
1223 
1224 	spin_lock(&ls->ls_lkbidr_spin);
1225 	lkb = idr_find(&ls->ls_lkbidr, lkid);
1226 	if (lkb)
1227 		kref_get(&lkb->lkb_ref);
1228 	spin_unlock(&ls->ls_lkbidr_spin);
1229 
1230 	*lkb_ret = lkb;
1231 	return lkb ? 0 : -ENOENT;
1232 }
1233 
kill_lkb(struct kref * kref)1234 static void kill_lkb(struct kref *kref)
1235 {
1236 	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
1237 
1238 	/* All work is done after the return from kref_put() so we
1239 	   can release the write_lock before the detach_lkb */
1240 
1241 	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
1242 }
1243 
1244 /* __put_lkb() is used when an lkb may not have an rsb attached to
1245    it so we need to provide the lockspace explicitly */
1246 
__put_lkb(struct dlm_ls * ls,struct dlm_lkb * lkb)1247 static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
1248 {
1249 	uint32_t lkid = lkb->lkb_id;
1250 
1251 	spin_lock(&ls->ls_lkbidr_spin);
1252 	if (kref_put(&lkb->lkb_ref, kill_lkb)) {
1253 		idr_remove(&ls->ls_lkbidr, lkid);
1254 		spin_unlock(&ls->ls_lkbidr_spin);
1255 
1256 		detach_lkb(lkb);
1257 
1258 		/* for local/process lkbs, lvbptr points to caller's lksb */
1259 		if (lkb->lkb_lvbptr && is_master_copy(lkb))
1260 			dlm_free_lvb(lkb->lkb_lvbptr);
1261 		dlm_free_lkb(lkb);
1262 		return 1;
1263 	} else {
1264 		spin_unlock(&ls->ls_lkbidr_spin);
1265 		return 0;
1266 	}
1267 }
1268 
dlm_put_lkb(struct dlm_lkb * lkb)1269 int dlm_put_lkb(struct dlm_lkb *lkb)
1270 {
1271 	struct dlm_ls *ls;
1272 
1273 	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
1274 	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););
1275 
1276 	ls = lkb->lkb_resource->res_ls;
1277 	return __put_lkb(ls, lkb);
1278 }
1279 
1280 /* This is only called to add a reference when the code already holds
1281    a valid reference to the lkb, so there's no need for locking. */
1282 
hold_lkb(struct dlm_lkb * lkb)1283 static inline void hold_lkb(struct dlm_lkb *lkb)
1284 {
1285 	kref_get(&lkb->lkb_ref);
1286 }
1287 
1288 /* This is called when we need to remove a reference and are certain
1289    it's not the last ref.  e.g. del_lkb is always called between a
1290    find_lkb/put_lkb and is always the inverse of a previous add_lkb.
1291    put_lkb would work fine, but would involve unnecessary locking */
1292 
unhold_lkb(struct dlm_lkb * lkb)1293 static inline void unhold_lkb(struct dlm_lkb *lkb)
1294 {
1295 	int rv;
1296 	rv = kref_put(&lkb->lkb_ref, kill_lkb);
1297 	DLM_ASSERT(!rv, dlm_print_lkb(lkb););
1298 }
1299 
lkb_add_ordered(struct list_head * new,struct list_head * head,int mode)1300 static void lkb_add_ordered(struct list_head *new, struct list_head *head,
1301 			    int mode)
1302 {
1303 	struct dlm_lkb *lkb = NULL;
1304 
1305 	list_for_each_entry(lkb, head, lkb_statequeue)
1306 		if (lkb->lkb_rqmode < mode)
1307 			break;
1308 
1309 	__list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
1310 }
1311 
1312 /* add/remove lkb to rsb's grant/convert/wait queue */
1313 
add_lkb(struct dlm_rsb * r,struct dlm_lkb * lkb,int status)1314 static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
1315 {
1316 	kref_get(&lkb->lkb_ref);
1317 
1318 	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
1319 
1320 	lkb->lkb_timestamp = ktime_get();
1321 
1322 	lkb->lkb_status = status;
1323 
1324 	switch (status) {
1325 	case DLM_LKSTS_WAITING:
1326 		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
1327 			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
1328 		else
1329 			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
1330 		break;
1331 	case DLM_LKSTS_GRANTED:
1332 		/* convention says granted locks kept in order of grmode */
1333 		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
1334 				lkb->lkb_grmode);
1335 		break;
1336 	case DLM_LKSTS_CONVERT:
1337 		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
1338 			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
1339 		else
1340 			list_add_tail(&lkb->lkb_statequeue,
1341 				      &r->res_convertqueue);
1342 		break;
1343 	default:
1344 		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
1345 	}
1346 }
1347 
del_lkb(struct dlm_rsb * r,struct dlm_lkb * lkb)1348 static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
1349 {
1350 	lkb->lkb_status = 0;
1351 	list_del(&lkb->lkb_statequeue);
1352 	unhold_lkb(lkb);
1353 }
1354 
move_lkb(struct dlm_rsb * r,struct dlm_lkb * lkb,int sts)1355 static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
1356 {
1357 	hold_lkb(lkb);
1358 	del_lkb(r, lkb);
1359 	add_lkb(r, lkb, sts);
1360 	unhold_lkb(lkb);
1361 }
1362 
msg_reply_type(int mstype)1363 static int msg_reply_type(int mstype)
1364 {
1365 	switch (mstype) {
1366 	case DLM_MSG_REQUEST:
1367 		return DLM_MSG_REQUEST_REPLY;
1368 	case DLM_MSG_CONVERT:
1369 		return DLM_MSG_CONVERT_REPLY;
1370 	case DLM_MSG_UNLOCK:
1371 		return DLM_MSG_UNLOCK_REPLY;
1372 	case DLM_MSG_CANCEL:
1373 		return DLM_MSG_CANCEL_REPLY;
1374 	case DLM_MSG_LOOKUP:
1375 		return DLM_MSG_LOOKUP_REPLY;
1376 	}
1377 	return -1;
1378 }
1379 
nodeid_warned(int nodeid,int num_nodes,int * warned)1380 static int nodeid_warned(int nodeid, int num_nodes, int *warned)
1381 {
1382 	int i;
1383 
1384 	for (i = 0; i < num_nodes; i++) {
1385 		if (!warned[i]) {
1386 			warned[i] = nodeid;
1387 			return 0;
1388 		}
1389 		if (warned[i] == nodeid)
1390 			return 1;
1391 	}
1392 	return 0;
1393 }
1394 
dlm_scan_waiters(struct dlm_ls * ls)1395 void dlm_scan_waiters(struct dlm_ls *ls)
1396 {
1397 	struct dlm_lkb *lkb;
1398 	s64 us;
1399 	s64 debug_maxus = 0;
1400 	u32 debug_scanned = 0;
1401 	u32 debug_expired = 0;
1402 	int num_nodes = 0;
1403 	int *warned = NULL;
1404 
1405 	if (!dlm_config.ci_waitwarn_us)
1406 		return;
1407 
1408 	mutex_lock(&ls->ls_waiters_mutex);
1409 
1410 	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
1411 		if (!lkb->lkb_wait_time)
1412 			continue;
1413 
1414 		debug_scanned++;
1415 
1416 		us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));
1417 
1418 		if (us < dlm_config.ci_waitwarn_us)
1419 			continue;
1420 
1421 		lkb->lkb_wait_time = 0;
1422 
1423 		debug_expired++;
1424 		if (us > debug_maxus)
1425 			debug_maxus = us;
1426 
1427 		if (!num_nodes) {
1428 			num_nodes = ls->ls_num_nodes;
1429 			warned = kcalloc(num_nodes, sizeof(int), GFP_KERNEL);
1430 		}
1431 		if (!warned)
1432 			continue;
1433 		if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
1434 			continue;
1435 
1436 		log_error(ls, "waitwarn %x %lld %d us check connection to "
1437 			  "node %d", lkb->lkb_id, (long long)us,
1438 			  dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
1439 	}
1440 	mutex_unlock(&ls->ls_waiters_mutex);
1441 	kfree(warned);
1442 
1443 	if (debug_expired)
1444 		log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
1445 			  debug_scanned, debug_expired,
1446 			  dlm_config.ci_waitwarn_us, (long long)debug_maxus);
1447 }
1448 
1449 /* add/remove lkb from global waiters list of lkb's waiting for
1450    a reply from a remote node */
1451 
add_to_waiters(struct dlm_lkb * lkb,int mstype,int to_nodeid)1452 static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
1453 {
1454 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1455 	int error = 0;
1456 
1457 	mutex_lock(&ls->ls_waiters_mutex);
1458 
1459 	if (is_overlap_unlock(lkb) ||
1460 	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
1461 		error = -EINVAL;
1462 		goto out;
1463 	}
1464 
1465 	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
1466 		switch (mstype) {
1467 		case DLM_MSG_UNLOCK:
1468 			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
1469 			break;
1470 		case DLM_MSG_CANCEL:
1471 			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
1472 			break;
1473 		default:
1474 			error = -EBUSY;
1475 			goto out;
1476 		}
1477 		lkb->lkb_wait_count++;
1478 		hold_lkb(lkb);
1479 
1480 		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
1481 			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
1482 			  lkb->lkb_wait_count, lkb->lkb_flags);
1483 		goto out;
1484 	}
1485 
1486 	DLM_ASSERT(!lkb->lkb_wait_count,
1487 		   dlm_print_lkb(lkb);
1488 		   printk("wait_count %d\n", lkb->lkb_wait_count););
1489 
1490 	lkb->lkb_wait_count++;
1491 	lkb->lkb_wait_type = mstype;
1492 	lkb->lkb_wait_time = ktime_get();
1493 	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
1494 	hold_lkb(lkb);
1495 	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
1496  out:
1497 	if (error)
1498 		log_error(ls, "addwait error %x %d flags %x %d %d %s",
1499 			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
1500 			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
1501 	mutex_unlock(&ls->ls_waiters_mutex);
1502 	return error;
1503 }
1504 
1505 /* We clear the RESEND flag because we might be taking an lkb off the waiters
1506    list as part of process_requestqueue (e.g. a lookup that has an optimized
1507    request reply on the requestqueue) between dlm_recover_waiters_pre() which
1508    set RESEND and dlm_recover_waiters_post() */
1509 
_remove_from_waiters(struct dlm_lkb * lkb,int mstype,struct dlm_message * ms)1510 static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
1511 				struct dlm_message *ms)
1512 {
1513 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1514 	int overlap_done = 0;
1515 
1516 	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
1517 		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
1518 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
1519 		overlap_done = 1;
1520 		goto out_del;
1521 	}
1522 
1523 	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
1524 		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
1525 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
1526 		overlap_done = 1;
1527 		goto out_del;
1528 	}
1529 
1530 	/* Cancel state was preemptively cleared by a successful convert,
1531 	   see next comment, nothing to do. */
1532 
1533 	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
1534 	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
1535 		log_debug(ls, "remwait %x cancel_reply wait_type %d",
1536 			  lkb->lkb_id, lkb->lkb_wait_type);
1537 		return -1;
1538 	}
1539 
1540 	/* Remove for the convert reply, and premptively remove for the
1541 	   cancel reply.  A convert has been granted while there's still
1542 	   an outstanding cancel on it (the cancel is moot and the result
1543 	   in the cancel reply should be 0).  We preempt the cancel reply
1544 	   because the app gets the convert result and then can follow up
1545 	   with another op, like convert.  This subsequent op would see the
1546 	   lingering state of the cancel and fail with -EBUSY. */
1547 
1548 	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
1549 	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
1550 	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
1551 		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
1552 			  lkb->lkb_id);
1553 		lkb->lkb_wait_type = 0;
1554 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
1555 		lkb->lkb_wait_count--;
1556 		unhold_lkb(lkb);
1557 		goto out_del;
1558 	}
1559 
1560 	/* N.B. type of reply may not always correspond to type of original
1561 	   msg due to lookup->request optimization, verify others? */
1562 
1563 	if (lkb->lkb_wait_type) {
1564 		lkb->lkb_wait_type = 0;
1565 		goto out_del;
1566 	}
1567 
1568 	log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
1569 		  lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid,
1570 		  mstype, lkb->lkb_flags);
1571 	return -1;
1572 
1573  out_del:
1574 	/* the force-unlock/cancel has completed and we haven't recvd a reply
1575 	   to the op that was in progress prior to the unlock/cancel; we
1576 	   give up on any reply to the earlier op.  FIXME: not sure when/how
1577 	   this would happen */
1578 
1579 	if (overlap_done && lkb->lkb_wait_type) {
1580 		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
1581 			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
1582 		lkb->lkb_wait_count--;
1583 		unhold_lkb(lkb);
1584 		lkb->lkb_wait_type = 0;
1585 	}
1586 
1587 	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););
1588 
1589 	lkb->lkb_flags &= ~DLM_IFL_RESEND;
1590 	lkb->lkb_wait_count--;
1591 	if (!lkb->lkb_wait_count)
1592 		list_del_init(&lkb->lkb_wait_reply);
1593 	unhold_lkb(lkb);
1594 	return 0;
1595 }
1596 
remove_from_waiters(struct dlm_lkb * lkb,int mstype)1597 static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
1598 {
1599 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1600 	int error;
1601 
1602 	mutex_lock(&ls->ls_waiters_mutex);
1603 	error = _remove_from_waiters(lkb, mstype, NULL);
1604 	mutex_unlock(&ls->ls_waiters_mutex);
1605 	return error;
1606 }
1607 
1608 /* Handles situations where we might be processing a "fake" or "stub" reply in
1609    which we can't try to take waiters_mutex again. */
1610 
remove_from_waiters_ms(struct dlm_lkb * lkb,struct dlm_message * ms)1611 static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
1612 {
1613 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1614 	int error;
1615 
1616 	if (ms->m_flags != DLM_IFL_STUB_MS)
1617 		mutex_lock(&ls->ls_waiters_mutex);
1618 	error = _remove_from_waiters(lkb, ms->m_type, ms);
1619 	if (ms->m_flags != DLM_IFL_STUB_MS)
1620 		mutex_unlock(&ls->ls_waiters_mutex);
1621 	return error;
1622 }
1623 
1624 /* If there's an rsb for the same resource being removed, ensure
1625    that the remove message is sent before the new lookup message.
1626    It should be rare to need a delay here, but if not, then it may
1627    be worthwhile to add a proper wait mechanism rather than a delay. */
1628 
wait_pending_remove(struct dlm_rsb * r)1629 static void wait_pending_remove(struct dlm_rsb *r)
1630 {
1631 	struct dlm_ls *ls = r->res_ls;
1632  restart:
1633 	spin_lock(&ls->ls_remove_spin);
1634 	if (ls->ls_remove_len &&
1635 	    !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) {
1636 		log_debug(ls, "delay lookup for remove dir %d %s",
1637 		  	  r->res_dir_nodeid, r->res_name);
1638 		spin_unlock(&ls->ls_remove_spin);
1639 		msleep(1);
1640 		goto restart;
1641 	}
1642 	spin_unlock(&ls->ls_remove_spin);
1643 }
1644 
1645 /*
1646  * ls_remove_spin protects ls_remove_name and ls_remove_len which are
1647  * read by other threads in wait_pending_remove.  ls_remove_names
1648  * and ls_remove_lens are only used by the scan thread, so they do
1649  * not need protection.
1650  */
1651 
shrink_bucket(struct dlm_ls * ls,int b)1652 static void shrink_bucket(struct dlm_ls *ls, int b)
1653 {
1654 	struct rb_node *n, *next;
1655 	struct dlm_rsb *r;
1656 	char *name;
1657 	int our_nodeid = dlm_our_nodeid();
1658 	int remote_count = 0;
1659 	int need_shrink = 0;
1660 	int i, len, rv;
1661 
1662 	memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);
1663 
1664 	spin_lock(&ls->ls_rsbtbl[b].lock);
1665 
1666 	if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
1667 		spin_unlock(&ls->ls_rsbtbl[b].lock);
1668 		return;
1669 	}
1670 
1671 	for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
1672 		next = rb_next(n);
1673 		r = rb_entry(n, struct dlm_rsb, res_hashnode);
1674 
1675 		/* If we're the directory record for this rsb, and
1676 		   we're not the master of it, then we need to wait
1677 		   for the master node to send us a dir remove for
1678 		   before removing the dir record. */
1679 
1680 		if (!dlm_no_directory(ls) &&
1681 		    (r->res_master_nodeid != our_nodeid) &&
1682 		    (dlm_dir_nodeid(r) == our_nodeid)) {
1683 			continue;
1684 		}
1685 
1686 		need_shrink = 1;
1687 
1688 		if (!time_after_eq(jiffies, r->res_toss_time +
1689 				   dlm_config.ci_toss_secs * HZ)) {
1690 			continue;
1691 		}
1692 
1693 		if (!dlm_no_directory(ls) &&
1694 		    (r->res_master_nodeid == our_nodeid) &&
1695 		    (dlm_dir_nodeid(r) != our_nodeid)) {
1696 
1697 			/* We're the master of this rsb but we're not
1698 			   the directory record, so we need to tell the
1699 			   dir node to remove the dir record. */
1700 
1701 			ls->ls_remove_lens[remote_count] = r->res_length;
1702 			memcpy(ls->ls_remove_names[remote_count], r->res_name,
1703 			       DLM_RESNAME_MAXLEN);
1704 			remote_count++;
1705 
1706 			if (remote_count >= DLM_REMOVE_NAMES_MAX)
1707 				break;
1708 			continue;
1709 		}
1710 
1711 		if (!kref_put(&r->res_ref, kill_rsb)) {
1712 			log_error(ls, "tossed rsb in use %s", r->res_name);
1713 			continue;
1714 		}
1715 
1716 		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
1717 		dlm_free_rsb(r);
1718 	}
1719 
1720 	if (need_shrink)
1721 		ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
1722 	else
1723 		ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
1724 	spin_unlock(&ls->ls_rsbtbl[b].lock);
1725 
1726 	/*
1727 	 * While searching for rsb's to free, we found some that require
1728 	 * remote removal.  We leave them in place and find them again here
1729 	 * so there is a very small gap between removing them from the toss
1730 	 * list and sending the removal.  Keeping this gap small is
1731 	 * important to keep us (the master node) from being out of sync
1732 	 * with the remote dir node for very long.
1733 	 *
1734 	 * From the time the rsb is removed from toss until just after
1735 	 * send_remove, the rsb name is saved in ls_remove_name.  A new
1736 	 * lookup checks this to ensure that a new lookup message for the
1737 	 * same resource name is not sent just before the remove message.
1738 	 */
1739 
1740 	for (i = 0; i < remote_count; i++) {
1741 		name = ls->ls_remove_names[i];
1742 		len = ls->ls_remove_lens[i];
1743 
1744 		spin_lock(&ls->ls_rsbtbl[b].lock);
1745 		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
1746 		if (rv) {
1747 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1748 			log_debug(ls, "remove_name not toss %s", name);
1749 			continue;
1750 		}
1751 
1752 		if (r->res_master_nodeid != our_nodeid) {
1753 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1754 			log_debug(ls, "remove_name master %d dir %d our %d %s",
1755 				  r->res_master_nodeid, r->res_dir_nodeid,
1756 				  our_nodeid, name);
1757 			continue;
1758 		}
1759 
1760 		if (r->res_dir_nodeid == our_nodeid) {
1761 			/* should never happen */
1762 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1763 			log_error(ls, "remove_name dir %d master %d our %d %s",
1764 				  r->res_dir_nodeid, r->res_master_nodeid,
1765 				  our_nodeid, name);
1766 			continue;
1767 		}
1768 
1769 		if (!time_after_eq(jiffies, r->res_toss_time +
1770 				   dlm_config.ci_toss_secs * HZ)) {
1771 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1772 			log_debug(ls, "remove_name toss_time %lu now %lu %s",
1773 				  r->res_toss_time, jiffies, name);
1774 			continue;
1775 		}
1776 
1777 		if (!kref_put(&r->res_ref, kill_rsb)) {
1778 			spin_unlock(&ls->ls_rsbtbl[b].lock);
1779 			log_error(ls, "remove_name in use %s", name);
1780 			continue;
1781 		}
1782 
1783 		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
1784 
1785 		/* block lookup of same name until we've sent remove */
1786 		spin_lock(&ls->ls_remove_spin);
1787 		ls->ls_remove_len = len;
1788 		memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
1789 		spin_unlock(&ls->ls_remove_spin);
1790 		spin_unlock(&ls->ls_rsbtbl[b].lock);
1791 
1792 		send_remove(r);
1793 
1794 		/* allow lookup of name again */
1795 		spin_lock(&ls->ls_remove_spin);
1796 		ls->ls_remove_len = 0;
1797 		memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
1798 		spin_unlock(&ls->ls_remove_spin);
1799 
1800 		dlm_free_rsb(r);
1801 	}
1802 }
1803 
dlm_scan_rsbs(struct dlm_ls * ls)1804 void dlm_scan_rsbs(struct dlm_ls *ls)
1805 {
1806 	int i;
1807 
1808 	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1809 		shrink_bucket(ls, i);
1810 		if (dlm_locking_stopped(ls))
1811 			break;
1812 		cond_resched();
1813 	}
1814 }
1815 
add_timeout(struct dlm_lkb * lkb)1816 static void add_timeout(struct dlm_lkb *lkb)
1817 {
1818 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1819 
1820 	if (is_master_copy(lkb))
1821 		return;
1822 
1823 	if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
1824 	    !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1825 		lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
1826 		goto add_it;
1827 	}
1828 	if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
1829 		goto add_it;
1830 	return;
1831 
1832  add_it:
1833 	DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
1834 	mutex_lock(&ls->ls_timeout_mutex);
1835 	hold_lkb(lkb);
1836 	list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
1837 	mutex_unlock(&ls->ls_timeout_mutex);
1838 }
1839 
del_timeout(struct dlm_lkb * lkb)1840 static void del_timeout(struct dlm_lkb *lkb)
1841 {
1842 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1843 
1844 	mutex_lock(&ls->ls_timeout_mutex);
1845 	if (!list_empty(&lkb->lkb_time_list)) {
1846 		list_del_init(&lkb->lkb_time_list);
1847 		unhold_lkb(lkb);
1848 	}
1849 	mutex_unlock(&ls->ls_timeout_mutex);
1850 }
1851 
1852 /* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
1853    lkb_lksb_timeout without lock_rsb?  Note: we can't lock timeout_mutex
1854    and then lock rsb because of lock ordering in add_timeout.  We may need
1855    to specify some special timeout-related bits in the lkb that are just to
1856    be accessed under the timeout_mutex. */
1857 
dlm_scan_timeout(struct dlm_ls * ls)1858 void dlm_scan_timeout(struct dlm_ls *ls)
1859 {
1860 	struct dlm_rsb *r;
1861 	struct dlm_lkb *lkb = NULL, *iter;
1862 	int do_cancel, do_warn;
1863 	s64 wait_us;
1864 
1865 	for (;;) {
1866 		if (dlm_locking_stopped(ls))
1867 			break;
1868 
1869 		do_cancel = 0;
1870 		do_warn = 0;
1871 		mutex_lock(&ls->ls_timeout_mutex);
1872 		list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) {
1873 
1874 			wait_us = ktime_to_us(ktime_sub(ktime_get(),
1875 							iter->lkb_timestamp));
1876 
1877 			if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) &&
1878 			    wait_us >= (iter->lkb_timeout_cs * 10000))
1879 				do_cancel = 1;
1880 
1881 			if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
1882 			    wait_us >= dlm_config.ci_timewarn_cs * 10000)
1883 				do_warn = 1;
1884 
1885 			if (!do_cancel && !do_warn)
1886 				continue;
1887 			hold_lkb(iter);
1888 			lkb = iter;
1889 			break;
1890 		}
1891 		mutex_unlock(&ls->ls_timeout_mutex);
1892 
1893 		if (!lkb)
1894 			break;
1895 
1896 		r = lkb->lkb_resource;
1897 		hold_rsb(r);
1898 		lock_rsb(r);
1899 
1900 		if (do_warn) {
1901 			/* clear flag so we only warn once */
1902 			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1903 			if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
1904 				del_timeout(lkb);
1905 			dlm_timeout_warn(lkb);
1906 		}
1907 
1908 		if (do_cancel) {
1909 			log_debug(ls, "timeout cancel %x node %d %s",
1910 				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1911 			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1912 			lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
1913 			del_timeout(lkb);
1914 			_cancel_lock(r, lkb);
1915 		}
1916 
1917 		unlock_rsb(r);
1918 		unhold_rsb(r);
1919 		dlm_put_lkb(lkb);
1920 	}
1921 }
1922 
1923 /* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
1924    dlm_recoverd before checking/setting ls_recover_begin. */
1925 
dlm_adjust_timeouts(struct dlm_ls * ls)1926 void dlm_adjust_timeouts(struct dlm_ls *ls)
1927 {
1928 	struct dlm_lkb *lkb;
1929 	u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);
1930 
1931 	ls->ls_recover_begin = 0;
1932 	mutex_lock(&ls->ls_timeout_mutex);
1933 	list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
1934 		lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
1935 	mutex_unlock(&ls->ls_timeout_mutex);
1936 
1937 	if (!dlm_config.ci_waitwarn_us)
1938 		return;
1939 
1940 	mutex_lock(&ls->ls_waiters_mutex);
1941 	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
1942 		if (ktime_to_us(lkb->lkb_wait_time))
1943 			lkb->lkb_wait_time = ktime_get();
1944 	}
1945 	mutex_unlock(&ls->ls_waiters_mutex);
1946 }
1947 
1948 /* lkb is master or local copy */
1949 
set_lvb_lock(struct dlm_rsb * r,struct dlm_lkb * lkb)1950 static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1951 {
1952 	int b, len = r->res_ls->ls_lvblen;
1953 
1954 	/* b=1 lvb returned to caller
1955 	   b=0 lvb written to rsb or invalidated
1956 	   b=-1 do nothing */
1957 
1958 	b =  dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1959 
1960 	if (b == 1) {
1961 		if (!lkb->lkb_lvbptr)
1962 			return;
1963 
1964 		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1965 			return;
1966 
1967 		if (!r->res_lvbptr)
1968 			return;
1969 
1970 		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
1971 		lkb->lkb_lvbseq = r->res_lvbseq;
1972 
1973 	} else if (b == 0) {
1974 		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1975 			rsb_set_flag(r, RSB_VALNOTVALID);
1976 			return;
1977 		}
1978 
1979 		if (!lkb->lkb_lvbptr)
1980 			return;
1981 
1982 		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1983 			return;
1984 
1985 		if (!r->res_lvbptr)
1986 			r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1987 
1988 		if (!r->res_lvbptr)
1989 			return;
1990 
1991 		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
1992 		r->res_lvbseq++;
1993 		lkb->lkb_lvbseq = r->res_lvbseq;
1994 		rsb_clear_flag(r, RSB_VALNOTVALID);
1995 	}
1996 
1997 	if (rsb_flag(r, RSB_VALNOTVALID))
1998 		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
1999 }
2000 
set_lvb_unlock(struct dlm_rsb * r,struct dlm_lkb * lkb)2001 static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2002 {
2003 	if (lkb->lkb_grmode < DLM_LOCK_PW)
2004 		return;
2005 
2006 	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
2007 		rsb_set_flag(r, RSB_VALNOTVALID);
2008 		return;
2009 	}
2010 
2011 	if (!lkb->lkb_lvbptr)
2012 		return;
2013 
2014 	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
2015 		return;
2016 
2017 	if (!r->res_lvbptr)
2018 		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
2019 
2020 	if (!r->res_lvbptr)
2021 		return;
2022 
2023 	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2024 	r->res_lvbseq++;
2025 	rsb_clear_flag(r, RSB_VALNOTVALID);
2026 }
2027 
2028 /* lkb is process copy (pc) */
2029 
set_lvb_lock_pc(struct dlm_rsb * r,struct dlm_lkb * lkb,struct dlm_message * ms)2030 static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2031 			    struct dlm_message *ms)
2032 {
2033 	int b;
2034 
2035 	if (!lkb->lkb_lvbptr)
2036 		return;
2037 
2038 	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
2039 		return;
2040 
2041 	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
2042 	if (b == 1) {
2043 		int len = receive_extralen(ms);
2044 		if (len > r->res_ls->ls_lvblen)
2045 			len = r->res_ls->ls_lvblen;
2046 		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
2047 		lkb->lkb_lvbseq = ms->m_lvbseq;
2048 	}
2049 }
2050 
2051 /* Manipulate lkb's on rsb's convert/granted/waiting queues
2052    remove_lock -- used for unlock, removes lkb from granted
2053    revert_lock -- used for cancel, moves lkb from convert to granted
2054    grant_lock  -- used for request and convert, adds lkb to granted or
2055                   moves lkb from convert or waiting to granted
2056 
2057    Each of these is used for master or local copy lkb's.  There is
2058    also a _pc() variation used to make the corresponding change on
2059    a process copy (pc) lkb. */
2060 
_remove_lock(struct dlm_rsb * r,struct dlm_lkb * lkb)2061 static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2062 {
2063 	del_lkb(r, lkb);
2064 	lkb->lkb_grmode = DLM_LOCK_IV;
2065 	/* this unhold undoes the original ref from create_lkb()
2066 	   so this leads to the lkb being freed */
2067 	unhold_lkb(lkb);
2068 }
2069 
remove_lock(struct dlm_rsb * r,struct dlm_lkb * lkb)2070 static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2071 {
2072 	set_lvb_unlock(r, lkb);
2073 	_remove_lock(r, lkb);
2074 }
2075 
remove_lock_pc(struct dlm_rsb * r,struct dlm_lkb * lkb)2076 static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2077 {
2078 	_remove_lock(r, lkb);
2079 }
2080 
2081 /* returns: 0 did nothing
2082 	    1 moved lock to granted
2083 	   -1 removed lock */
2084 
revert_lock(struct dlm_rsb * r,struct dlm_lkb * lkb)2085 static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2086 {
2087 	int rv = 0;
2088 
2089 	lkb->lkb_rqmode = DLM_LOCK_IV;
2090 
2091 	switch (lkb->lkb_status) {
2092 	case DLM_LKSTS_GRANTED:
2093 		break;
2094 	case DLM_LKSTS_CONVERT:
2095 		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2096 		rv = 1;
2097 		break;
2098 	case DLM_LKSTS_WAITING:
2099 		del_lkb(r, lkb);
2100 		lkb->lkb_grmode = DLM_LOCK_IV;
2101 		/* this unhold undoes the original ref from create_lkb()
2102 		   so this leads to the lkb being freed */
2103 		unhold_lkb(lkb);
2104 		rv = -1;
2105 		break;
2106 	default:
2107 		log_print("invalid status for revert %d", lkb->lkb_status);
2108 	}
2109 	return rv;
2110 }
2111 
revert_lock_pc(struct dlm_rsb * r,struct dlm_lkb * lkb)2112 static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2113 {
2114 	return revert_lock(r, lkb);
2115 }
2116 
_grant_lock(struct dlm_rsb * r,struct dlm_lkb * lkb)2117 static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2118 {
2119 	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
2120 		lkb->lkb_grmode = lkb->lkb_rqmode;
2121 		if (lkb->lkb_status)
2122 			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2123 		else
2124 			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
2125 	}
2126 
2127 	lkb->lkb_rqmode = DLM_LOCK_IV;
2128 	lkb->lkb_highbast = 0;
2129 }
2130 
grant_lock(struct dlm_rsb * r,struct dlm_lkb * lkb)2131 static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2132 {
2133 	set_lvb_lock(r, lkb);
2134 	_grant_lock(r, lkb);
2135 }
2136 
grant_lock_pc(struct dlm_rsb * r,struct dlm_lkb * lkb,struct dlm_message * ms)2137 static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2138 			  struct dlm_message *ms)
2139 {
2140 	set_lvb_lock_pc(r, lkb, ms);
2141 	_grant_lock(r, lkb);
2142 }
2143 
2144 /* called by grant_pending_locks() which means an async grant message must
2145    be sent to the requesting node in addition to granting the lock if the
2146    lkb belongs to a remote node. */
2147 
grant_lock_pending(struct dlm_rsb * r,struct dlm_lkb * lkb)2148 static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
2149 {
2150 	grant_lock(r, lkb);
2151 	if (is_master_copy(lkb))
2152 		send_grant(r, lkb);
2153 	else
2154 		queue_cast(r, lkb, 0);
2155 }
2156 
2157 /* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
2158    change the granted/requested modes.  We're munging things accordingly in
2159    the process copy.
2160    CONVDEADLK: our grmode may have been forced down to NL to resolve a
2161    conversion deadlock
2162    ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
2163    compatible with other granted locks */
2164 
munge_demoted(struct dlm_lkb * lkb)2165 static void munge_demoted(struct dlm_lkb *lkb)
2166 {
2167 	if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
2168 		log_print("munge_demoted %x invalid modes gr %d rq %d",
2169 			  lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
2170 		return;
2171 	}
2172 
2173 	lkb->lkb_grmode = DLM_LOCK_NL;
2174 }
2175 
munge_altmode(struct dlm_lkb * lkb,struct dlm_message * ms)2176 static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
2177 {
2178 	if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
2179 	    ms->m_type != DLM_MSG_GRANT) {
2180 		log_print("munge_altmode %x invalid reply type %d",
2181 			  lkb->lkb_id, ms->m_type);
2182 		return;
2183 	}
2184 
2185 	if (lkb->lkb_exflags & DLM_LKF_ALTPR)
2186 		lkb->lkb_rqmode = DLM_LOCK_PR;
2187 	else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
2188 		lkb->lkb_rqmode = DLM_LOCK_CW;
2189 	else {
2190 		log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
2191 		dlm_print_lkb(lkb);
2192 	}
2193 }
2194 
first_in_list(struct dlm_lkb * lkb,struct list_head * head)2195 static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
2196 {
2197 	struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
2198 					   lkb_statequeue);
2199 	if (lkb->lkb_id == first->lkb_id)
2200 		return 1;
2201 
2202 	return 0;
2203 }
2204 
2205 /* Check if the given lkb conflicts with another lkb on the queue. */
2206 
queue_conflict(struct list_head * head,struct dlm_lkb * lkb)2207 static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
2208 {
2209 	struct dlm_lkb *this;
2210 
2211 	list_for_each_entry(this, head, lkb_statequeue) {
2212 		if (this == lkb)
2213 			continue;
2214 		if (!modes_compat(this, lkb))
2215 			return 1;
2216 	}
2217 	return 0;
2218 }
2219 
2220 /*
2221  * "A conversion deadlock arises with a pair of lock requests in the converting
2222  * queue for one resource.  The granted mode of each lock blocks the requested
2223  * mode of the other lock."
2224  *
2225  * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
2226  * convert queue from being granted, then deadlk/demote lkb.
2227  *
2228  * Example:
2229  * Granted Queue: empty
2230  * Convert Queue: NL->EX (first lock)
2231  *                PR->EX (second lock)
2232  *
2233  * The first lock can't be granted because of the granted mode of the second
2234  * lock and the second lock can't be granted because it's not first in the
2235  * list.  We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2236  * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
2237  * flag set and return DEMOTED in the lksb flags.
2238  *
2239  * Originally, this function detected conv-deadlk in a more limited scope:
2240  * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2241  * - if lkb1 was the first entry in the queue (not just earlier), and was
2242  *   blocked by the granted mode of lkb2, and there was nothing on the
2243  *   granted queue preventing lkb1 from being granted immediately, i.e.
2244  *   lkb2 was the only thing preventing lkb1 from being granted.
2245  *
2246  * That second condition meant we'd only say there was conv-deadlk if
2247  * resolving it (by demotion) would lead to the first lock on the convert
2248  * queue being granted right away.  It allowed conversion deadlocks to exist
2249  * between locks on the convert queue while they couldn't be granted anyway.
2250  *
2251  * Now, we detect and take action on conversion deadlocks immediately when
2252  * they're created, even if they may not be immediately consequential.  If
2253  * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
2254  * mode that would prevent lkb1's conversion from being granted, we do a
2255  * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
2256  * I think this means that the lkb_is_ahead condition below should always
2257  * be zero, i.e. there will never be conv-deadlk between two locks that are
2258  * both already on the convert queue.
2259  */
2260 
conversion_deadlock_detect(struct dlm_rsb * r,struct dlm_lkb * lkb2)2261 static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
2262 {
2263 	struct dlm_lkb *lkb1;
2264 	int lkb_is_ahead = 0;
2265 
2266 	list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
2267 		if (lkb1 == lkb2) {
2268 			lkb_is_ahead = 1;
2269 			continue;
2270 		}
2271 
2272 		if (!lkb_is_ahead) {
2273 			if (!modes_compat(lkb2, lkb1))
2274 				return 1;
2275 		} else {
2276 			if (!modes_compat(lkb2, lkb1) &&
2277 			    !modes_compat(lkb1, lkb2))
2278 				return 1;
2279 		}
2280 	}
2281 	return 0;
2282 }
2283 
2284 /*
2285  * Return 1 if the lock can be granted, 0 otherwise.
2286  * Also detect and resolve conversion deadlocks.
2287  *
2288  * lkb is the lock to be granted
2289  *
2290  * now is 1 if the function is being called in the context of the
2291  * immediate request, it is 0 if called later, after the lock has been
2292  * queued.
2293  *
2294  * recover is 1 if dlm_recover_grant() is trying to grant conversions
2295  * after recovery.
2296  *
2297  * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
2298  */
2299 
_can_be_granted(struct dlm_rsb * r,struct dlm_lkb * lkb,int now,int recover)2300 static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2301 			   int recover)
2302 {
2303 	int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2304 
2305 	/*
2306 	 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2307 	 * a new request for a NL mode lock being blocked.
2308 	 *
2309 	 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2310 	 * request, then it would be granted.  In essence, the use of this flag
2311 	 * tells the Lock Manager to expedite theis request by not considering
2312 	 * what may be in the CONVERTING or WAITING queues...  As of this
2313 	 * writing, the EXPEDITE flag can be used only with new requests for NL
2314 	 * mode locks.  This flag is not valid for conversion requests.
2315 	 *
2316 	 * A shortcut.  Earlier checks return an error if EXPEDITE is used in a
2317 	 * conversion or used with a non-NL requested mode.  We also know an
2318 	 * EXPEDITE request is always granted immediately, so now must always
2319 	 * be 1.  The full condition to grant an expedite request: (now &&
2320 	 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2321 	 * therefore be shortened to just checking the flag.
2322 	 */
2323 
2324 	if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
2325 		return 1;
2326 
2327 	/*
2328 	 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2329 	 * added to the remaining conditions.
2330 	 */
2331 
2332 	if (queue_conflict(&r->res_grantqueue, lkb))
2333 		return 0;
2334 
2335 	/*
2336 	 * 6-3: By default, a conversion request is immediately granted if the
2337 	 * requested mode is compatible with the modes of all other granted
2338 	 * locks
2339 	 */
2340 
2341 	if (queue_conflict(&r->res_convertqueue, lkb))
2342 		return 0;
2343 
2344 	/*
2345 	 * The RECOVER_GRANT flag means dlm_recover_grant() is granting
2346 	 * locks for a recovered rsb, on which lkb's have been rebuilt.
2347 	 * The lkb's may have been rebuilt on the queues in a different
2348 	 * order than they were in on the previous master.  So, granting
2349 	 * queued conversions in order after recovery doesn't make sense
2350 	 * since the order hasn't been preserved anyway.  The new order
2351 	 * could also have created a new "in place" conversion deadlock.
2352 	 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
2353 	 * After recovery, there would be no granted locks, and possibly
2354 	 * NL->EX, PR->EX, an in-place conversion deadlock.)  So, after
2355 	 * recovery, grant conversions without considering order.
2356 	 */
2357 
2358 	if (conv && recover)
2359 		return 1;
2360 
2361 	/*
2362 	 * 6-5: But the default algorithm for deciding whether to grant or
2363 	 * queue conversion requests does not by itself guarantee that such
2364 	 * requests are serviced on a "first come first serve" basis.  This, in
2365 	 * turn, can lead to a phenomenon known as "indefinate postponement".
2366 	 *
2367 	 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2368 	 * the system service employed to request a lock conversion.  This flag
2369 	 * forces certain conversion requests to be queued, even if they are
2370 	 * compatible with the granted modes of other locks on the same
2371 	 * resource.  Thus, the use of this flag results in conversion requests
2372 	 * being ordered on a "first come first servce" basis.
2373 	 *
2374 	 * DCT: This condition is all about new conversions being able to occur
2375 	 * "in place" while the lock remains on the granted queue (assuming
2376 	 * nothing else conflicts.)  IOW if QUECVT isn't set, a conversion
2377 	 * doesn't _have_ to go onto the convert queue where it's processed in
2378 	 * order.  The "now" variable is necessary to distinguish converts
2379 	 * being received and processed for the first time now, because once a
2380 	 * convert is moved to the conversion queue the condition below applies
2381 	 * requiring fifo granting.
2382 	 */
2383 
2384 	if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
2385 		return 1;
2386 
2387 	/*
2388 	 * Even if the convert is compat with all granted locks,
2389 	 * QUECVT forces it behind other locks on the convert queue.
2390 	 */
2391 
2392 	if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2393 		if (list_empty(&r->res_convertqueue))
2394 			return 1;
2395 		else
2396 			return 0;
2397 	}
2398 
2399 	/*
2400 	 * The NOORDER flag is set to avoid the standard vms rules on grant
2401 	 * order.
2402 	 */
2403 
2404 	if (lkb->lkb_exflags & DLM_LKF_NOORDER)
2405 		return 1;
2406 
2407 	/*
2408 	 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2409 	 * granted until all other conversion requests ahead of it are granted
2410 	 * and/or canceled.
2411 	 */
2412 
2413 	if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
2414 		return 1;
2415 
2416 	/*
2417 	 * 6-4: By default, a new request is immediately granted only if all
2418 	 * three of the following conditions are satisfied when the request is
2419 	 * issued:
2420 	 * - The queue of ungranted conversion requests for the resource is
2421 	 *   empty.
2422 	 * - The queue of ungranted new requests for the resource is empty.
2423 	 * - The mode of the new request is compatible with the most
2424 	 *   restrictive mode of all granted locks on the resource.
2425 	 */
2426 
2427 	if (now && !conv && list_empty(&r->res_convertqueue) &&
2428 	    list_empty(&r->res_waitqueue))
2429 		return 1;
2430 
2431 	/*
2432 	 * 6-4: Once a lock request is in the queue of ungranted new requests,
2433 	 * it cannot be granted until the queue of ungranted conversion
2434 	 * requests is empty, all ungranted new requests ahead of it are
2435 	 * granted and/or canceled, and it is compatible with the granted mode
2436 	 * of the most restrictive lock granted on the resource.
2437 	 */
2438 
2439 	if (!now && !conv && list_empty(&r->res_convertqueue) &&
2440 	    first_in_list(lkb, &r->res_waitqueue))
2441 		return 1;
2442 
2443 	return 0;
2444 }
2445 
can_be_granted(struct dlm_rsb * r,struct dlm_lkb * lkb,int now,int recover,int * err)2446 static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2447 			  int recover, int *err)
2448 {
2449 	int rv;
2450 	int8_t alt = 0, rqmode = lkb->lkb_rqmode;
2451 	int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2452 
2453 	if (err)
2454 		*err = 0;
2455 
2456 	rv = _can_be_granted(r, lkb, now, recover);
2457 	if (rv)
2458 		goto out;
2459 
2460 	/*
2461 	 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2462 	 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2463 	 * cancels one of the locks.
2464 	 */
2465 
2466 	if (is_convert && can_be_queued(lkb) &&
2467 	    conversion_deadlock_detect(r, lkb)) {
2468 		if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2469 			lkb->lkb_grmode = DLM_LOCK_NL;
2470 			lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
2471 		} else if (err) {
2472 			*err = -EDEADLK;
2473 		} else {
2474 			log_print("can_be_granted deadlock %x now %d",
2475 				  lkb->lkb_id, now);
2476 			dlm_dump_rsb(r);
2477 		}
2478 		goto out;
2479 	}
2480 
2481 	/*
2482 	 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2483 	 * to grant a request in a mode other than the normal rqmode.  It's a
2484 	 * simple way to provide a big optimization to applications that can
2485 	 * use them.
2486 	 */
2487 
2488 	if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
2489 		alt = DLM_LOCK_PR;
2490 	else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
2491 		alt = DLM_LOCK_CW;
2492 
2493 	if (alt) {
2494 		lkb->lkb_rqmode = alt;
2495 		rv = _can_be_granted(r, lkb, now, 0);
2496 		if (rv)
2497 			lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
2498 		else
2499 			lkb->lkb_rqmode = rqmode;
2500 	}
2501  out:
2502 	return rv;
2503 }
2504 
2505 /* Returns the highest requested mode of all blocked conversions; sets
2506    cw if there's a blocked conversion to DLM_LOCK_CW. */
2507 
grant_pending_convert(struct dlm_rsb * r,int high,int * cw,unsigned int * count)2508 static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2509 				 unsigned int *count)
2510 {
2511 	struct dlm_lkb *lkb, *s;
2512 	int recover = rsb_flag(r, RSB_RECOVER_GRANT);
2513 	int hi, demoted, quit, grant_restart, demote_restart;
2514 	int deadlk;
2515 
2516 	quit = 0;
2517  restart:
2518 	grant_restart = 0;
2519 	demote_restart = 0;
2520 	hi = DLM_LOCK_IV;
2521 
2522 	list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2523 		demoted = is_demoted(lkb);
2524 		deadlk = 0;
2525 
2526 		if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
2527 			grant_lock_pending(r, lkb);
2528 			grant_restart = 1;
2529 			if (count)
2530 				(*count)++;
2531 			continue;
2532 		}
2533 
2534 		if (!demoted && is_demoted(lkb)) {
2535 			log_print("WARN: pending demoted %x node %d %s",
2536 				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2537 			demote_restart = 1;
2538 			continue;
2539 		}
2540 
2541 		if (deadlk) {
2542 			/*
2543 			 * If DLM_LKB_NODLKWT flag is set and conversion
2544 			 * deadlock is detected, we request blocking AST and
2545 			 * down (or cancel) conversion.
2546 			 */
2547 			if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) {
2548 				if (lkb->lkb_highbast < lkb->lkb_rqmode) {
2549 					queue_bast(r, lkb, lkb->lkb_rqmode);
2550 					lkb->lkb_highbast = lkb->lkb_rqmode;
2551 				}
2552 			} else {
2553 				log_print("WARN: pending deadlock %x node %d %s",
2554 					  lkb->lkb_id, lkb->lkb_nodeid,
2555 					  r->res_name);
2556 				dlm_dump_rsb(r);
2557 			}
2558 			continue;
2559 		}
2560 
2561 		hi = max_t(int, lkb->lkb_rqmode, hi);
2562 
2563 		if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2564 			*cw = 1;
2565 	}
2566 
2567 	if (grant_restart)
2568 		goto restart;
2569 	if (demote_restart && !quit) {
2570 		quit = 1;
2571 		goto restart;
2572 	}
2573 
2574 	return max_t(int, high, hi);
2575 }
2576 
grant_pending_wait(struct dlm_rsb * r,int high,int * cw,unsigned int * count)2577 static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2578 			      unsigned int *count)
2579 {
2580 	struct dlm_lkb *lkb, *s;
2581 
2582 	list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
2583 		if (can_be_granted(r, lkb, 0, 0, NULL)) {
2584 			grant_lock_pending(r, lkb);
2585 			if (count)
2586 				(*count)++;
2587 		} else {
2588 			high = max_t(int, lkb->lkb_rqmode, high);
2589 			if (lkb->lkb_rqmode == DLM_LOCK_CW)
2590 				*cw = 1;
2591 		}
2592 	}
2593 
2594 	return high;
2595 }
2596 
2597 /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2598    on either the convert or waiting queue.
2599    high is the largest rqmode of all locks blocked on the convert or
2600    waiting queue. */
2601 
lock_requires_bast(struct dlm_lkb * gr,int high,int cw)2602 static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2603 {
2604 	if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2605 		if (gr->lkb_highbast < DLM_LOCK_EX)
2606 			return 1;
2607 		return 0;
2608 	}
2609 
2610 	if (gr->lkb_highbast < high &&
2611 	    !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
2612 		return 1;
2613 	return 0;
2614 }
2615 
grant_pending_locks(struct dlm_rsb * r,unsigned int * count)2616 static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
2617 {
2618 	struct dlm_lkb *lkb, *s;
2619 	int high = DLM_LOCK_IV;
2620 	int cw = 0;
2621 
2622 	if (!is_master(r)) {
2623 		log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2624 		dlm_dump_rsb(r);
2625 		return;
2626 	}
2627 
2628 	high = grant_pending_convert(r, high, &cw, count);
2629 	high = grant_pending_wait(r, high, &cw, count);
2630 
2631 	if (high == DLM_LOCK_IV)
2632 		return;
2633 
2634 	/*
2635 	 * If there are locks left on the wait/convert queue then send blocking
2636 	 * ASTs to granted locks based on the largest requested mode (high)
2637 	 * found above.
2638 	 */
2639 
2640 	list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
2641 		if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
2642 			if (cw && high == DLM_LOCK_PR &&
2643 			    lkb->lkb_grmode == DLM_LOCK_PR)
2644 				queue_bast(r, lkb, DLM_LOCK_CW);
2645 			else
2646 				queue_bast(r, lkb, high);
2647 			lkb->lkb_highbast = high;
2648 		}
2649 	}
2650 }
2651 
modes_require_bast(struct dlm_lkb * gr,struct dlm_lkb * rq)2652 static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2653 {
2654 	if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2655 	    (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2656 		if (gr->lkb_highbast < DLM_LOCK_EX)
2657 			return 1;
2658 		return 0;
2659 	}
2660 
2661 	if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
2662 		return 1;
2663 	return 0;
2664 }
2665 
send_bast_queue(struct dlm_rsb * r,struct list_head * head,struct dlm_lkb * lkb)2666 static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2667 			    struct dlm_lkb *lkb)
2668 {
2669 	struct dlm_lkb *gr;
2670 
2671 	list_for_each_entry(gr, head, lkb_statequeue) {
2672 		/* skip self when sending basts to convertqueue */
2673 		if (gr == lkb)
2674 			continue;
2675 		if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
2676 			queue_bast(r, gr, lkb->lkb_rqmode);
2677 			gr->lkb_highbast = lkb->lkb_rqmode;
2678 		}
2679 	}
2680 }
2681 
send_blocking_asts(struct dlm_rsb * r,struct dlm_lkb * lkb)2682 static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2683 {
2684 	send_bast_queue(r, &r->res_grantqueue, lkb);
2685 }
2686 
send_blocking_asts_all(struct dlm_rsb * r,struct dlm_lkb * lkb)2687 static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2688 {
2689 	send_bast_queue(r, &r->res_grantqueue, lkb);
2690 	send_bast_queue(r, &r->res_convertqueue, lkb);
2691 }
2692 
2693 /* set_master(r, lkb) -- set the master nodeid of a resource
2694 
2695    The purpose of this function is to set the nodeid field in the given
2696    lkb using the nodeid field in the given rsb.  If the rsb's nodeid is
2697    known, it can just be copied to the lkb and the function will return
2698    0.  If the rsb's nodeid is _not_ known, it needs to be looked up
2699    before it can be copied to the lkb.
2700 
2701    When the rsb nodeid is being looked up remotely, the initial lkb
2702    causing the lookup is kept on the ls_waiters list waiting for the
2703    lookup reply.  Other lkb's waiting for the same rsb lookup are kept
2704    on the rsb's res_lookup list until the master is verified.
2705 
2706    Return values:
2707    0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2708    1: the rsb master is not available and the lkb has been placed on
2709       a wait queue
2710 */
2711 
set_master(struct dlm_rsb * r,struct dlm_lkb * lkb)2712 static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2713 {
2714 	int our_nodeid = dlm_our_nodeid();
2715 
2716 	if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2717 		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2718 		r->res_first_lkid = lkb->lkb_id;
2719 		lkb->lkb_nodeid = r->res_nodeid;
2720 		return 0;
2721 	}
2722 
2723 	if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2724 		list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2725 		return 1;
2726 	}
2727 
2728 	if (r->res_master_nodeid == our_nodeid) {
2729 		lkb->lkb_nodeid = 0;
2730 		return 0;
2731 	}
2732 
2733 	if (r->res_master_nodeid) {
2734 		lkb->lkb_nodeid = r->res_master_nodeid;
2735 		return 0;
2736 	}
2737 
2738 	if (dlm_dir_nodeid(r) == our_nodeid) {
2739 		/* This is a somewhat unusual case; find_rsb will usually
2740 		   have set res_master_nodeid when dir nodeid is local, but
2741 		   there are cases where we become the dir node after we've
2742 		   past find_rsb and go through _request_lock again.
2743 		   confirm_master() or process_lookup_list() needs to be
2744 		   called after this. */
2745 		log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2746 			  lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
2747 			  r->res_name);
2748 		r->res_master_nodeid = our_nodeid;
2749 		r->res_nodeid = 0;
2750 		lkb->lkb_nodeid = 0;
2751 		return 0;
2752 	}
2753 
2754 	wait_pending_remove(r);
2755 
2756 	r->res_first_lkid = lkb->lkb_id;
2757 	send_lookup(r, lkb);
2758 	return 1;
2759 }
2760 
process_lookup_list(struct dlm_rsb * r)2761 static void process_lookup_list(struct dlm_rsb *r)
2762 {
2763 	struct dlm_lkb *lkb, *safe;
2764 
2765 	list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2766 		list_del_init(&lkb->lkb_rsb_lookup);
2767 		_request_lock(r, lkb);
2768 		schedule();
2769 	}
2770 }
2771 
2772 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
2773 
confirm_master(struct dlm_rsb * r,int error)2774 static void confirm_master(struct dlm_rsb *r, int error)
2775 {
2776 	struct dlm_lkb *lkb;
2777 
2778 	if (!r->res_first_lkid)
2779 		return;
2780 
2781 	switch (error) {
2782 	case 0:
2783 	case -EINPROGRESS:
2784 		r->res_first_lkid = 0;
2785 		process_lookup_list(r);
2786 		break;
2787 
2788 	case -EAGAIN:
2789 	case -EBADR:
2790 	case -ENOTBLK:
2791 		/* the remote request failed and won't be retried (it was
2792 		   a NOQUEUE, or has been canceled/unlocked); make a waiting
2793 		   lkb the first_lkid */
2794 
2795 		r->res_first_lkid = 0;
2796 
2797 		if (!list_empty(&r->res_lookup)) {
2798 			lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2799 					 lkb_rsb_lookup);
2800 			list_del_init(&lkb->lkb_rsb_lookup);
2801 			r->res_first_lkid = lkb->lkb_id;
2802 			_request_lock(r, lkb);
2803 		}
2804 		break;
2805 
2806 	default:
2807 		log_error(r->res_ls, "confirm_master unknown error %d", error);
2808 	}
2809 }
2810 
set_lock_args(int mode,struct dlm_lksb * lksb,uint32_t flags,int namelen,unsigned long timeout_cs,void (* ast)(void * astparam),void * astparam,void (* bast)(void * astparam,int mode),struct dlm_args * args)2811 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2812 			 int namelen, unsigned long timeout_cs,
2813 			 void (*ast) (void *astparam),
2814 			 void *astparam,
2815 			 void (*bast) (void *astparam, int mode),
2816 			 struct dlm_args *args)
2817 {
2818 	int rv = -EINVAL;
2819 
2820 	/* check for invalid arg usage */
2821 
2822 	if (mode < 0 || mode > DLM_LOCK_EX)
2823 		goto out;
2824 
2825 	if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2826 		goto out;
2827 
2828 	if (flags & DLM_LKF_CANCEL)
2829 		goto out;
2830 
2831 	if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2832 		goto out;
2833 
2834 	if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2835 		goto out;
2836 
2837 	if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2838 		goto out;
2839 
2840 	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2841 		goto out;
2842 
2843 	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2844 		goto out;
2845 
2846 	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2847 		goto out;
2848 
2849 	if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2850 		goto out;
2851 
2852 	if (!ast || !lksb)
2853 		goto out;
2854 
2855 	if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2856 		goto out;
2857 
2858 	if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2859 		goto out;
2860 
2861 	/* these args will be copied to the lkb in validate_lock_args,
2862 	   it cannot be done now because when converting locks, fields in
2863 	   an active lkb cannot be modified before locking the rsb */
2864 
2865 	args->flags = flags;
2866 	args->astfn = ast;
2867 	args->astparam = astparam;
2868 	args->bastfn = bast;
2869 	args->timeout = timeout_cs;
2870 	args->mode = mode;
2871 	args->lksb = lksb;
2872 	rv = 0;
2873  out:
2874 	return rv;
2875 }
2876 
set_unlock_args(uint32_t flags,void * astarg,struct dlm_args * args)2877 static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2878 {
2879 	if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2880  		      DLM_LKF_FORCEUNLOCK))
2881 		return -EINVAL;
2882 
2883 	if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2884 		return -EINVAL;
2885 
2886 	args->flags = flags;
2887 	args->astparam = astarg;
2888 	return 0;
2889 }
2890 
validate_lock_args(struct dlm_ls * ls,struct dlm_lkb * lkb,struct dlm_args * args)2891 static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2892 			      struct dlm_args *args)
2893 {
2894 	int rv = -EBUSY;
2895 
2896 	if (args->flags & DLM_LKF_CONVERT) {
2897 		if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2898 			goto out;
2899 
2900 		if (lkb->lkb_wait_type)
2901 			goto out;
2902 
2903 		if (is_overlap(lkb))
2904 			goto out;
2905 
2906 		rv = -EINVAL;
2907 		if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2908 			goto out;
2909 
2910 		if (args->flags & DLM_LKF_QUECVT &&
2911 		    !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2912 			goto out;
2913 	}
2914 
2915 	lkb->lkb_exflags = args->flags;
2916 	lkb->lkb_sbflags = 0;
2917 	lkb->lkb_astfn = args->astfn;
2918 	lkb->lkb_astparam = args->astparam;
2919 	lkb->lkb_bastfn = args->bastfn;
2920 	lkb->lkb_rqmode = args->mode;
2921 	lkb->lkb_lksb = args->lksb;
2922 	lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2923 	lkb->lkb_ownpid = (int) current->pid;
2924 	lkb->lkb_timeout_cs = args->timeout;
2925 	rv = 0;
2926  out:
2927 	if (rv)
2928 		log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2929 			  rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2930 			  lkb->lkb_status, lkb->lkb_wait_type,
2931 			  lkb->lkb_resource->res_name);
2932 	return rv;
2933 }
2934 
2935 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2936    for success */
2937 
2938 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2939    because there may be a lookup in progress and it's valid to do
2940    cancel/unlockf on it */
2941 
validate_unlock_args(struct dlm_lkb * lkb,struct dlm_args * args)2942 static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2943 {
2944 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2945 	int rv = -EINVAL;
2946 
2947 	if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2948 		log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2949 		dlm_print_lkb(lkb);
2950 		goto out;
2951 	}
2952 
2953 	/* an lkb may still exist even though the lock is EOL'ed due to a
2954 	   cancel, unlock or failed noqueue request; an app can't use these
2955 	   locks; return same error as if the lkid had not been found at all */
2956 
2957 	if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2958 		log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2959 		rv = -ENOENT;
2960 		goto out;
2961 	}
2962 
2963 	/* an lkb may be waiting for an rsb lookup to complete where the
2964 	   lookup was initiated by another lock */
2965 
2966 	if (!list_empty(&lkb->lkb_rsb_lookup)) {
2967 		if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2968 			log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2969 			list_del_init(&lkb->lkb_rsb_lookup);
2970 			queue_cast(lkb->lkb_resource, lkb,
2971 				   args->flags & DLM_LKF_CANCEL ?
2972 				   -DLM_ECANCEL : -DLM_EUNLOCK);
2973 			unhold_lkb(lkb); /* undoes create_lkb() */
2974 		}
2975 		/* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2976 		rv = -EBUSY;
2977 		goto out;
2978 	}
2979 
2980 	/* cancel not allowed with another cancel/unlock in progress */
2981 
2982 	if (args->flags & DLM_LKF_CANCEL) {
2983 		if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2984 			goto out;
2985 
2986 		if (is_overlap(lkb))
2987 			goto out;
2988 
2989 		/* don't let scand try to do a cancel */
2990 		del_timeout(lkb);
2991 
2992 		if (lkb->lkb_flags & DLM_IFL_RESEND) {
2993 			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2994 			rv = -EBUSY;
2995 			goto out;
2996 		}
2997 
2998 		/* there's nothing to cancel */
2999 		if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
3000 		    !lkb->lkb_wait_type) {
3001 			rv = -EBUSY;
3002 			goto out;
3003 		}
3004 
3005 		switch (lkb->lkb_wait_type) {
3006 		case DLM_MSG_LOOKUP:
3007 		case DLM_MSG_REQUEST:
3008 			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
3009 			rv = -EBUSY;
3010 			goto out;
3011 		case DLM_MSG_UNLOCK:
3012 		case DLM_MSG_CANCEL:
3013 			goto out;
3014 		}
3015 		/* add_to_waiters() will set OVERLAP_CANCEL */
3016 		goto out_ok;
3017 	}
3018 
3019 	/* do we need to allow a force-unlock if there's a normal unlock
3020 	   already in progress?  in what conditions could the normal unlock
3021 	   fail such that we'd want to send a force-unlock to be sure? */
3022 
3023 	if (args->flags & DLM_LKF_FORCEUNLOCK) {
3024 		if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
3025 			goto out;
3026 
3027 		if (is_overlap_unlock(lkb))
3028 			goto out;
3029 
3030 		/* don't let scand try to do a cancel */
3031 		del_timeout(lkb);
3032 
3033 		if (lkb->lkb_flags & DLM_IFL_RESEND) {
3034 			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3035 			rv = -EBUSY;
3036 			goto out;
3037 		}
3038 
3039 		switch (lkb->lkb_wait_type) {
3040 		case DLM_MSG_LOOKUP:
3041 		case DLM_MSG_REQUEST:
3042 			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3043 			rv = -EBUSY;
3044 			goto out;
3045 		case DLM_MSG_UNLOCK:
3046 			goto out;
3047 		}
3048 		/* add_to_waiters() will set OVERLAP_UNLOCK */
3049 		goto out_ok;
3050 	}
3051 
3052 	/* normal unlock not allowed if there's any op in progress */
3053 	rv = -EBUSY;
3054 	if (lkb->lkb_wait_type || lkb->lkb_wait_count)
3055 		goto out;
3056 
3057  out_ok:
3058 	/* an overlapping op shouldn't blow away exflags from other op */
3059 	lkb->lkb_exflags |= args->flags;
3060 	lkb->lkb_sbflags = 0;
3061 	lkb->lkb_astparam = args->astparam;
3062 	rv = 0;
3063  out:
3064 	if (rv)
3065 		log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
3066 			  lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3067 			  args->flags, lkb->lkb_wait_type,
3068 			  lkb->lkb_resource->res_name);
3069 	return rv;
3070 }
3071 
3072 /*
3073  * Four stage 4 varieties:
3074  * do_request(), do_convert(), do_unlock(), do_cancel()
3075  * These are called on the master node for the given lock and
3076  * from the central locking logic.
3077  */
3078 
do_request(struct dlm_rsb * r,struct dlm_lkb * lkb)3079 static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3080 {
3081 	int error = 0;
3082 
3083 	if (can_be_granted(r, lkb, 1, 0, NULL)) {
3084 		grant_lock(r, lkb);
3085 		queue_cast(r, lkb, 0);
3086 		goto out;
3087 	}
3088 
3089 	if (can_be_queued(lkb)) {
3090 		error = -EINPROGRESS;
3091 		add_lkb(r, lkb, DLM_LKSTS_WAITING);
3092 		add_timeout(lkb);
3093 		goto out;
3094 	}
3095 
3096 	error = -EAGAIN;
3097 	queue_cast(r, lkb, -EAGAIN);
3098  out:
3099 	return error;
3100 }
3101 
do_request_effects(struct dlm_rsb * r,struct dlm_lkb * lkb,int error)3102 static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3103 			       int error)
3104 {
3105 	switch (error) {
3106 	case -EAGAIN:
3107 		if (force_blocking_asts(lkb))
3108 			send_blocking_asts_all(r, lkb);
3109 		break;
3110 	case -EINPROGRESS:
3111 		send_blocking_asts(r, lkb);
3112 		break;
3113 	}
3114 }
3115 
do_convert(struct dlm_rsb * r,struct dlm_lkb * lkb)3116 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3117 {
3118 	int error = 0;
3119 	int deadlk = 0;
3120 
3121 	/* changing an existing lock may allow others to be granted */
3122 
3123 	if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
3124 		grant_lock(r, lkb);
3125 		queue_cast(r, lkb, 0);
3126 		goto out;
3127 	}
3128 
3129 	/* can_be_granted() detected that this lock would block in a conversion
3130 	   deadlock, so we leave it on the granted queue and return EDEADLK in
3131 	   the ast for the convert. */
3132 
3133 	if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
3134 		/* it's left on the granted queue */
3135 		revert_lock(r, lkb);
3136 		queue_cast(r, lkb, -EDEADLK);
3137 		error = -EDEADLK;
3138 		goto out;
3139 	}
3140 
3141 	/* is_demoted() means the can_be_granted() above set the grmode
3142 	   to NL, and left us on the granted queue.  This auto-demotion
3143 	   (due to CONVDEADLK) might mean other locks, and/or this lock, are
3144 	   now grantable.  We have to try to grant other converting locks
3145 	   before we try again to grant this one. */
3146 
3147 	if (is_demoted(lkb)) {
3148 		grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
3149 		if (_can_be_granted(r, lkb, 1, 0)) {
3150 			grant_lock(r, lkb);
3151 			queue_cast(r, lkb, 0);
3152 			goto out;
3153 		}
3154 		/* else fall through and move to convert queue */
3155 	}
3156 
3157 	if (can_be_queued(lkb)) {
3158 		error = -EINPROGRESS;
3159 		del_lkb(r, lkb);
3160 		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3161 		add_timeout(lkb);
3162 		goto out;
3163 	}
3164 
3165 	error = -EAGAIN;
3166 	queue_cast(r, lkb, -EAGAIN);
3167  out:
3168 	return error;
3169 }
3170 
do_convert_effects(struct dlm_rsb * r,struct dlm_lkb * lkb,int error)3171 static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3172 			       int error)
3173 {
3174 	switch (error) {
3175 	case 0:
3176 		grant_pending_locks(r, NULL);
3177 		/* grant_pending_locks also sends basts */
3178 		break;
3179 	case -EAGAIN:
3180 		if (force_blocking_asts(lkb))
3181 			send_blocking_asts_all(r, lkb);
3182 		break;
3183 	case -EINPROGRESS:
3184 		send_blocking_asts(r, lkb);
3185 		break;
3186 	}
3187 }
3188 
do_unlock(struct dlm_rsb * r,struct dlm_lkb * lkb)3189 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3190 {
3191 	remove_lock(r, lkb);
3192 	queue_cast(r, lkb, -DLM_EUNLOCK);
3193 	return -DLM_EUNLOCK;
3194 }
3195 
do_unlock_effects(struct dlm_rsb * r,struct dlm_lkb * lkb,int error)3196 static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3197 			      int error)
3198 {
3199 	grant_pending_locks(r, NULL);
3200 }
3201 
3202 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3203 
do_cancel(struct dlm_rsb * r,struct dlm_lkb * lkb)3204 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3205 {
3206 	int error;
3207 
3208 	error = revert_lock(r, lkb);
3209 	if (error) {
3210 		queue_cast(r, lkb, -DLM_ECANCEL);
3211 		return -DLM_ECANCEL;
3212 	}
3213 	return 0;
3214 }
3215 
do_cancel_effects(struct dlm_rsb * r,struct dlm_lkb * lkb,int error)3216 static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3217 			      int error)
3218 {
3219 	if (error)
3220 		grant_pending_locks(r, NULL);
3221 }
3222 
3223 /*
3224  * Four stage 3 varieties:
3225  * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3226  */
3227 
3228 /* add a new lkb to a possibly new rsb, called by requesting process */
3229 
_request_lock(struct dlm_rsb * r,struct dlm_lkb * lkb)3230 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3231 {
3232 	int error;
3233 
3234 	/* set_master: sets lkb nodeid from r */
3235 
3236 	error = set_master(r, lkb);
3237 	if (error < 0)
3238 		goto out;
3239 	if (error) {
3240 		error = 0;
3241 		goto out;
3242 	}
3243 
3244 	if (is_remote(r)) {
3245 		/* receive_request() calls do_request() on remote node */
3246 		error = send_request(r, lkb);
3247 	} else {
3248 		error = do_request(r, lkb);
3249 		/* for remote locks the request_reply is sent
3250 		   between do_request and do_request_effects */
3251 		do_request_effects(r, lkb, error);
3252 	}
3253  out:
3254 	return error;
3255 }
3256 
3257 /* change some property of an existing lkb, e.g. mode */
3258 
_convert_lock(struct dlm_rsb * r,struct dlm_lkb * lkb)3259 static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3260 {
3261 	int error;
3262 
3263 	if (is_remote(r)) {
3264 		/* receive_convert() calls do_convert() on remote node */
3265 		error = send_convert(r, lkb);
3266 	} else {
3267 		error = do_convert(r, lkb);
3268 		/* for remote locks the convert_reply is sent
3269 		   between do_convert and do_convert_effects */
3270 		do_convert_effects(r, lkb, error);
3271 	}
3272 
3273 	return error;
3274 }
3275 
3276 /* remove an existing lkb from the granted queue */
3277 
_unlock_lock(struct dlm_rsb * r,struct dlm_lkb * lkb)3278 static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3279 {
3280 	int error;
3281 
3282 	if (is_remote(r)) {
3283 		/* receive_unlock() calls do_unlock() on remote node */
3284 		error = send_unlock(r, lkb);
3285 	} else {
3286 		error = do_unlock(r, lkb);
3287 		/* for remote locks the unlock_reply is sent
3288 		   between do_unlock and do_unlock_effects */
3289 		do_unlock_effects(r, lkb, error);
3290 	}
3291 
3292 	return error;
3293 }
3294 
3295 /* remove an existing lkb from the convert or wait queue */
3296 
_cancel_lock(struct dlm_rsb * r,struct dlm_lkb * lkb)3297 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3298 {
3299 	int error;
3300 
3301 	if (is_remote(r)) {
3302 		/* receive_cancel() calls do_cancel() on remote node */
3303 		error = send_cancel(r, lkb);
3304 	} else {
3305 		error = do_cancel(r, lkb);
3306 		/* for remote locks the cancel_reply is sent
3307 		   between do_cancel and do_cancel_effects */
3308 		do_cancel_effects(r, lkb, error);
3309 	}
3310 
3311 	return error;
3312 }
3313 
3314 /*
3315  * Four stage 2 varieties:
3316  * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3317  */
3318 
request_lock(struct dlm_ls * ls,struct dlm_lkb * lkb,char * name,int len,struct dlm_args * args)3319 static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
3320 			int len, struct dlm_args *args)
3321 {
3322 	struct dlm_rsb *r;
3323 	int error;
3324 
3325 	error = validate_lock_args(ls, lkb, args);
3326 	if (error)
3327 		return error;
3328 
3329 	error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
3330 	if (error)
3331 		return error;
3332 
3333 	lock_rsb(r);
3334 
3335 	attach_lkb(r, lkb);
3336 	lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3337 
3338 	error = _request_lock(r, lkb);
3339 
3340 	unlock_rsb(r);
3341 	put_rsb(r);
3342 	return error;
3343 }
3344 
convert_lock(struct dlm_ls * ls,struct dlm_lkb * lkb,struct dlm_args * args)3345 static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3346 			struct dlm_args *args)
3347 {
3348 	struct dlm_rsb *r;
3349 	int error;
3350 
3351 	r = lkb->lkb_resource;
3352 
3353 	hold_rsb(r);
3354 	lock_rsb(r);
3355 
3356 	error = validate_lock_args(ls, lkb, args);
3357 	if (error)
3358 		goto out;
3359 
3360 	error = _convert_lock(r, lkb);
3361  out:
3362 	unlock_rsb(r);
3363 	put_rsb(r);
3364 	return error;
3365 }
3366 
unlock_lock(struct dlm_ls * ls,struct dlm_lkb * lkb,struct dlm_args * args)3367 static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3368 		       struct dlm_args *args)
3369 {
3370 	struct dlm_rsb *r;
3371 	int error;
3372 
3373 	r = lkb->lkb_resource;
3374 
3375 	hold_rsb(r);
3376 	lock_rsb(r);
3377 
3378 	error = validate_unlock_args(lkb, args);
3379 	if (error)
3380 		goto out;
3381 
3382 	error = _unlock_lock(r, lkb);
3383  out:
3384 	unlock_rsb(r);
3385 	put_rsb(r);
3386 	return error;
3387 }
3388 
cancel_lock(struct dlm_ls * ls,struct dlm_lkb * lkb,struct dlm_args * args)3389 static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3390 		       struct dlm_args *args)
3391 {
3392 	struct dlm_rsb *r;
3393 	int error;
3394 
3395 	r = lkb->lkb_resource;
3396 
3397 	hold_rsb(r);
3398 	lock_rsb(r);
3399 
3400 	error = validate_unlock_args(lkb, args);
3401 	if (error)
3402 		goto out;
3403 
3404 	error = _cancel_lock(r, lkb);
3405  out:
3406 	unlock_rsb(r);
3407 	put_rsb(r);
3408 	return error;
3409 }
3410 
3411 /*
3412  * Two stage 1 varieties:  dlm_lock() and dlm_unlock()
3413  */
3414 
dlm_lock(dlm_lockspace_t * lockspace,int mode,struct dlm_lksb * lksb,uint32_t flags,void * name,unsigned int namelen,uint32_t parent_lkid,void (* ast)(void * astarg),void * astarg,void (* bast)(void * astarg,int mode))3415 int dlm_lock(dlm_lockspace_t *lockspace,
3416 	     int mode,
3417 	     struct dlm_lksb *lksb,
3418 	     uint32_t flags,
3419 	     void *name,
3420 	     unsigned int namelen,
3421 	     uint32_t parent_lkid,
3422 	     void (*ast) (void *astarg),
3423 	     void *astarg,
3424 	     void (*bast) (void *astarg, int mode))
3425 {
3426 	struct dlm_ls *ls;
3427 	struct dlm_lkb *lkb;
3428 	struct dlm_args args;
3429 	int error, convert = flags & DLM_LKF_CONVERT;
3430 
3431 	ls = dlm_find_lockspace_local(lockspace);
3432 	if (!ls)
3433 		return -EINVAL;
3434 
3435 	dlm_lock_recovery(ls);
3436 
3437 	if (convert)
3438 		error = find_lkb(ls, lksb->sb_lkid, &lkb);
3439 	else
3440 		error = create_lkb(ls, &lkb);
3441 
3442 	if (error)
3443 		goto out;
3444 
3445 	error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
3446 			      astarg, bast, &args);
3447 	if (error)
3448 		goto out_put;
3449 
3450 	if (convert)
3451 		error = convert_lock(ls, lkb, &args);
3452 	else
3453 		error = request_lock(ls, lkb, name, namelen, &args);
3454 
3455 	if (error == -EINPROGRESS)
3456 		error = 0;
3457  out_put:
3458 	if (convert || error)
3459 		__put_lkb(ls, lkb);
3460 	if (error == -EAGAIN || error == -EDEADLK)
3461 		error = 0;
3462  out:
3463 	dlm_unlock_recovery(ls);
3464 	dlm_put_lockspace(ls);
3465 	return error;
3466 }
3467 
dlm_unlock(dlm_lockspace_t * lockspace,uint32_t lkid,uint32_t flags,struct dlm_lksb * lksb,void * astarg)3468 int dlm_unlock(dlm_lockspace_t *lockspace,
3469 	       uint32_t lkid,
3470 	       uint32_t flags,
3471 	       struct dlm_lksb *lksb,
3472 	       void *astarg)
3473 {
3474 	struct dlm_ls *ls;
3475 	struct dlm_lkb *lkb;
3476 	struct dlm_args args;
3477 	int error;
3478 
3479 	ls = dlm_find_lockspace_local(lockspace);
3480 	if (!ls)
3481 		return -EINVAL;
3482 
3483 	dlm_lock_recovery(ls);
3484 
3485 	error = find_lkb(ls, lkid, &lkb);
3486 	if (error)
3487 		goto out;
3488 
3489 	error = set_unlock_args(flags, astarg, &args);
3490 	if (error)
3491 		goto out_put;
3492 
3493 	if (flags & DLM_LKF_CANCEL)
3494 		error = cancel_lock(ls, lkb, &args);
3495 	else
3496 		error = unlock_lock(ls, lkb, &args);
3497 
3498 	if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3499 		error = 0;
3500 	if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3501 		error = 0;
3502  out_put:
3503 	dlm_put_lkb(lkb);
3504  out:
3505 	dlm_unlock_recovery(ls);
3506 	dlm_put_lockspace(ls);
3507 	return error;
3508 }
3509 
3510 /*
3511  * send/receive routines for remote operations and replies
3512  *
3513  * send_args
3514  * send_common
3515  * send_request			receive_request
3516  * send_convert			receive_convert
3517  * send_unlock			receive_unlock
3518  * send_cancel			receive_cancel
3519  * send_grant			receive_grant
3520  * send_bast			receive_bast
3521  * send_lookup			receive_lookup
3522  * send_remove			receive_remove
3523  *
3524  * 				send_common_reply
3525  * receive_request_reply	send_request_reply
3526  * receive_convert_reply	send_convert_reply
3527  * receive_unlock_reply		send_unlock_reply
3528  * receive_cancel_reply		send_cancel_reply
3529  * receive_lookup_reply		send_lookup_reply
3530  */
3531 
_create_message(struct dlm_ls * ls,int mb_len,int to_nodeid,int mstype,struct dlm_message ** ms_ret,struct dlm_mhandle ** mh_ret)3532 static int _create_message(struct dlm_ls *ls, int mb_len,
3533 			   int to_nodeid, int mstype,
3534 			   struct dlm_message **ms_ret,
3535 			   struct dlm_mhandle **mh_ret)
3536 {
3537 	struct dlm_message *ms;
3538 	struct dlm_mhandle *mh;
3539 	char *mb;
3540 
3541 	/* get_buffer gives us a message handle (mh) that we need to
3542 	   pass into lowcomms_commit and a message buffer (mb) that we
3543 	   write our data into */
3544 
3545 	mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
3546 	if (!mh)
3547 		return -ENOBUFS;
3548 
3549 	memset(mb, 0, mb_len);
3550 
3551 	ms = (struct dlm_message *) mb;
3552 
3553 	ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3554 	ms->m_header.h_lockspace = ls->ls_global_id;
3555 	ms->m_header.h_nodeid = dlm_our_nodeid();
3556 	ms->m_header.h_length = mb_len;
3557 	ms->m_header.h_cmd = DLM_MSG;
3558 
3559 	ms->m_type = mstype;
3560 
3561 	*mh_ret = mh;
3562 	*ms_ret = ms;
3563 	return 0;
3564 }
3565 
create_message(struct dlm_rsb * r,struct dlm_lkb * lkb,int to_nodeid,int mstype,struct dlm_message ** ms_ret,struct dlm_mhandle ** mh_ret)3566 static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3567 			  int to_nodeid, int mstype,
3568 			  struct dlm_message **ms_ret,
3569 			  struct dlm_mhandle **mh_ret)
3570 {
3571 	int mb_len = sizeof(struct dlm_message);
3572 
3573 	switch (mstype) {
3574 	case DLM_MSG_REQUEST:
3575 	case DLM_MSG_LOOKUP:
3576 	case DLM_MSG_REMOVE:
3577 		mb_len += r->res_length;
3578 		break;
3579 	case DLM_MSG_CONVERT:
3580 	case DLM_MSG_UNLOCK:
3581 	case DLM_MSG_REQUEST_REPLY:
3582 	case DLM_MSG_CONVERT_REPLY:
3583 	case DLM_MSG_GRANT:
3584 		if (lkb && lkb->lkb_lvbptr)
3585 			mb_len += r->res_ls->ls_lvblen;
3586 		break;
3587 	}
3588 
3589 	return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3590 			       ms_ret, mh_ret);
3591 }
3592 
3593 /* further lowcomms enhancements or alternate implementations may make
3594    the return value from this function useful at some point */
3595 
send_message(struct dlm_mhandle * mh,struct dlm_message * ms)3596 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
3597 {
3598 	dlm_message_out(ms);
3599 	dlm_lowcomms_commit_buffer(mh);
3600 	return 0;
3601 }
3602 
send_args(struct dlm_rsb * r,struct dlm_lkb * lkb,struct dlm_message * ms)3603 static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3604 		      struct dlm_message *ms)
3605 {
3606 	ms->m_nodeid   = lkb->lkb_nodeid;
3607 	ms->m_pid      = lkb->lkb_ownpid;
3608 	ms->m_lkid     = lkb->lkb_id;
3609 	ms->m_remid    = lkb->lkb_remid;
3610 	ms->m_exflags  = lkb->lkb_exflags;
3611 	ms->m_sbflags  = lkb->lkb_sbflags;
3612 	ms->m_flags    = lkb->lkb_flags;
3613 	ms->m_lvbseq   = lkb->lkb_lvbseq;
3614 	ms->m_status   = lkb->lkb_status;
3615 	ms->m_grmode   = lkb->lkb_grmode;
3616 	ms->m_rqmode   = lkb->lkb_rqmode;
3617 	ms->m_hash     = r->res_hash;
3618 
3619 	/* m_result and m_bastmode are set from function args,
3620 	   not from lkb fields */
3621 
3622 	if (lkb->lkb_bastfn)
3623 		ms->m_asts |= DLM_CB_BAST;
3624 	if (lkb->lkb_astfn)
3625 		ms->m_asts |= DLM_CB_CAST;
3626 
3627 	/* compare with switch in create_message; send_remove() doesn't
3628 	   use send_args() */
3629 
3630 	switch (ms->m_type) {
3631 	case DLM_MSG_REQUEST:
3632 	case DLM_MSG_LOOKUP:
3633 		memcpy(ms->m_extra, r->res_name, r->res_length);
3634 		break;
3635 	case DLM_MSG_CONVERT:
3636 	case DLM_MSG_UNLOCK:
3637 	case DLM_MSG_REQUEST_REPLY:
3638 	case DLM_MSG_CONVERT_REPLY:
3639 	case DLM_MSG_GRANT:
3640 		if (!lkb->lkb_lvbptr)
3641 			break;
3642 		memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
3643 		break;
3644 	}
3645 }
3646 
send_common(struct dlm_rsb * r,struct dlm_lkb * lkb,int mstype)3647 static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
3648 {
3649 	struct dlm_message *ms;
3650 	struct dlm_mhandle *mh;
3651 	int to_nodeid, error;
3652 
3653 	to_nodeid = r->res_nodeid;
3654 
3655 	error = add_to_waiters(lkb, mstype, to_nodeid);
3656 	if (error)
3657 		return error;
3658 
3659 	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3660 	if (error)
3661 		goto fail;
3662 
3663 	send_args(r, lkb, ms);
3664 
3665 	error = send_message(mh, ms);
3666 	if (error)
3667 		goto fail;
3668 	return 0;
3669 
3670  fail:
3671 	remove_from_waiters(lkb, msg_reply_type(mstype));
3672 	return error;
3673 }
3674 
send_request(struct dlm_rsb * r,struct dlm_lkb * lkb)3675 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3676 {
3677 	return send_common(r, lkb, DLM_MSG_REQUEST);
3678 }
3679 
send_convert(struct dlm_rsb * r,struct dlm_lkb * lkb)3680 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3681 {
3682 	int error;
3683 
3684 	error = send_common(r, lkb, DLM_MSG_CONVERT);
3685 
3686 	/* down conversions go without a reply from the master */
3687 	if (!error && down_conversion(lkb)) {
3688 		remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
3689 		r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
3690 		r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
3691 		r->res_ls->ls_stub_ms.m_result = 0;
3692 		__receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
3693 	}
3694 
3695 	return error;
3696 }
3697 
3698 /* FIXME: if this lkb is the only lock we hold on the rsb, then set
3699    MASTER_UNCERTAIN to force the next request on the rsb to confirm
3700    that the master is still correct. */
3701 
send_unlock(struct dlm_rsb * r,struct dlm_lkb * lkb)3702 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3703 {
3704 	return send_common(r, lkb, DLM_MSG_UNLOCK);
3705 }
3706 
send_cancel(struct dlm_rsb * r,struct dlm_lkb * lkb)3707 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3708 {
3709 	return send_common(r, lkb, DLM_MSG_CANCEL);
3710 }
3711 
send_grant(struct dlm_rsb * r,struct dlm_lkb * lkb)3712 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3713 {
3714 	struct dlm_message *ms;
3715 	struct dlm_mhandle *mh;
3716 	int to_nodeid, error;
3717 
3718 	to_nodeid = lkb->lkb_nodeid;
3719 
3720 	error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
3721 	if (error)
3722 		goto out;
3723 
3724 	send_args(r, lkb, ms);
3725 
3726 	ms->m_result = 0;
3727 
3728 	error = send_message(mh, ms);
3729  out:
3730 	return error;
3731 }
3732 
send_bast(struct dlm_rsb * r,struct dlm_lkb * lkb,int mode)3733 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3734 {
3735 	struct dlm_message *ms;
3736 	struct dlm_mhandle *mh;
3737 	int to_nodeid, error;
3738 
3739 	to_nodeid = lkb->lkb_nodeid;
3740 
3741 	error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
3742 	if (error)
3743 		goto out;
3744 
3745 	send_args(r, lkb, ms);
3746 
3747 	ms->m_bastmode = mode;
3748 
3749 	error = send_message(mh, ms);
3750  out:
3751 	return error;
3752 }
3753 
send_lookup(struct dlm_rsb * r,struct dlm_lkb * lkb)3754 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3755 {
3756 	struct dlm_message *ms;
3757 	struct dlm_mhandle *mh;
3758 	int to_nodeid, error;
3759 
3760 	to_nodeid = dlm_dir_nodeid(r);
3761 
3762 	error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3763 	if (error)
3764 		return error;
3765 
3766 	error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
3767 	if (error)
3768 		goto fail;
3769 
3770 	send_args(r, lkb, ms);
3771 
3772 	error = send_message(mh, ms);
3773 	if (error)
3774 		goto fail;
3775 	return 0;
3776 
3777  fail:
3778 	remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3779 	return error;
3780 }
3781 
send_remove(struct dlm_rsb * r)3782 static int send_remove(struct dlm_rsb *r)
3783 {
3784 	struct dlm_message *ms;
3785 	struct dlm_mhandle *mh;
3786 	int to_nodeid, error;
3787 
3788 	to_nodeid = dlm_dir_nodeid(r);
3789 
3790 	error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
3791 	if (error)
3792 		goto out;
3793 
3794 	memcpy(ms->m_extra, r->res_name, r->res_length);
3795 	ms->m_hash = r->res_hash;
3796 
3797 	error = send_message(mh, ms);
3798  out:
3799 	return error;
3800 }
3801 
send_common_reply(struct dlm_rsb * r,struct dlm_lkb * lkb,int mstype,int rv)3802 static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3803 			     int mstype, int rv)
3804 {
3805 	struct dlm_message *ms;
3806 	struct dlm_mhandle *mh;
3807 	int to_nodeid, error;
3808 
3809 	to_nodeid = lkb->lkb_nodeid;
3810 
3811 	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3812 	if (error)
3813 		goto out;
3814 
3815 	send_args(r, lkb, ms);
3816 
3817 	ms->m_result = rv;
3818 
3819 	error = send_message(mh, ms);
3820  out:
3821 	return error;
3822 }
3823 
send_request_reply(struct dlm_rsb * r,struct dlm_lkb * lkb,int rv)3824 static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3825 {
3826 	return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3827 }
3828 
send_convert_reply(struct dlm_rsb * r,struct dlm_lkb * lkb,int rv)3829 static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3830 {
3831 	return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3832 }
3833 
send_unlock_reply(struct dlm_rsb * r,struct dlm_lkb * lkb,int rv)3834 static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3835 {
3836 	return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3837 }
3838 
send_cancel_reply(struct dlm_rsb * r,struct dlm_lkb * lkb,int rv)3839 static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3840 {
3841 	return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3842 }
3843 
send_lookup_reply(struct dlm_ls * ls,struct dlm_message * ms_in,int ret_nodeid,int rv)3844 static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3845 			     int ret_nodeid, int rv)
3846 {
3847 	struct dlm_rsb *r = &ls->ls_stub_rsb;
3848 	struct dlm_message *ms;
3849 	struct dlm_mhandle *mh;
3850 	int error, nodeid = ms_in->m_header.h_nodeid;
3851 
3852 	error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
3853 	if (error)
3854 		goto out;
3855 
3856 	ms->m_lkid = ms_in->m_lkid;
3857 	ms->m_result = rv;
3858 	ms->m_nodeid = ret_nodeid;
3859 
3860 	error = send_message(mh, ms);
3861  out:
3862 	return error;
3863 }
3864 
3865 /* which args we save from a received message depends heavily on the type
3866    of message, unlike the send side where we can safely send everything about
3867    the lkb for any type of message */
3868 
receive_flags(struct dlm_lkb * lkb,struct dlm_message * ms)3869 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3870 {
3871 	lkb->lkb_exflags = ms->m_exflags;
3872 	lkb->lkb_sbflags = ms->m_sbflags;
3873 	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3874 		         (ms->m_flags & 0x0000FFFF);
3875 }
3876 
receive_flags_reply(struct dlm_lkb * lkb,struct dlm_message * ms)3877 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3878 {
3879 	if (ms->m_flags == DLM_IFL_STUB_MS)
3880 		return;
3881 
3882 	lkb->lkb_sbflags = ms->m_sbflags;
3883 	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3884 		         (ms->m_flags & 0x0000FFFF);
3885 }
3886 
receive_extralen(struct dlm_message * ms)3887 static int receive_extralen(struct dlm_message *ms)
3888 {
3889 	return (ms->m_header.h_length - sizeof(struct dlm_message));
3890 }
3891 
receive_lvb(struct dlm_ls * ls,struct dlm_lkb * lkb,struct dlm_message * ms)3892 static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3893 		       struct dlm_message *ms)
3894 {
3895 	int len;
3896 
3897 	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3898 		if (!lkb->lkb_lvbptr)
3899 			lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3900 		if (!lkb->lkb_lvbptr)
3901 			return -ENOMEM;
3902 		len = receive_extralen(ms);
3903 		if (len > ls->ls_lvblen)
3904 			len = ls->ls_lvblen;
3905 		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3906 	}
3907 	return 0;
3908 }
3909 
fake_bastfn(void * astparam,int mode)3910 static void fake_bastfn(void *astparam, int mode)
3911 {
3912 	log_print("fake_bastfn should not be called");
3913 }
3914 
fake_astfn(void * astparam)3915 static void fake_astfn(void *astparam)
3916 {
3917 	log_print("fake_astfn should not be called");
3918 }
3919 
receive_request_args(struct dlm_ls * ls,struct dlm_lkb * lkb,struct dlm_message * ms)3920 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3921 				struct dlm_message *ms)
3922 {
3923 	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3924 	lkb->lkb_ownpid = ms->m_pid;
3925 	lkb->lkb_remid = ms->m_lkid;
3926 	lkb->lkb_grmode = DLM_LOCK_IV;
3927 	lkb->lkb_rqmode = ms->m_rqmode;
3928 
3929 	lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
3930 	lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
3931 
3932 	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3933 		/* lkb was just created so there won't be an lvb yet */
3934 		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3935 		if (!lkb->lkb_lvbptr)
3936 			return -ENOMEM;
3937 	}
3938 
3939 	return 0;
3940 }
3941 
receive_convert_args(struct dlm_ls * ls,struct dlm_lkb * lkb,struct dlm_message * ms)3942 static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3943 				struct dlm_message *ms)
3944 {
3945 	if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3946 		return -EBUSY;
3947 
3948 	if (receive_lvb(ls, lkb, ms))
3949 		return -ENOMEM;
3950 
3951 	lkb->lkb_rqmode = ms->m_rqmode;
3952 	lkb->lkb_lvbseq = ms->m_lvbseq;
3953 
3954 	return 0;
3955 }
3956 
receive_unlock_args(struct dlm_ls * ls,struct dlm_lkb * lkb,struct dlm_message * ms)3957 static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3958 			       struct dlm_message *ms)
3959 {
3960 	if (receive_lvb(ls, lkb, ms))
3961 		return -ENOMEM;
3962 	return 0;
3963 }
3964 
3965 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3966    uses to send a reply and that the remote end uses to process the reply. */
3967 
setup_stub_lkb(struct dlm_ls * ls,struct dlm_message * ms)3968 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3969 {
3970 	struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3971 	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3972 	lkb->lkb_remid = ms->m_lkid;
3973 }
3974 
3975 /* This is called after the rsb is locked so that we can safely inspect
3976    fields in the lkb. */
3977 
validate_message(struct dlm_lkb * lkb,struct dlm_message * ms)3978 static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3979 {
3980 	int from = ms->m_header.h_nodeid;
3981 	int error = 0;
3982 
3983 	/* currently mixing of user/kernel locks are not supported */
3984 	if (ms->m_flags & DLM_IFL_USER && ~lkb->lkb_flags & DLM_IFL_USER) {
3985 		log_error(lkb->lkb_resource->res_ls,
3986 			  "got user dlm message for a kernel lock");
3987 		error = -EINVAL;
3988 		goto out;
3989 	}
3990 
3991 	switch (ms->m_type) {
3992 	case DLM_MSG_CONVERT:
3993 	case DLM_MSG_UNLOCK:
3994 	case DLM_MSG_CANCEL:
3995 		if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3996 			error = -EINVAL;
3997 		break;
3998 
3999 	case DLM_MSG_CONVERT_REPLY:
4000 	case DLM_MSG_UNLOCK_REPLY:
4001 	case DLM_MSG_CANCEL_REPLY:
4002 	case DLM_MSG_GRANT:
4003 	case DLM_MSG_BAST:
4004 		if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
4005 			error = -EINVAL;
4006 		break;
4007 
4008 	case DLM_MSG_REQUEST_REPLY:
4009 		if (!is_process_copy(lkb))
4010 			error = -EINVAL;
4011 		else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
4012 			error = -EINVAL;
4013 		break;
4014 
4015 	default:
4016 		error = -EINVAL;
4017 	}
4018 
4019 out:
4020 	if (error)
4021 		log_error(lkb->lkb_resource->res_ls,
4022 			  "ignore invalid message %d from %d %x %x %x %d",
4023 			  ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
4024 			  lkb->lkb_flags, lkb->lkb_nodeid);
4025 	return error;
4026 }
4027 
send_repeat_remove(struct dlm_ls * ls,char * ms_name,int len)4028 static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
4029 {
4030 	char name[DLM_RESNAME_MAXLEN + 1];
4031 	struct dlm_message *ms;
4032 	struct dlm_mhandle *mh;
4033 	struct dlm_rsb *r;
4034 	uint32_t hash, b;
4035 	int rv, dir_nodeid;
4036 
4037 	memset(name, 0, sizeof(name));
4038 	memcpy(name, ms_name, len);
4039 
4040 	hash = jhash(name, len, 0);
4041 	b = hash & (ls->ls_rsbtbl_size - 1);
4042 
4043 	dir_nodeid = dlm_hash2nodeid(ls, hash);
4044 
4045 	log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
4046 
4047 	spin_lock(&ls->ls_rsbtbl[b].lock);
4048 	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4049 	if (!rv) {
4050 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4051 		log_error(ls, "repeat_remove on keep %s", name);
4052 		return;
4053 	}
4054 
4055 	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4056 	if (!rv) {
4057 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4058 		log_error(ls, "repeat_remove on toss %s", name);
4059 		return;
4060 	}
4061 
4062 	/* use ls->remove_name2 to avoid conflict with shrink? */
4063 
4064 	spin_lock(&ls->ls_remove_spin);
4065 	ls->ls_remove_len = len;
4066 	memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
4067 	spin_unlock(&ls->ls_remove_spin);
4068 	spin_unlock(&ls->ls_rsbtbl[b].lock);
4069 
4070 	rv = _create_message(ls, sizeof(struct dlm_message) + len,
4071 			     dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
4072 	if (rv)
4073 		goto out;
4074 
4075 	memcpy(ms->m_extra, name, len);
4076 	ms->m_hash = hash;
4077 
4078 	send_message(mh, ms);
4079 
4080 out:
4081 	spin_lock(&ls->ls_remove_spin);
4082 	ls->ls_remove_len = 0;
4083 	memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
4084 	spin_unlock(&ls->ls_remove_spin);
4085 }
4086 
receive_request(struct dlm_ls * ls,struct dlm_message * ms)4087 static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
4088 {
4089 	struct dlm_lkb *lkb;
4090 	struct dlm_rsb *r;
4091 	int from_nodeid;
4092 	int error, namelen = 0;
4093 
4094 	from_nodeid = ms->m_header.h_nodeid;
4095 
4096 	error = create_lkb(ls, &lkb);
4097 	if (error)
4098 		goto fail;
4099 
4100 	receive_flags(lkb, ms);
4101 	lkb->lkb_flags |= DLM_IFL_MSTCPY;
4102 	error = receive_request_args(ls, lkb, ms);
4103 	if (error) {
4104 		__put_lkb(ls, lkb);
4105 		goto fail;
4106 	}
4107 
4108 	/* The dir node is the authority on whether we are the master
4109 	   for this rsb or not, so if the master sends us a request, we should
4110 	   recreate the rsb if we've destroyed it.   This race happens when we
4111 	   send a remove message to the dir node at the same time that the dir
4112 	   node sends us a request for the rsb. */
4113 
4114 	namelen = receive_extralen(ms);
4115 
4116 	error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
4117 			 R_RECEIVE_REQUEST, &r);
4118 	if (error) {
4119 		__put_lkb(ls, lkb);
4120 		goto fail;
4121 	}
4122 
4123 	lock_rsb(r);
4124 
4125 	if (r->res_master_nodeid != dlm_our_nodeid()) {
4126 		error = validate_master_nodeid(ls, r, from_nodeid);
4127 		if (error) {
4128 			unlock_rsb(r);
4129 			put_rsb(r);
4130 			__put_lkb(ls, lkb);
4131 			goto fail;
4132 		}
4133 	}
4134 
4135 	attach_lkb(r, lkb);
4136 	error = do_request(r, lkb);
4137 	send_request_reply(r, lkb, error);
4138 	do_request_effects(r, lkb, error);
4139 
4140 	unlock_rsb(r);
4141 	put_rsb(r);
4142 
4143 	if (error == -EINPROGRESS)
4144 		error = 0;
4145 	if (error)
4146 		dlm_put_lkb(lkb);
4147 	return 0;
4148 
4149  fail:
4150 	/* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
4151 	   and do this receive_request again from process_lookup_list once
4152 	   we get the lookup reply.  This would avoid a many repeated
4153 	   ENOTBLK request failures when the lookup reply designating us
4154 	   as master is delayed. */
4155 
4156 	/* We could repeatedly return -EBADR here if our send_remove() is
4157 	   delayed in being sent/arriving/being processed on the dir node.
4158 	   Another node would repeatedly lookup up the master, and the dir
4159 	   node would continue returning our nodeid until our send_remove
4160 	   took effect.
4161 
4162 	   We send another remove message in case our previous send_remove
4163 	   was lost/ignored/missed somehow. */
4164 
4165 	if (error != -ENOTBLK) {
4166 		log_limit(ls, "receive_request %x from %d %d",
4167 			  ms->m_lkid, from_nodeid, error);
4168 	}
4169 
4170 	if (namelen && error == -EBADR) {
4171 		send_repeat_remove(ls, ms->m_extra, namelen);
4172 		msleep(1000);
4173 	}
4174 
4175 	setup_stub_lkb(ls, ms);
4176 	send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4177 	return error;
4178 }
4179 
receive_convert(struct dlm_ls * ls,struct dlm_message * ms)4180 static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
4181 {
4182 	struct dlm_lkb *lkb;
4183 	struct dlm_rsb *r;
4184 	int error, reply = 1;
4185 
4186 	error = find_lkb(ls, ms->m_remid, &lkb);
4187 	if (error)
4188 		goto fail;
4189 
4190 	if (lkb->lkb_remid != ms->m_lkid) {
4191 		log_error(ls, "receive_convert %x remid %x recover_seq %llu "
4192 			  "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
4193 			  (unsigned long long)lkb->lkb_recover_seq,
4194 			  ms->m_header.h_nodeid, ms->m_lkid);
4195 		error = -ENOENT;
4196 		dlm_put_lkb(lkb);
4197 		goto fail;
4198 	}
4199 
4200 	r = lkb->lkb_resource;
4201 
4202 	hold_rsb(r);
4203 	lock_rsb(r);
4204 
4205 	error = validate_message(lkb, ms);
4206 	if (error)
4207 		goto out;
4208 
4209 	receive_flags(lkb, ms);
4210 
4211 	error = receive_convert_args(ls, lkb, ms);
4212 	if (error) {
4213 		send_convert_reply(r, lkb, error);
4214 		goto out;
4215 	}
4216 
4217 	reply = !down_conversion(lkb);
4218 
4219 	error = do_convert(r, lkb);
4220 	if (reply)
4221 		send_convert_reply(r, lkb, error);
4222 	do_convert_effects(r, lkb, error);
4223  out:
4224 	unlock_rsb(r);
4225 	put_rsb(r);
4226 	dlm_put_lkb(lkb);
4227 	return 0;
4228 
4229  fail:
4230 	setup_stub_lkb(ls, ms);
4231 	send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4232 	return error;
4233 }
4234 
receive_unlock(struct dlm_ls * ls,struct dlm_message * ms)4235 static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
4236 {
4237 	struct dlm_lkb *lkb;
4238 	struct dlm_rsb *r;
4239 	int error;
4240 
4241 	error = find_lkb(ls, ms->m_remid, &lkb);
4242 	if (error)
4243 		goto fail;
4244 
4245 	if (lkb->lkb_remid != ms->m_lkid) {
4246 		log_error(ls, "receive_unlock %x remid %x remote %d %x",
4247 			  lkb->lkb_id, lkb->lkb_remid,
4248 			  ms->m_header.h_nodeid, ms->m_lkid);
4249 		error = -ENOENT;
4250 		dlm_put_lkb(lkb);
4251 		goto fail;
4252 	}
4253 
4254 	r = lkb->lkb_resource;
4255 
4256 	hold_rsb(r);
4257 	lock_rsb(r);
4258 
4259 	error = validate_message(lkb, ms);
4260 	if (error)
4261 		goto out;
4262 
4263 	receive_flags(lkb, ms);
4264 
4265 	error = receive_unlock_args(ls, lkb, ms);
4266 	if (error) {
4267 		send_unlock_reply(r, lkb, error);
4268 		goto out;
4269 	}
4270 
4271 	error = do_unlock(r, lkb);
4272 	send_unlock_reply(r, lkb, error);
4273 	do_unlock_effects(r, lkb, error);
4274  out:
4275 	unlock_rsb(r);
4276 	put_rsb(r);
4277 	dlm_put_lkb(lkb);
4278 	return 0;
4279 
4280  fail:
4281 	setup_stub_lkb(ls, ms);
4282 	send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4283 	return error;
4284 }
4285 
receive_cancel(struct dlm_ls * ls,struct dlm_message * ms)4286 static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
4287 {
4288 	struct dlm_lkb *lkb;
4289 	struct dlm_rsb *r;
4290 	int error;
4291 
4292 	error = find_lkb(ls, ms->m_remid, &lkb);
4293 	if (error)
4294 		goto fail;
4295 
4296 	receive_flags(lkb, ms);
4297 
4298 	r = lkb->lkb_resource;
4299 
4300 	hold_rsb(r);
4301 	lock_rsb(r);
4302 
4303 	error = validate_message(lkb, ms);
4304 	if (error)
4305 		goto out;
4306 
4307 	error = do_cancel(r, lkb);
4308 	send_cancel_reply(r, lkb, error);
4309 	do_cancel_effects(r, lkb, error);
4310  out:
4311 	unlock_rsb(r);
4312 	put_rsb(r);
4313 	dlm_put_lkb(lkb);
4314 	return 0;
4315 
4316  fail:
4317 	setup_stub_lkb(ls, ms);
4318 	send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4319 	return error;
4320 }
4321 
receive_grant(struct dlm_ls * ls,struct dlm_message * ms)4322 static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
4323 {
4324 	struct dlm_lkb *lkb;
4325 	struct dlm_rsb *r;
4326 	int error;
4327 
4328 	error = find_lkb(ls, ms->m_remid, &lkb);
4329 	if (error)
4330 		return error;
4331 
4332 	r = lkb->lkb_resource;
4333 
4334 	hold_rsb(r);
4335 	lock_rsb(r);
4336 
4337 	error = validate_message(lkb, ms);
4338 	if (error)
4339 		goto out;
4340 
4341 	receive_flags_reply(lkb, ms);
4342 	if (is_altmode(lkb))
4343 		munge_altmode(lkb, ms);
4344 	grant_lock_pc(r, lkb, ms);
4345 	queue_cast(r, lkb, 0);
4346  out:
4347 	unlock_rsb(r);
4348 	put_rsb(r);
4349 	dlm_put_lkb(lkb);
4350 	return 0;
4351 }
4352 
receive_bast(struct dlm_ls * ls,struct dlm_message * ms)4353 static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
4354 {
4355 	struct dlm_lkb *lkb;
4356 	struct dlm_rsb *r;
4357 	int error;
4358 
4359 	error = find_lkb(ls, ms->m_remid, &lkb);
4360 	if (error)
4361 		return error;
4362 
4363 	r = lkb->lkb_resource;
4364 
4365 	hold_rsb(r);
4366 	lock_rsb(r);
4367 
4368 	error = validate_message(lkb, ms);
4369 	if (error)
4370 		goto out;
4371 
4372 	queue_bast(r, lkb, ms->m_bastmode);
4373 	lkb->lkb_highbast = ms->m_bastmode;
4374  out:
4375 	unlock_rsb(r);
4376 	put_rsb(r);
4377 	dlm_put_lkb(lkb);
4378 	return 0;
4379 }
4380 
receive_lookup(struct dlm_ls * ls,struct dlm_message * ms)4381 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
4382 {
4383 	int len, error, ret_nodeid, from_nodeid, our_nodeid;
4384 
4385 	from_nodeid = ms->m_header.h_nodeid;
4386 	our_nodeid = dlm_our_nodeid();
4387 
4388 	len = receive_extralen(ms);
4389 
4390 	error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
4391 				  &ret_nodeid, NULL);
4392 
4393 	/* Optimization: we're master so treat lookup as a request */
4394 	if (!error && ret_nodeid == our_nodeid) {
4395 		receive_request(ls, ms);
4396 		return;
4397 	}
4398 	send_lookup_reply(ls, ms, ret_nodeid, error);
4399 }
4400 
receive_remove(struct dlm_ls * ls,struct dlm_message * ms)4401 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
4402 {
4403 	char name[DLM_RESNAME_MAXLEN+1];
4404 	struct dlm_rsb *r;
4405 	uint32_t hash, b;
4406 	int rv, len, dir_nodeid, from_nodeid;
4407 
4408 	from_nodeid = ms->m_header.h_nodeid;
4409 
4410 	len = receive_extralen(ms);
4411 
4412 	if (len > DLM_RESNAME_MAXLEN) {
4413 		log_error(ls, "receive_remove from %d bad len %d",
4414 			  from_nodeid, len);
4415 		return;
4416 	}
4417 
4418 	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
4419 	if (dir_nodeid != dlm_our_nodeid()) {
4420 		log_error(ls, "receive_remove from %d bad nodeid %d",
4421 			  from_nodeid, dir_nodeid);
4422 		return;
4423 	}
4424 
4425 	/* Look for name on rsbtbl.toss, if it's there, kill it.
4426 	   If it's on rsbtbl.keep, it's being used, and we should ignore this
4427 	   message.  This is an expected race between the dir node sending a
4428 	   request to the master node at the same time as the master node sends
4429 	   a remove to the dir node.  The resolution to that race is for the
4430 	   dir node to ignore the remove message, and the master node to
4431 	   recreate the master rsb when it gets a request from the dir node for
4432 	   an rsb it doesn't have. */
4433 
4434 	memset(name, 0, sizeof(name));
4435 	memcpy(name, ms->m_extra, len);
4436 
4437 	hash = jhash(name, len, 0);
4438 	b = hash & (ls->ls_rsbtbl_size - 1);
4439 
4440 	spin_lock(&ls->ls_rsbtbl[b].lock);
4441 
4442 	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4443 	if (rv) {
4444 		/* verify the rsb is on keep list per comment above */
4445 		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4446 		if (rv) {
4447 			/* should not happen */
4448 			log_error(ls, "receive_remove from %d not found %s",
4449 				  from_nodeid, name);
4450 			spin_unlock(&ls->ls_rsbtbl[b].lock);
4451 			return;
4452 		}
4453 		if (r->res_master_nodeid != from_nodeid) {
4454 			/* should not happen */
4455 			log_error(ls, "receive_remove keep from %d master %d",
4456 				  from_nodeid, r->res_master_nodeid);
4457 			dlm_print_rsb(r);
4458 			spin_unlock(&ls->ls_rsbtbl[b].lock);
4459 			return;
4460 		}
4461 
4462 		log_debug(ls, "receive_remove from %d master %d first %x %s",
4463 			  from_nodeid, r->res_master_nodeid, r->res_first_lkid,
4464 			  name);
4465 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4466 		return;
4467 	}
4468 
4469 	if (r->res_master_nodeid != from_nodeid) {
4470 		log_error(ls, "receive_remove toss from %d master %d",
4471 			  from_nodeid, r->res_master_nodeid);
4472 		dlm_print_rsb(r);
4473 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4474 		return;
4475 	}
4476 
4477 	if (kref_put(&r->res_ref, kill_rsb)) {
4478 		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
4479 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4480 		dlm_free_rsb(r);
4481 	} else {
4482 		log_error(ls, "receive_remove from %d rsb ref error",
4483 			  from_nodeid);
4484 		dlm_print_rsb(r);
4485 		spin_unlock(&ls->ls_rsbtbl[b].lock);
4486 	}
4487 }
4488 
receive_purge(struct dlm_ls * ls,struct dlm_message * ms)4489 static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
4490 {
4491 	do_purge(ls, ms->m_nodeid, ms->m_pid);
4492 }
4493 
receive_request_reply(struct dlm_ls * ls,struct dlm_message * ms)4494 static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
4495 {
4496 	struct dlm_lkb *lkb;
4497 	struct dlm_rsb *r;
4498 	int error, mstype, result;
4499 	int from_nodeid = ms->m_header.h_nodeid;
4500 
4501 	error = find_lkb(ls, ms->m_remid, &lkb);
4502 	if (error)
4503 		return error;
4504 
4505 	r = lkb->lkb_resource;
4506 	hold_rsb(r);
4507 	lock_rsb(r);
4508 
4509 	error = validate_message(lkb, ms);
4510 	if (error)
4511 		goto out;
4512 
4513 	mstype = lkb->lkb_wait_type;
4514 	error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4515 	if (error) {
4516 		log_error(ls, "receive_request_reply %x remote %d %x result %d",
4517 			  lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
4518 		dlm_dump_rsb(r);
4519 		goto out;
4520 	}
4521 
4522 	/* Optimization: the dir node was also the master, so it took our
4523 	   lookup as a request and sent request reply instead of lookup reply */
4524 	if (mstype == DLM_MSG_LOOKUP) {
4525 		r->res_master_nodeid = from_nodeid;
4526 		r->res_nodeid = from_nodeid;
4527 		lkb->lkb_nodeid = from_nodeid;
4528 	}
4529 
4530 	/* this is the value returned from do_request() on the master */
4531 	result = ms->m_result;
4532 
4533 	switch (result) {
4534 	case -EAGAIN:
4535 		/* request would block (be queued) on remote master */
4536 		queue_cast(r, lkb, -EAGAIN);
4537 		confirm_master(r, -EAGAIN);
4538 		unhold_lkb(lkb); /* undoes create_lkb() */
4539 		break;
4540 
4541 	case -EINPROGRESS:
4542 	case 0:
4543 		/* request was queued or granted on remote master */
4544 		receive_flags_reply(lkb, ms);
4545 		lkb->lkb_remid = ms->m_lkid;
4546 		if (is_altmode(lkb))
4547 			munge_altmode(lkb, ms);
4548 		if (result) {
4549 			add_lkb(r, lkb, DLM_LKSTS_WAITING);
4550 			add_timeout(lkb);
4551 		} else {
4552 			grant_lock_pc(r, lkb, ms);
4553 			queue_cast(r, lkb, 0);
4554 		}
4555 		confirm_master(r, result);
4556 		break;
4557 
4558 	case -EBADR:
4559 	case -ENOTBLK:
4560 		/* find_rsb failed to find rsb or rsb wasn't master */
4561 		log_limit(ls, "receive_request_reply %x from %d %d "
4562 			  "master %d dir %d first %x %s", lkb->lkb_id,
4563 			  from_nodeid, result, r->res_master_nodeid,
4564 			  r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4565 
4566 		if (r->res_dir_nodeid != dlm_our_nodeid() &&
4567 		    r->res_master_nodeid != dlm_our_nodeid()) {
4568 			/* cause _request_lock->set_master->send_lookup */
4569 			r->res_master_nodeid = 0;
4570 			r->res_nodeid = -1;
4571 			lkb->lkb_nodeid = -1;
4572 		}
4573 
4574 		if (is_overlap(lkb)) {
4575 			/* we'll ignore error in cancel/unlock reply */
4576 			queue_cast_overlap(r, lkb);
4577 			confirm_master(r, result);
4578 			unhold_lkb(lkb); /* undoes create_lkb() */
4579 		} else {
4580 			_request_lock(r, lkb);
4581 
4582 			if (r->res_master_nodeid == dlm_our_nodeid())
4583 				confirm_master(r, 0);
4584 		}
4585 		break;
4586 
4587 	default:
4588 		log_error(ls, "receive_request_reply %x error %d",
4589 			  lkb->lkb_id, result);
4590 	}
4591 
4592 	if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
4593 		log_debug(ls, "receive_request_reply %x result %d unlock",
4594 			  lkb->lkb_id, result);
4595 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4596 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4597 		send_unlock(r, lkb);
4598 	} else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
4599 		log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4600 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4601 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4602 		send_cancel(r, lkb);
4603 	} else {
4604 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4605 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4606 	}
4607  out:
4608 	unlock_rsb(r);
4609 	put_rsb(r);
4610 	dlm_put_lkb(lkb);
4611 	return 0;
4612 }
4613 
__receive_convert_reply(struct dlm_rsb * r,struct dlm_lkb * lkb,struct dlm_message * ms)4614 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4615 				    struct dlm_message *ms)
4616 {
4617 	/* this is the value returned from do_convert() on the master */
4618 	switch (ms->m_result) {
4619 	case -EAGAIN:
4620 		/* convert would block (be queued) on remote master */
4621 		queue_cast(r, lkb, -EAGAIN);
4622 		break;
4623 
4624 	case -EDEADLK:
4625 		receive_flags_reply(lkb, ms);
4626 		revert_lock_pc(r, lkb);
4627 		queue_cast(r, lkb, -EDEADLK);
4628 		break;
4629 
4630 	case -EINPROGRESS:
4631 		/* convert was queued on remote master */
4632 		receive_flags_reply(lkb, ms);
4633 		if (is_demoted(lkb))
4634 			munge_demoted(lkb);
4635 		del_lkb(r, lkb);
4636 		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
4637 		add_timeout(lkb);
4638 		break;
4639 
4640 	case 0:
4641 		/* convert was granted on remote master */
4642 		receive_flags_reply(lkb, ms);
4643 		if (is_demoted(lkb))
4644 			munge_demoted(lkb);
4645 		grant_lock_pc(r, lkb, ms);
4646 		queue_cast(r, lkb, 0);
4647 		break;
4648 
4649 	default:
4650 		log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
4651 			  lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid,
4652 			  ms->m_result);
4653 		dlm_print_rsb(r);
4654 		dlm_print_lkb(lkb);
4655 	}
4656 }
4657 
_receive_convert_reply(struct dlm_lkb * lkb,struct dlm_message * ms)4658 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4659 {
4660 	struct dlm_rsb *r = lkb->lkb_resource;
4661 	int error;
4662 
4663 	hold_rsb(r);
4664 	lock_rsb(r);
4665 
4666 	error = validate_message(lkb, ms);
4667 	if (error)
4668 		goto out;
4669 
4670 	/* stub reply can happen with waiters_mutex held */
4671 	error = remove_from_waiters_ms(lkb, ms);
4672 	if (error)
4673 		goto out;
4674 
4675 	__receive_convert_reply(r, lkb, ms);
4676  out:
4677 	unlock_rsb(r);
4678 	put_rsb(r);
4679 }
4680 
receive_convert_reply(struct dlm_ls * ls,struct dlm_message * ms)4681 static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
4682 {
4683 	struct dlm_lkb *lkb;
4684 	int error;
4685 
4686 	error = find_lkb(ls, ms->m_remid, &lkb);
4687 	if (error)
4688 		return error;
4689 
4690 	_receive_convert_reply(lkb, ms);
4691 	dlm_put_lkb(lkb);
4692 	return 0;
4693 }
4694 
_receive_unlock_reply(struct dlm_lkb * lkb,struct dlm_message * ms)4695 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4696 {
4697 	struct dlm_rsb *r = lkb->lkb_resource;
4698 	int error;
4699 
4700 	hold_rsb(r);
4701 	lock_rsb(r);
4702 
4703 	error = validate_message(lkb, ms);
4704 	if (error)
4705 		goto out;
4706 
4707 	/* stub reply can happen with waiters_mutex held */
4708 	error = remove_from_waiters_ms(lkb, ms);
4709 	if (error)
4710 		goto out;
4711 
4712 	/* this is the value returned from do_unlock() on the master */
4713 
4714 	switch (ms->m_result) {
4715 	case -DLM_EUNLOCK:
4716 		receive_flags_reply(lkb, ms);
4717 		remove_lock_pc(r, lkb);
4718 		queue_cast(r, lkb, -DLM_EUNLOCK);
4719 		break;
4720 	case -ENOENT:
4721 		break;
4722 	default:
4723 		log_error(r->res_ls, "receive_unlock_reply %x error %d",
4724 			  lkb->lkb_id, ms->m_result);
4725 	}
4726  out:
4727 	unlock_rsb(r);
4728 	put_rsb(r);
4729 }
4730 
receive_unlock_reply(struct dlm_ls * ls,struct dlm_message * ms)4731 static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
4732 {
4733 	struct dlm_lkb *lkb;
4734 	int error;
4735 
4736 	error = find_lkb(ls, ms->m_remid, &lkb);
4737 	if (error)
4738 		return error;
4739 
4740 	_receive_unlock_reply(lkb, ms);
4741 	dlm_put_lkb(lkb);
4742 	return 0;
4743 }
4744 
_receive_cancel_reply(struct dlm_lkb * lkb,struct dlm_message * ms)4745 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4746 {
4747 	struct dlm_rsb *r = lkb->lkb_resource;
4748 	int error;
4749 
4750 	hold_rsb(r);
4751 	lock_rsb(r);
4752 
4753 	error = validate_message(lkb, ms);
4754 	if (error)
4755 		goto out;
4756 
4757 	/* stub reply can happen with waiters_mutex held */
4758 	error = remove_from_waiters_ms(lkb, ms);
4759 	if (error)
4760 		goto out;
4761 
4762 	/* this is the value returned from do_cancel() on the master */
4763 
4764 	switch (ms->m_result) {
4765 	case -DLM_ECANCEL:
4766 		receive_flags_reply(lkb, ms);
4767 		revert_lock_pc(r, lkb);
4768 		queue_cast(r, lkb, -DLM_ECANCEL);
4769 		break;
4770 	case 0:
4771 		break;
4772 	default:
4773 		log_error(r->res_ls, "receive_cancel_reply %x error %d",
4774 			  lkb->lkb_id, ms->m_result);
4775 	}
4776  out:
4777 	unlock_rsb(r);
4778 	put_rsb(r);
4779 }
4780 
receive_cancel_reply(struct dlm_ls * ls,struct dlm_message * ms)4781 static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
4782 {
4783 	struct dlm_lkb *lkb;
4784 	int error;
4785 
4786 	error = find_lkb(ls, ms->m_remid, &lkb);
4787 	if (error)
4788 		return error;
4789 
4790 	_receive_cancel_reply(lkb, ms);
4791 	dlm_put_lkb(lkb);
4792 	return 0;
4793 }
4794 
receive_lookup_reply(struct dlm_ls * ls,struct dlm_message * ms)4795 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
4796 {
4797 	struct dlm_lkb *lkb;
4798 	struct dlm_rsb *r;
4799 	int error, ret_nodeid;
4800 	int do_lookup_list = 0;
4801 
4802 	error = find_lkb(ls, ms->m_lkid, &lkb);
4803 	if (error) {
4804 		log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid);
4805 		return;
4806 	}
4807 
4808 	/* ms->m_result is the value returned by dlm_master_lookup on dir node
4809 	   FIXME: will a non-zero error ever be returned? */
4810 
4811 	r = lkb->lkb_resource;
4812 	hold_rsb(r);
4813 	lock_rsb(r);
4814 
4815 	error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
4816 	if (error)
4817 		goto out;
4818 
4819 	ret_nodeid = ms->m_nodeid;
4820 
4821 	/* We sometimes receive a request from the dir node for this
4822 	   rsb before we've received the dir node's loookup_reply for it.
4823 	   The request from the dir node implies we're the master, so we set
4824 	   ourself as master in receive_request_reply, and verify here that
4825 	   we are indeed the master. */
4826 
4827 	if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
4828 		/* This should never happen */
4829 		log_error(ls, "receive_lookup_reply %x from %d ret %d "
4830 			  "master %d dir %d our %d first %x %s",
4831 			  lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
4832 			  r->res_master_nodeid, r->res_dir_nodeid,
4833 			  dlm_our_nodeid(), r->res_first_lkid, r->res_name);
4834 	}
4835 
4836 	if (ret_nodeid == dlm_our_nodeid()) {
4837 		r->res_master_nodeid = ret_nodeid;
4838 		r->res_nodeid = 0;
4839 		do_lookup_list = 1;
4840 		r->res_first_lkid = 0;
4841 	} else if (ret_nodeid == -1) {
4842 		/* the remote node doesn't believe it's the dir node */
4843 		log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
4844 			  lkb->lkb_id, ms->m_header.h_nodeid);
4845 		r->res_master_nodeid = 0;
4846 		r->res_nodeid = -1;
4847 		lkb->lkb_nodeid = -1;
4848 	} else {
4849 		/* set_master() will set lkb_nodeid from r */
4850 		r->res_master_nodeid = ret_nodeid;
4851 		r->res_nodeid = ret_nodeid;
4852 	}
4853 
4854 	if (is_overlap(lkb)) {
4855 		log_debug(ls, "receive_lookup_reply %x unlock %x",
4856 			  lkb->lkb_id, lkb->lkb_flags);
4857 		queue_cast_overlap(r, lkb);
4858 		unhold_lkb(lkb); /* undoes create_lkb() */
4859 		goto out_list;
4860 	}
4861 
4862 	_request_lock(r, lkb);
4863 
4864  out_list:
4865 	if (do_lookup_list)
4866 		process_lookup_list(r);
4867  out:
4868 	unlock_rsb(r);
4869 	put_rsb(r);
4870 	dlm_put_lkb(lkb);
4871 }
4872 
_receive_message(struct dlm_ls * ls,struct dlm_message * ms,uint32_t saved_seq)4873 static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4874 			     uint32_t saved_seq)
4875 {
4876 	int error = 0, noent = 0;
4877 
4878 	if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
4879 		log_limit(ls, "receive %d from non-member %d %x %x %d",
4880 			  ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
4881 			  ms->m_remid, ms->m_result);
4882 		return;
4883 	}
4884 
4885 	switch (ms->m_type) {
4886 
4887 	/* messages sent to a master node */
4888 
4889 	case DLM_MSG_REQUEST:
4890 		error = receive_request(ls, ms);
4891 		break;
4892 
4893 	case DLM_MSG_CONVERT:
4894 		error = receive_convert(ls, ms);
4895 		break;
4896 
4897 	case DLM_MSG_UNLOCK:
4898 		error = receive_unlock(ls, ms);
4899 		break;
4900 
4901 	case DLM_MSG_CANCEL:
4902 		noent = 1;
4903 		error = receive_cancel(ls, ms);
4904 		break;
4905 
4906 	/* messages sent from a master node (replies to above) */
4907 
4908 	case DLM_MSG_REQUEST_REPLY:
4909 		error = receive_request_reply(ls, ms);
4910 		break;
4911 
4912 	case DLM_MSG_CONVERT_REPLY:
4913 		error = receive_convert_reply(ls, ms);
4914 		break;
4915 
4916 	case DLM_MSG_UNLOCK_REPLY:
4917 		error = receive_unlock_reply(ls, ms);
4918 		break;
4919 
4920 	case DLM_MSG_CANCEL_REPLY:
4921 		error = receive_cancel_reply(ls, ms);
4922 		break;
4923 
4924 	/* messages sent from a master node (only two types of async msg) */
4925 
4926 	case DLM_MSG_GRANT:
4927 		noent = 1;
4928 		error = receive_grant(ls, ms);
4929 		break;
4930 
4931 	case DLM_MSG_BAST:
4932 		noent = 1;
4933 		error = receive_bast(ls, ms);
4934 		break;
4935 
4936 	/* messages sent to a dir node */
4937 
4938 	case DLM_MSG_LOOKUP:
4939 		receive_lookup(ls, ms);
4940 		break;
4941 
4942 	case DLM_MSG_REMOVE:
4943 		receive_remove(ls, ms);
4944 		break;
4945 
4946 	/* messages sent from a dir node (remove has no reply) */
4947 
4948 	case DLM_MSG_LOOKUP_REPLY:
4949 		receive_lookup_reply(ls, ms);
4950 		break;
4951 
4952 	/* other messages */
4953 
4954 	case DLM_MSG_PURGE:
4955 		receive_purge(ls, ms);
4956 		break;
4957 
4958 	default:
4959 		log_error(ls, "unknown message type %d", ms->m_type);
4960 	}
4961 
4962 	/*
4963 	 * When checking for ENOENT, we're checking the result of
4964 	 * find_lkb(m_remid):
4965 	 *
4966 	 * The lock id referenced in the message wasn't found.  This may
4967 	 * happen in normal usage for the async messages and cancel, so
4968 	 * only use log_debug for them.
4969 	 *
4970 	 * Some errors are expected and normal.
4971 	 */
4972 
4973 	if (error == -ENOENT && noent) {
4974 		log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
4975 			  ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4976 			  ms->m_lkid, saved_seq);
4977 	} else if (error == -ENOENT) {
4978 		log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
4979 			  ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4980 			  ms->m_lkid, saved_seq);
4981 
4982 		if (ms->m_type == DLM_MSG_CONVERT)
4983 			dlm_dump_rsb_hash(ls, ms->m_hash);
4984 	}
4985 
4986 	if (error == -EINVAL) {
4987 		log_error(ls, "receive %d inval from %d lkid %x remid %x "
4988 			  "saved_seq %u",
4989 			  ms->m_type, ms->m_header.h_nodeid,
4990 			  ms->m_lkid, ms->m_remid, saved_seq);
4991 	}
4992 }
4993 
4994 /* If the lockspace is in recovery mode (locking stopped), then normal
4995    messages are saved on the requestqueue for processing after recovery is
4996    done.  When not in recovery mode, we wait for dlm_recoverd to drain saved
4997    messages off the requestqueue before we process new ones. This occurs right
4998    after recovery completes when we transition from saving all messages on
4999    requestqueue, to processing all the saved messages, to processing new
5000    messages as they arrive. */
5001 
dlm_receive_message(struct dlm_ls * ls,struct dlm_message * ms,int nodeid)5002 static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
5003 				int nodeid)
5004 {
5005 	if (dlm_locking_stopped(ls)) {
5006 		/* If we were a member of this lockspace, left, and rejoined,
5007 		   other nodes may still be sending us messages from the
5008 		   lockspace generation before we left. */
5009 		if (!ls->ls_generation) {
5010 			log_limit(ls, "receive %d from %d ignore old gen",
5011 				  ms->m_type, nodeid);
5012 			return;
5013 		}
5014 
5015 		dlm_add_requestqueue(ls, nodeid, ms);
5016 	} else {
5017 		dlm_wait_requestqueue(ls);
5018 		_receive_message(ls, ms, 0);
5019 	}
5020 }
5021 
5022 /* This is called by dlm_recoverd to process messages that were saved on
5023    the requestqueue. */
5024 
dlm_receive_message_saved(struct dlm_ls * ls,struct dlm_message * ms,uint32_t saved_seq)5025 void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
5026 			       uint32_t saved_seq)
5027 {
5028 	_receive_message(ls, ms, saved_seq);
5029 }
5030 
5031 /* This is called by the midcomms layer when something is received for
5032    the lockspace.  It could be either a MSG (normal message sent as part of
5033    standard locking activity) or an RCOM (recovery message sent as part of
5034    lockspace recovery). */
5035 
dlm_receive_buffer(union dlm_packet * p,int nodeid)5036 void dlm_receive_buffer(union dlm_packet *p, int nodeid)
5037 {
5038 	struct dlm_header *hd = &p->header;
5039 	struct dlm_ls *ls;
5040 	int type = 0;
5041 
5042 	switch (hd->h_cmd) {
5043 	case DLM_MSG:
5044 		dlm_message_in(&p->message);
5045 		type = p->message.m_type;
5046 		break;
5047 	case DLM_RCOM:
5048 		dlm_rcom_in(&p->rcom);
5049 		type = p->rcom.rc_type;
5050 		break;
5051 	default:
5052 		log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
5053 		return;
5054 	}
5055 
5056 	if (hd->h_nodeid != nodeid) {
5057 		log_print("invalid h_nodeid %d from %d lockspace %x",
5058 			  hd->h_nodeid, nodeid, hd->h_lockspace);
5059 		return;
5060 	}
5061 
5062 	ls = dlm_find_lockspace_global(hd->h_lockspace);
5063 	if (!ls) {
5064 		if (dlm_config.ci_log_debug) {
5065 			printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
5066 				"%u from %d cmd %d type %d\n",
5067 				hd->h_lockspace, nodeid, hd->h_cmd, type);
5068 		}
5069 
5070 		if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
5071 			dlm_send_ls_not_ready(nodeid, &p->rcom);
5072 		return;
5073 	}
5074 
5075 	/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
5076 	   be inactive (in this ls) before transitioning to recovery mode */
5077 
5078 	down_read(&ls->ls_recv_active);
5079 	if (hd->h_cmd == DLM_MSG)
5080 		dlm_receive_message(ls, &p->message, nodeid);
5081 	else
5082 		dlm_receive_rcom(ls, &p->rcom, nodeid);
5083 	up_read(&ls->ls_recv_active);
5084 
5085 	dlm_put_lockspace(ls);
5086 }
5087 
recover_convert_waiter(struct dlm_ls * ls,struct dlm_lkb * lkb,struct dlm_message * ms_stub)5088 static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
5089 				   struct dlm_message *ms_stub)
5090 {
5091 	if (middle_conversion(lkb)) {
5092 		hold_lkb(lkb);
5093 		memset(ms_stub, 0, sizeof(struct dlm_message));
5094 		ms_stub->m_flags = DLM_IFL_STUB_MS;
5095 		ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
5096 		ms_stub->m_result = -EINPROGRESS;
5097 		ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5098 		_receive_convert_reply(lkb, ms_stub);
5099 
5100 		/* Same special case as in receive_rcom_lock_args() */
5101 		lkb->lkb_grmode = DLM_LOCK_IV;
5102 		rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
5103 		unhold_lkb(lkb);
5104 
5105 	} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
5106 		lkb->lkb_flags |= DLM_IFL_RESEND;
5107 	}
5108 
5109 	/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
5110 	   conversions are async; there's no reply from the remote master */
5111 }
5112 
5113 /* A waiting lkb needs recovery if the master node has failed, or
5114    the master node is changing (only when no directory is used) */
5115 
waiter_needs_recovery(struct dlm_ls * ls,struct dlm_lkb * lkb,int dir_nodeid)5116 static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
5117 				 int dir_nodeid)
5118 {
5119 	if (dlm_no_directory(ls))
5120 		return 1;
5121 
5122 	if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
5123 		return 1;
5124 
5125 	return 0;
5126 }
5127 
5128 /* Recovery for locks that are waiting for replies from nodes that are now
5129    gone.  We can just complete unlocks and cancels by faking a reply from the
5130    dead node.  Requests and up-conversions we flag to be resent after
5131    recovery.  Down-conversions can just be completed with a fake reply like
5132    unlocks.  Conversions between PR and CW need special attention. */
5133 
dlm_recover_waiters_pre(struct dlm_ls * ls)5134 void dlm_recover_waiters_pre(struct dlm_ls *ls)
5135 {
5136 	struct dlm_lkb *lkb, *safe;
5137 	struct dlm_message *ms_stub;
5138 	int wait_type, stub_unlock_result, stub_cancel_result;
5139 	int dir_nodeid;
5140 
5141 	ms_stub = kmalloc(sizeof(*ms_stub), GFP_KERNEL);
5142 	if (!ms_stub)
5143 		return;
5144 
5145 	mutex_lock(&ls->ls_waiters_mutex);
5146 
5147 	list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
5148 
5149 		dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
5150 
5151 		/* exclude debug messages about unlocks because there can be so
5152 		   many and they aren't very interesting */
5153 
5154 		if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
5155 			log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5156 				  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
5157 				  lkb->lkb_id,
5158 				  lkb->lkb_remid,
5159 				  lkb->lkb_wait_type,
5160 				  lkb->lkb_resource->res_nodeid,
5161 				  lkb->lkb_nodeid,
5162 				  lkb->lkb_wait_nodeid,
5163 				  dir_nodeid);
5164 		}
5165 
5166 		/* all outstanding lookups, regardless of destination  will be
5167 		   resent after recovery is done */
5168 
5169 		if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
5170 			lkb->lkb_flags |= DLM_IFL_RESEND;
5171 			continue;
5172 		}
5173 
5174 		if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
5175 			continue;
5176 
5177 		wait_type = lkb->lkb_wait_type;
5178 		stub_unlock_result = -DLM_EUNLOCK;
5179 		stub_cancel_result = -DLM_ECANCEL;
5180 
5181 		/* Main reply may have been received leaving a zero wait_type,
5182 		   but a reply for the overlapping op may not have been
5183 		   received.  In that case we need to fake the appropriate
5184 		   reply for the overlap op. */
5185 
5186 		if (!wait_type) {
5187 			if (is_overlap_cancel(lkb)) {
5188 				wait_type = DLM_MSG_CANCEL;
5189 				if (lkb->lkb_grmode == DLM_LOCK_IV)
5190 					stub_cancel_result = 0;
5191 			}
5192 			if (is_overlap_unlock(lkb)) {
5193 				wait_type = DLM_MSG_UNLOCK;
5194 				if (lkb->lkb_grmode == DLM_LOCK_IV)
5195 					stub_unlock_result = -ENOENT;
5196 			}
5197 
5198 			log_debug(ls, "rwpre overlap %x %x %d %d %d",
5199 				  lkb->lkb_id, lkb->lkb_flags, wait_type,
5200 				  stub_cancel_result, stub_unlock_result);
5201 		}
5202 
5203 		switch (wait_type) {
5204 
5205 		case DLM_MSG_REQUEST:
5206 			lkb->lkb_flags |= DLM_IFL_RESEND;
5207 			break;
5208 
5209 		case DLM_MSG_CONVERT:
5210 			recover_convert_waiter(ls, lkb, ms_stub);
5211 			break;
5212 
5213 		case DLM_MSG_UNLOCK:
5214 			hold_lkb(lkb);
5215 			memset(ms_stub, 0, sizeof(struct dlm_message));
5216 			ms_stub->m_flags = DLM_IFL_STUB_MS;
5217 			ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
5218 			ms_stub->m_result = stub_unlock_result;
5219 			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5220 			_receive_unlock_reply(lkb, ms_stub);
5221 			dlm_put_lkb(lkb);
5222 			break;
5223 
5224 		case DLM_MSG_CANCEL:
5225 			hold_lkb(lkb);
5226 			memset(ms_stub, 0, sizeof(struct dlm_message));
5227 			ms_stub->m_flags = DLM_IFL_STUB_MS;
5228 			ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
5229 			ms_stub->m_result = stub_cancel_result;
5230 			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5231 			_receive_cancel_reply(lkb, ms_stub);
5232 			dlm_put_lkb(lkb);
5233 			break;
5234 
5235 		default:
5236 			log_error(ls, "invalid lkb wait_type %d %d",
5237 				  lkb->lkb_wait_type, wait_type);
5238 		}
5239 		schedule();
5240 	}
5241 	mutex_unlock(&ls->ls_waiters_mutex);
5242 	kfree(ms_stub);
5243 }
5244 
find_resend_waiter(struct dlm_ls * ls)5245 static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
5246 {
5247 	struct dlm_lkb *lkb = NULL, *iter;
5248 
5249 	mutex_lock(&ls->ls_waiters_mutex);
5250 	list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
5251 		if (iter->lkb_flags & DLM_IFL_RESEND) {
5252 			hold_lkb(iter);
5253 			lkb = iter;
5254 			break;
5255 		}
5256 	}
5257 	mutex_unlock(&ls->ls_waiters_mutex);
5258 
5259 	return lkb;
5260 }
5261 
5262 /* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
5263    master or dir-node for r.  Processing the lkb may result in it being placed
5264    back on waiters. */
5265 
5266 /* We do this after normal locking has been enabled and any saved messages
5267    (in requestqueue) have been processed.  We should be confident that at
5268    this point we won't get or process a reply to any of these waiting
5269    operations.  But, new ops may be coming in on the rsbs/locks here from
5270    userspace or remotely. */
5271 
5272 /* there may have been an overlap unlock/cancel prior to recovery or after
5273    recovery.  if before, the lkb may still have a pos wait_count; if after, the
5274    overlap flag would just have been set and nothing new sent.  we can be
5275    confident here than any replies to either the initial op or overlap ops
5276    prior to recovery have been received. */
5277 
dlm_recover_waiters_post(struct dlm_ls * ls)5278 int dlm_recover_waiters_post(struct dlm_ls *ls)
5279 {
5280 	struct dlm_lkb *lkb;
5281 	struct dlm_rsb *r;
5282 	int error = 0, mstype, err, oc, ou;
5283 
5284 	while (1) {
5285 		if (dlm_locking_stopped(ls)) {
5286 			log_debug(ls, "recover_waiters_post aborted");
5287 			error = -EINTR;
5288 			break;
5289 		}
5290 
5291 		lkb = find_resend_waiter(ls);
5292 		if (!lkb)
5293 			break;
5294 
5295 		r = lkb->lkb_resource;
5296 		hold_rsb(r);
5297 		lock_rsb(r);
5298 
5299 		mstype = lkb->lkb_wait_type;
5300 		oc = is_overlap_cancel(lkb);
5301 		ou = is_overlap_unlock(lkb);
5302 		err = 0;
5303 
5304 		log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5305 			  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
5306 			  "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
5307 			  r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
5308 			  dlm_dir_nodeid(r), oc, ou);
5309 
5310 		/* At this point we assume that we won't get a reply to any
5311 		   previous op or overlap op on this lock.  First, do a big
5312 		   remove_from_waiters() for all previous ops. */
5313 
5314 		lkb->lkb_flags &= ~DLM_IFL_RESEND;
5315 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
5316 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
5317 		lkb->lkb_wait_type = 0;
5318 		/* drop all wait_count references we still
5319 		 * hold a reference for this iteration.
5320 		 */
5321 		while (lkb->lkb_wait_count) {
5322 			lkb->lkb_wait_count--;
5323 			unhold_lkb(lkb);
5324 		}
5325 		mutex_lock(&ls->ls_waiters_mutex);
5326 		list_del_init(&lkb->lkb_wait_reply);
5327 		mutex_unlock(&ls->ls_waiters_mutex);
5328 
5329 		if (oc || ou) {
5330 			/* do an unlock or cancel instead of resending */
5331 			switch (mstype) {
5332 			case DLM_MSG_LOOKUP:
5333 			case DLM_MSG_REQUEST:
5334 				queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
5335 							-DLM_ECANCEL);
5336 				unhold_lkb(lkb); /* undoes create_lkb() */
5337 				break;
5338 			case DLM_MSG_CONVERT:
5339 				if (oc) {
5340 					queue_cast(r, lkb, -DLM_ECANCEL);
5341 				} else {
5342 					lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
5343 					_unlock_lock(r, lkb);
5344 				}
5345 				break;
5346 			default:
5347 				err = 1;
5348 			}
5349 		} else {
5350 			switch (mstype) {
5351 			case DLM_MSG_LOOKUP:
5352 			case DLM_MSG_REQUEST:
5353 				_request_lock(r, lkb);
5354 				if (is_master(r))
5355 					confirm_master(r, 0);
5356 				break;
5357 			case DLM_MSG_CONVERT:
5358 				_convert_lock(r, lkb);
5359 				break;
5360 			default:
5361 				err = 1;
5362 			}
5363 		}
5364 
5365 		if (err) {
5366 			log_error(ls, "waiter %x msg %d r_nodeid %d "
5367 				  "dir_nodeid %d overlap %d %d",
5368 				  lkb->lkb_id, mstype, r->res_nodeid,
5369 				  dlm_dir_nodeid(r), oc, ou);
5370 		}
5371 		unlock_rsb(r);
5372 		put_rsb(r);
5373 		dlm_put_lkb(lkb);
5374 	}
5375 
5376 	return error;
5377 }
5378 
purge_mstcpy_list(struct dlm_ls * ls,struct dlm_rsb * r,struct list_head * list)5379 static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
5380 			      struct list_head *list)
5381 {
5382 	struct dlm_lkb *lkb, *safe;
5383 
5384 	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5385 		if (!is_master_copy(lkb))
5386 			continue;
5387 
5388 		/* don't purge lkbs we've added in recover_master_copy for
5389 		   the current recovery seq */
5390 
5391 		if (lkb->lkb_recover_seq == ls->ls_recover_seq)
5392 			continue;
5393 
5394 		del_lkb(r, lkb);
5395 
5396 		/* this put should free the lkb */
5397 		if (!dlm_put_lkb(lkb))
5398 			log_error(ls, "purged mstcpy lkb not released");
5399 	}
5400 }
5401 
dlm_purge_mstcpy_locks(struct dlm_rsb * r)5402 void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
5403 {
5404 	struct dlm_ls *ls = r->res_ls;
5405 
5406 	purge_mstcpy_list(ls, r, &r->res_grantqueue);
5407 	purge_mstcpy_list(ls, r, &r->res_convertqueue);
5408 	purge_mstcpy_list(ls, r, &r->res_waitqueue);
5409 }
5410 
purge_dead_list(struct dlm_ls * ls,struct dlm_rsb * r,struct list_head * list,int nodeid_gone,unsigned int * count)5411 static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
5412 			    struct list_head *list,
5413 			    int nodeid_gone, unsigned int *count)
5414 {
5415 	struct dlm_lkb *lkb, *safe;
5416 
5417 	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5418 		if (!is_master_copy(lkb))
5419 			continue;
5420 
5421 		if ((lkb->lkb_nodeid == nodeid_gone) ||
5422 		    dlm_is_removed(ls, lkb->lkb_nodeid)) {
5423 
5424 			/* tell recover_lvb to invalidate the lvb
5425 			   because a node holding EX/PW failed */
5426 			if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
5427 			    (lkb->lkb_grmode >= DLM_LOCK_PW)) {
5428 				rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
5429 			}
5430 
5431 			del_lkb(r, lkb);
5432 
5433 			/* this put should free the lkb */
5434 			if (!dlm_put_lkb(lkb))
5435 				log_error(ls, "purged dead lkb not released");
5436 
5437 			rsb_set_flag(r, RSB_RECOVER_GRANT);
5438 
5439 			(*count)++;
5440 		}
5441 	}
5442 }
5443 
5444 /* Get rid of locks held by nodes that are gone. */
5445 
dlm_recover_purge(struct dlm_ls * ls)5446 void dlm_recover_purge(struct dlm_ls *ls)
5447 {
5448 	struct dlm_rsb *r;
5449 	struct dlm_member *memb;
5450 	int nodes_count = 0;
5451 	int nodeid_gone = 0;
5452 	unsigned int lkb_count = 0;
5453 
5454 	/* cache one removed nodeid to optimize the common
5455 	   case of a single node removed */
5456 
5457 	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
5458 		nodes_count++;
5459 		nodeid_gone = memb->nodeid;
5460 	}
5461 
5462 	if (!nodes_count)
5463 		return;
5464 
5465 	down_write(&ls->ls_root_sem);
5466 	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
5467 		hold_rsb(r);
5468 		lock_rsb(r);
5469 		if (is_master(r)) {
5470 			purge_dead_list(ls, r, &r->res_grantqueue,
5471 					nodeid_gone, &lkb_count);
5472 			purge_dead_list(ls, r, &r->res_convertqueue,
5473 					nodeid_gone, &lkb_count);
5474 			purge_dead_list(ls, r, &r->res_waitqueue,
5475 					nodeid_gone, &lkb_count);
5476 		}
5477 		unlock_rsb(r);
5478 		unhold_rsb(r);
5479 		cond_resched();
5480 	}
5481 	up_write(&ls->ls_root_sem);
5482 
5483 	if (lkb_count)
5484 		log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
5485 			  lkb_count, nodes_count);
5486 }
5487 
find_grant_rsb(struct dlm_ls * ls,int bucket)5488 static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
5489 {
5490 	struct rb_node *n;
5491 	struct dlm_rsb *r;
5492 
5493 	spin_lock(&ls->ls_rsbtbl[bucket].lock);
5494 	for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
5495 		r = rb_entry(n, struct dlm_rsb, res_hashnode);
5496 
5497 		if (!rsb_flag(r, RSB_RECOVER_GRANT))
5498 			continue;
5499 		if (!is_master(r)) {
5500 			rsb_clear_flag(r, RSB_RECOVER_GRANT);
5501 			continue;
5502 		}
5503 		hold_rsb(r);
5504 		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5505 		return r;
5506 	}
5507 	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5508 	return NULL;
5509 }
5510 
5511 /*
5512  * Attempt to grant locks on resources that we are the master of.
5513  * Locks may have become grantable during recovery because locks
5514  * from departed nodes have been purged (or not rebuilt), allowing
5515  * previously blocked locks to now be granted.  The subset of rsb's
5516  * we are interested in are those with lkb's on either the convert or
5517  * waiting queues.
5518  *
5519  * Simplest would be to go through each master rsb and check for non-empty
5520  * convert or waiting queues, and attempt to grant on those rsbs.
5521  * Checking the queues requires lock_rsb, though, for which we'd need
5522  * to release the rsbtbl lock.  This would make iterating through all
5523  * rsb's very inefficient.  So, we rely on earlier recovery routines
5524  * to set RECOVER_GRANT on any rsb's that we should attempt to grant
5525  * locks for.
5526  */
5527 
dlm_recover_grant(struct dlm_ls * ls)5528 void dlm_recover_grant(struct dlm_ls *ls)
5529 {
5530 	struct dlm_rsb *r;
5531 	int bucket = 0;
5532 	unsigned int count = 0;
5533 	unsigned int rsb_count = 0;
5534 	unsigned int lkb_count = 0;
5535 
5536 	while (1) {
5537 		r = find_grant_rsb(ls, bucket);
5538 		if (!r) {
5539 			if (bucket == ls->ls_rsbtbl_size - 1)
5540 				break;
5541 			bucket++;
5542 			continue;
5543 		}
5544 		rsb_count++;
5545 		count = 0;
5546 		lock_rsb(r);
5547 		/* the RECOVER_GRANT flag is checked in the grant path */
5548 		grant_pending_locks(r, &count);
5549 		rsb_clear_flag(r, RSB_RECOVER_GRANT);
5550 		lkb_count += count;
5551 		confirm_master(r, 0);
5552 		unlock_rsb(r);
5553 		put_rsb(r);
5554 		cond_resched();
5555 	}
5556 
5557 	if (lkb_count)
5558 		log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
5559 			  lkb_count, rsb_count);
5560 }
5561 
search_remid_list(struct list_head * head,int nodeid,uint32_t remid)5562 static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
5563 					 uint32_t remid)
5564 {
5565 	struct dlm_lkb *lkb;
5566 
5567 	list_for_each_entry(lkb, head, lkb_statequeue) {
5568 		if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
5569 			return lkb;
5570 	}
5571 	return NULL;
5572 }
5573 
search_remid(struct dlm_rsb * r,int nodeid,uint32_t remid)5574 static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
5575 				    uint32_t remid)
5576 {
5577 	struct dlm_lkb *lkb;
5578 
5579 	lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
5580 	if (lkb)
5581 		return lkb;
5582 	lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
5583 	if (lkb)
5584 		return lkb;
5585 	lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
5586 	if (lkb)
5587 		return lkb;
5588 	return NULL;
5589 }
5590 
5591 /* needs at least dlm_rcom + rcom_lock */
receive_rcom_lock_args(struct dlm_ls * ls,struct dlm_lkb * lkb,struct dlm_rsb * r,struct dlm_rcom * rc)5592 static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
5593 				  struct dlm_rsb *r, struct dlm_rcom *rc)
5594 {
5595 	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5596 
5597 	lkb->lkb_nodeid = rc->rc_header.h_nodeid;
5598 	lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
5599 	lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
5600 	lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
5601 	lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
5602 	lkb->lkb_flags |= DLM_IFL_MSTCPY;
5603 	lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
5604 	lkb->lkb_rqmode = rl->rl_rqmode;
5605 	lkb->lkb_grmode = rl->rl_grmode;
5606 	/* don't set lkb_status because add_lkb wants to itself */
5607 
5608 	lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
5609 	lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
5610 
5611 	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
5612 		int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
5613 			 sizeof(struct rcom_lock);
5614 		if (lvblen > ls->ls_lvblen)
5615 			return -EINVAL;
5616 		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
5617 		if (!lkb->lkb_lvbptr)
5618 			return -ENOMEM;
5619 		memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
5620 	}
5621 
5622 	/* Conversions between PR and CW (middle modes) need special handling.
5623 	   The real granted mode of these converting locks cannot be determined
5624 	   until all locks have been rebuilt on the rsb (recover_conversion) */
5625 
5626 	if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
5627 	    middle_conversion(lkb)) {
5628 		rl->rl_status = DLM_LKSTS_CONVERT;
5629 		lkb->lkb_grmode = DLM_LOCK_IV;
5630 		rsb_set_flag(r, RSB_RECOVER_CONVERT);
5631 	}
5632 
5633 	return 0;
5634 }
5635 
5636 /* This lkb may have been recovered in a previous aborted recovery so we need
5637    to check if the rsb already has an lkb with the given remote nodeid/lkid.
5638    If so we just send back a standard reply.  If not, we create a new lkb with
5639    the given values and send back our lkid.  We send back our lkid by sending
5640    back the rcom_lock struct we got but with the remid field filled in. */
5641 
5642 /* needs at least dlm_rcom + rcom_lock */
dlm_recover_master_copy(struct dlm_ls * ls,struct dlm_rcom * rc)5643 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5644 {
5645 	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5646 	struct dlm_rsb *r;
5647 	struct dlm_lkb *lkb;
5648 	uint32_t remid = 0;
5649 	int from_nodeid = rc->rc_header.h_nodeid;
5650 	int error;
5651 
5652 	if (rl->rl_parent_lkid) {
5653 		error = -EOPNOTSUPP;
5654 		goto out;
5655 	}
5656 
5657 	remid = le32_to_cpu(rl->rl_lkid);
5658 
5659 	/* In general we expect the rsb returned to be R_MASTER, but we don't
5660 	   have to require it.  Recovery of masters on one node can overlap
5661 	   recovery of locks on another node, so one node can send us MSTCPY
5662 	   locks before we've made ourselves master of this rsb.  We can still
5663 	   add new MSTCPY locks that we receive here without any harm; when
5664 	   we make ourselves master, dlm_recover_masters() won't touch the
5665 	   MSTCPY locks we've received early. */
5666 
5667 	error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
5668 			 from_nodeid, R_RECEIVE_RECOVER, &r);
5669 	if (error)
5670 		goto out;
5671 
5672 	lock_rsb(r);
5673 
5674 	if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
5675 		log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
5676 			  from_nodeid, remid);
5677 		error = -EBADR;
5678 		goto out_unlock;
5679 	}
5680 
5681 	lkb = search_remid(r, from_nodeid, remid);
5682 	if (lkb) {
5683 		error = -EEXIST;
5684 		goto out_remid;
5685 	}
5686 
5687 	error = create_lkb(ls, &lkb);
5688 	if (error)
5689 		goto out_unlock;
5690 
5691 	error = receive_rcom_lock_args(ls, lkb, r, rc);
5692 	if (error) {
5693 		__put_lkb(ls, lkb);
5694 		goto out_unlock;
5695 	}
5696 
5697 	attach_lkb(r, lkb);
5698 	add_lkb(r, lkb, rl->rl_status);
5699 	error = 0;
5700 	ls->ls_recover_locks_in++;
5701 
5702 	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
5703 		rsb_set_flag(r, RSB_RECOVER_GRANT);
5704 
5705  out_remid:
5706 	/* this is the new value returned to the lock holder for
5707 	   saving in its process-copy lkb */
5708 	rl->rl_remid = cpu_to_le32(lkb->lkb_id);
5709 
5710 	lkb->lkb_recover_seq = ls->ls_recover_seq;
5711 
5712  out_unlock:
5713 	unlock_rsb(r);
5714 	put_rsb(r);
5715  out:
5716 	if (error && error != -EEXIST)
5717 		log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
5718 			  from_nodeid, remid, error);
5719 	rl->rl_result = cpu_to_le32(error);
5720 	return error;
5721 }
5722 
5723 /* needs at least dlm_rcom + rcom_lock */
dlm_recover_process_copy(struct dlm_ls * ls,struct dlm_rcom * rc)5724 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5725 {
5726 	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5727 	struct dlm_rsb *r;
5728 	struct dlm_lkb *lkb;
5729 	uint32_t lkid, remid;
5730 	int error, result;
5731 
5732 	lkid = le32_to_cpu(rl->rl_lkid);
5733 	remid = le32_to_cpu(rl->rl_remid);
5734 	result = le32_to_cpu(rl->rl_result);
5735 
5736 	error = find_lkb(ls, lkid, &lkb);
5737 	if (error) {
5738 		log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
5739 			  lkid, rc->rc_header.h_nodeid, remid, result);
5740 		return error;
5741 	}
5742 
5743 	r = lkb->lkb_resource;
5744 	hold_rsb(r);
5745 	lock_rsb(r);
5746 
5747 	if (!is_process_copy(lkb)) {
5748 		log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
5749 			  lkid, rc->rc_header.h_nodeid, remid, result);
5750 		dlm_dump_rsb(r);
5751 		unlock_rsb(r);
5752 		put_rsb(r);
5753 		dlm_put_lkb(lkb);
5754 		return -EINVAL;
5755 	}
5756 
5757 	switch (result) {
5758 	case -EBADR:
5759 		/* There's a chance the new master received our lock before
5760 		   dlm_recover_master_reply(), this wouldn't happen if we did
5761 		   a barrier between recover_masters and recover_locks. */
5762 
5763 		log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
5764 			  lkid, rc->rc_header.h_nodeid, remid, result);
5765 
5766 		dlm_send_rcom_lock(r, lkb);
5767 		goto out;
5768 	case -EEXIST:
5769 	case 0:
5770 		lkb->lkb_remid = remid;
5771 		break;
5772 	default:
5773 		log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
5774 			  lkid, rc->rc_header.h_nodeid, remid, result);
5775 	}
5776 
5777 	/* an ack for dlm_recover_locks() which waits for replies from
5778 	   all the locks it sends to new masters */
5779 	dlm_recovered_lock(r);
5780  out:
5781 	unlock_rsb(r);
5782 	put_rsb(r);
5783 	dlm_put_lkb(lkb);
5784 
5785 	return 0;
5786 }
5787 
dlm_user_request(struct dlm_ls * ls,struct dlm_user_args * ua,int mode,uint32_t flags,void * name,unsigned int namelen,unsigned long timeout_cs)5788 int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
5789 		     int mode, uint32_t flags, void *name, unsigned int namelen,
5790 		     unsigned long timeout_cs)
5791 {
5792 	struct dlm_lkb *lkb;
5793 	struct dlm_args args;
5794 	int error;
5795 
5796 	dlm_lock_recovery(ls);
5797 
5798 	error = create_lkb(ls, &lkb);
5799 	if (error) {
5800 		kfree(ua);
5801 		goto out;
5802 	}
5803 
5804 	if (flags & DLM_LKF_VALBLK) {
5805 		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5806 		if (!ua->lksb.sb_lvbptr) {
5807 			kfree(ua);
5808 			__put_lkb(ls, lkb);
5809 			error = -ENOMEM;
5810 			goto out;
5811 		}
5812 	}
5813 	error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
5814 			      fake_astfn, ua, fake_bastfn, &args);
5815 	if (error) {
5816 		kfree(ua->lksb.sb_lvbptr);
5817 		ua->lksb.sb_lvbptr = NULL;
5818 		kfree(ua);
5819 		__put_lkb(ls, lkb);
5820 		goto out;
5821 	}
5822 
5823 	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
5824 	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
5825 	   lock and that lkb_astparam is the dlm_user_args structure. */
5826 	lkb->lkb_flags |= DLM_IFL_USER;
5827 	error = request_lock(ls, lkb, name, namelen, &args);
5828 
5829 	switch (error) {
5830 	case 0:
5831 		break;
5832 	case -EINPROGRESS:
5833 		error = 0;
5834 		break;
5835 	case -EAGAIN:
5836 		error = 0;
5837 		/* fall through */
5838 	default:
5839 		__put_lkb(ls, lkb);
5840 		goto out;
5841 	}
5842 
5843 	/* add this new lkb to the per-process list of locks */
5844 	spin_lock(&ua->proc->locks_spin);
5845 	hold_lkb(lkb);
5846 	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5847 	spin_unlock(&ua->proc->locks_spin);
5848  out:
5849 	dlm_unlock_recovery(ls);
5850 	return error;
5851 }
5852 
dlm_user_convert(struct dlm_ls * ls,struct dlm_user_args * ua_tmp,int mode,uint32_t flags,uint32_t lkid,char * lvb_in,unsigned long timeout_cs)5853 int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5854 		     int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
5855 		     unsigned long timeout_cs)
5856 {
5857 	struct dlm_lkb *lkb;
5858 	struct dlm_args args;
5859 	struct dlm_user_args *ua;
5860 	int error;
5861 
5862 	dlm_lock_recovery(ls);
5863 
5864 	error = find_lkb(ls, lkid, &lkb);
5865 	if (error)
5866 		goto out;
5867 
5868 	/* user can change the params on its lock when it converts it, or
5869 	   add an lvb that didn't exist before */
5870 
5871 	ua = lkb->lkb_ua;
5872 
5873 	if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
5874 		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5875 		if (!ua->lksb.sb_lvbptr) {
5876 			error = -ENOMEM;
5877 			goto out_put;
5878 		}
5879 	}
5880 	if (lvb_in && ua->lksb.sb_lvbptr)
5881 		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5882 
5883 	ua->xid = ua_tmp->xid;
5884 	ua->castparam = ua_tmp->castparam;
5885 	ua->castaddr = ua_tmp->castaddr;
5886 	ua->bastparam = ua_tmp->bastparam;
5887 	ua->bastaddr = ua_tmp->bastaddr;
5888 	ua->user_lksb = ua_tmp->user_lksb;
5889 
5890 	error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
5891 			      fake_astfn, ua, fake_bastfn, &args);
5892 	if (error)
5893 		goto out_put;
5894 
5895 	error = convert_lock(ls, lkb, &args);
5896 
5897 	if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
5898 		error = 0;
5899  out_put:
5900 	dlm_put_lkb(lkb);
5901  out:
5902 	dlm_unlock_recovery(ls);
5903 	kfree(ua_tmp);
5904 	return error;
5905 }
5906 
5907 /*
5908  * The caller asks for an orphan lock on a given resource with a given mode.
5909  * If a matching lock exists, it's moved to the owner's list of locks and
5910  * the lkid is returned.
5911  */
5912 
dlm_user_adopt_orphan(struct dlm_ls * ls,struct dlm_user_args * ua_tmp,int mode,uint32_t flags,void * name,unsigned int namelen,unsigned long timeout_cs,uint32_t * lkid)5913 int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5914 		     int mode, uint32_t flags, void *name, unsigned int namelen,
5915 		     unsigned long timeout_cs, uint32_t *lkid)
5916 {
5917 	struct dlm_lkb *lkb = NULL, *iter;
5918 	struct dlm_user_args *ua;
5919 	int found_other_mode = 0;
5920 	int rv = 0;
5921 
5922 	mutex_lock(&ls->ls_orphans_mutex);
5923 	list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
5924 		if (iter->lkb_resource->res_length != namelen)
5925 			continue;
5926 		if (memcmp(iter->lkb_resource->res_name, name, namelen))
5927 			continue;
5928 		if (iter->lkb_grmode != mode) {
5929 			found_other_mode = 1;
5930 			continue;
5931 		}
5932 
5933 		lkb = iter;
5934 		list_del_init(&iter->lkb_ownqueue);
5935 		iter->lkb_flags &= ~DLM_IFL_ORPHAN;
5936 		*lkid = iter->lkb_id;
5937 		break;
5938 	}
5939 	mutex_unlock(&ls->ls_orphans_mutex);
5940 
5941 	if (!lkb && found_other_mode) {
5942 		rv = -EAGAIN;
5943 		goto out;
5944 	}
5945 
5946 	if (!lkb) {
5947 		rv = -ENOENT;
5948 		goto out;
5949 	}
5950 
5951 	lkb->lkb_exflags = flags;
5952 	lkb->lkb_ownpid = (int) current->pid;
5953 
5954 	ua = lkb->lkb_ua;
5955 
5956 	ua->proc = ua_tmp->proc;
5957 	ua->xid = ua_tmp->xid;
5958 	ua->castparam = ua_tmp->castparam;
5959 	ua->castaddr = ua_tmp->castaddr;
5960 	ua->bastparam = ua_tmp->bastparam;
5961 	ua->bastaddr = ua_tmp->bastaddr;
5962 	ua->user_lksb = ua_tmp->user_lksb;
5963 
5964 	/*
5965 	 * The lkb reference from the ls_orphans list was not
5966 	 * removed above, and is now considered the reference
5967 	 * for the proc locks list.
5968 	 */
5969 
5970 	spin_lock(&ua->proc->locks_spin);
5971 	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5972 	spin_unlock(&ua->proc->locks_spin);
5973  out:
5974 	kfree(ua_tmp);
5975 	return rv;
5976 }
5977 
dlm_user_unlock(struct dlm_ls * ls,struct dlm_user_args * ua_tmp,uint32_t flags,uint32_t lkid,char * lvb_in)5978 int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5979 		    uint32_t flags, uint32_t lkid, char *lvb_in)
5980 {
5981 	struct dlm_lkb *lkb;
5982 	struct dlm_args args;
5983 	struct dlm_user_args *ua;
5984 	int error;
5985 
5986 	dlm_lock_recovery(ls);
5987 
5988 	error = find_lkb(ls, lkid, &lkb);
5989 	if (error)
5990 		goto out;
5991 
5992 	ua = lkb->lkb_ua;
5993 
5994 	if (lvb_in && ua->lksb.sb_lvbptr)
5995 		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5996 	if (ua_tmp->castparam)
5997 		ua->castparam = ua_tmp->castparam;
5998 	ua->user_lksb = ua_tmp->user_lksb;
5999 
6000 	error = set_unlock_args(flags, ua, &args);
6001 	if (error)
6002 		goto out_put;
6003 
6004 	error = unlock_lock(ls, lkb, &args);
6005 
6006 	if (error == -DLM_EUNLOCK)
6007 		error = 0;
6008 	/* from validate_unlock_args() */
6009 	if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
6010 		error = 0;
6011 	if (error)
6012 		goto out_put;
6013 
6014 	spin_lock(&ua->proc->locks_spin);
6015 	/* dlm_user_add_cb() may have already taken lkb off the proc list */
6016 	if (!list_empty(&lkb->lkb_ownqueue))
6017 		list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
6018 	spin_unlock(&ua->proc->locks_spin);
6019  out_put:
6020 	dlm_put_lkb(lkb);
6021  out:
6022 	dlm_unlock_recovery(ls);
6023 	kfree(ua_tmp);
6024 	return error;
6025 }
6026 
dlm_user_cancel(struct dlm_ls * ls,struct dlm_user_args * ua_tmp,uint32_t flags,uint32_t lkid)6027 int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
6028 		    uint32_t flags, uint32_t lkid)
6029 {
6030 	struct dlm_lkb *lkb;
6031 	struct dlm_args args;
6032 	struct dlm_user_args *ua;
6033 	int error;
6034 
6035 	dlm_lock_recovery(ls);
6036 
6037 	error = find_lkb(ls, lkid, &lkb);
6038 	if (error)
6039 		goto out;
6040 
6041 	ua = lkb->lkb_ua;
6042 	if (ua_tmp->castparam)
6043 		ua->castparam = ua_tmp->castparam;
6044 	ua->user_lksb = ua_tmp->user_lksb;
6045 
6046 	error = set_unlock_args(flags, ua, &args);
6047 	if (error)
6048 		goto out_put;
6049 
6050 	error = cancel_lock(ls, lkb, &args);
6051 
6052 	if (error == -DLM_ECANCEL)
6053 		error = 0;
6054 	/* from validate_unlock_args() */
6055 	if (error == -EBUSY)
6056 		error = 0;
6057  out_put:
6058 	dlm_put_lkb(lkb);
6059  out:
6060 	dlm_unlock_recovery(ls);
6061 	kfree(ua_tmp);
6062 	return error;
6063 }
6064 
dlm_user_deadlock(struct dlm_ls * ls,uint32_t flags,uint32_t lkid)6065 int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
6066 {
6067 	struct dlm_lkb *lkb;
6068 	struct dlm_args args;
6069 	struct dlm_user_args *ua;
6070 	struct dlm_rsb *r;
6071 	int error;
6072 
6073 	dlm_lock_recovery(ls);
6074 
6075 	error = find_lkb(ls, lkid, &lkb);
6076 	if (error)
6077 		goto out;
6078 
6079 	ua = lkb->lkb_ua;
6080 
6081 	error = set_unlock_args(flags, ua, &args);
6082 	if (error)
6083 		goto out_put;
6084 
6085 	/* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
6086 
6087 	r = lkb->lkb_resource;
6088 	hold_rsb(r);
6089 	lock_rsb(r);
6090 
6091 	error = validate_unlock_args(lkb, &args);
6092 	if (error)
6093 		goto out_r;
6094 	lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
6095 
6096 	error = _cancel_lock(r, lkb);
6097  out_r:
6098 	unlock_rsb(r);
6099 	put_rsb(r);
6100 
6101 	if (error == -DLM_ECANCEL)
6102 		error = 0;
6103 	/* from validate_unlock_args() */
6104 	if (error == -EBUSY)
6105 		error = 0;
6106  out_put:
6107 	dlm_put_lkb(lkb);
6108  out:
6109 	dlm_unlock_recovery(ls);
6110 	return error;
6111 }
6112 
6113 /* lkb's that are removed from the waiters list by revert are just left on the
6114    orphans list with the granted orphan locks, to be freed by purge */
6115 
orphan_proc_lock(struct dlm_ls * ls,struct dlm_lkb * lkb)6116 static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6117 {
6118 	struct dlm_args args;
6119 	int error;
6120 
6121 	hold_lkb(lkb); /* reference for the ls_orphans list */
6122 	mutex_lock(&ls->ls_orphans_mutex);
6123 	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
6124 	mutex_unlock(&ls->ls_orphans_mutex);
6125 
6126 	set_unlock_args(0, lkb->lkb_ua, &args);
6127 
6128 	error = cancel_lock(ls, lkb, &args);
6129 	if (error == -DLM_ECANCEL)
6130 		error = 0;
6131 	return error;
6132 }
6133 
6134 /* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
6135    granted.  Regardless of what rsb queue the lock is on, it's removed and
6136    freed.  The IVVALBLK flag causes the lvb on the resource to be invalidated
6137    if our lock is PW/EX (it's ignored if our granted mode is smaller.) */
6138 
unlock_proc_lock(struct dlm_ls * ls,struct dlm_lkb * lkb)6139 static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6140 {
6141 	struct dlm_args args;
6142 	int error;
6143 
6144 	set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
6145 			lkb->lkb_ua, &args);
6146 
6147 	error = unlock_lock(ls, lkb, &args);
6148 	if (error == -DLM_EUNLOCK)
6149 		error = 0;
6150 	return error;
6151 }
6152 
6153 /* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
6154    (which does lock_rsb) due to deadlock with receiving a message that does
6155    lock_rsb followed by dlm_user_add_cb() */
6156 
del_proc_lock(struct dlm_ls * ls,struct dlm_user_proc * proc)6157 static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
6158 				     struct dlm_user_proc *proc)
6159 {
6160 	struct dlm_lkb *lkb = NULL;
6161 
6162 	mutex_lock(&ls->ls_clear_proc_locks);
6163 	if (list_empty(&proc->locks))
6164 		goto out;
6165 
6166 	lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
6167 	list_del_init(&lkb->lkb_ownqueue);
6168 
6169 	if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6170 		lkb->lkb_flags |= DLM_IFL_ORPHAN;
6171 	else
6172 		lkb->lkb_flags |= DLM_IFL_DEAD;
6173  out:
6174 	mutex_unlock(&ls->ls_clear_proc_locks);
6175 	return lkb;
6176 }
6177 
6178 /* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
6179    1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
6180    which we clear here. */
6181 
6182 /* proc CLOSING flag is set so no more device_reads should look at proc->asts
6183    list, and no more device_writes should add lkb's to proc->locks list; so we
6184    shouldn't need to take asts_spin or locks_spin here.  this assumes that
6185    device reads/writes/closes are serialized -- FIXME: we may need to serialize
6186    them ourself. */
6187 
dlm_clear_proc_locks(struct dlm_ls * ls,struct dlm_user_proc * proc)6188 void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6189 {
6190 	struct dlm_lkb *lkb, *safe;
6191 
6192 	dlm_lock_recovery(ls);
6193 
6194 	while (1) {
6195 		lkb = del_proc_lock(ls, proc);
6196 		if (!lkb)
6197 			break;
6198 		del_timeout(lkb);
6199 		if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6200 			orphan_proc_lock(ls, lkb);
6201 		else
6202 			unlock_proc_lock(ls, lkb);
6203 
6204 		/* this removes the reference for the proc->locks list
6205 		   added by dlm_user_request, it may result in the lkb
6206 		   being freed */
6207 
6208 		dlm_put_lkb(lkb);
6209 	}
6210 
6211 	mutex_lock(&ls->ls_clear_proc_locks);
6212 
6213 	/* in-progress unlocks */
6214 	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6215 		list_del_init(&lkb->lkb_ownqueue);
6216 		lkb->lkb_flags |= DLM_IFL_DEAD;
6217 		dlm_put_lkb(lkb);
6218 	}
6219 
6220 	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6221 		memset(&lkb->lkb_callbacks, 0,
6222 		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6223 		list_del_init(&lkb->lkb_cb_list);
6224 		dlm_put_lkb(lkb);
6225 	}
6226 
6227 	mutex_unlock(&ls->ls_clear_proc_locks);
6228 	dlm_unlock_recovery(ls);
6229 }
6230 
purge_proc_locks(struct dlm_ls * ls,struct dlm_user_proc * proc)6231 static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6232 {
6233 	struct dlm_lkb *lkb, *safe;
6234 
6235 	while (1) {
6236 		lkb = NULL;
6237 		spin_lock(&proc->locks_spin);
6238 		if (!list_empty(&proc->locks)) {
6239 			lkb = list_entry(proc->locks.next, struct dlm_lkb,
6240 					 lkb_ownqueue);
6241 			list_del_init(&lkb->lkb_ownqueue);
6242 		}
6243 		spin_unlock(&proc->locks_spin);
6244 
6245 		if (!lkb)
6246 			break;
6247 
6248 		lkb->lkb_flags |= DLM_IFL_DEAD;
6249 		unlock_proc_lock(ls, lkb);
6250 		dlm_put_lkb(lkb); /* ref from proc->locks list */
6251 	}
6252 
6253 	spin_lock(&proc->locks_spin);
6254 	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6255 		list_del_init(&lkb->lkb_ownqueue);
6256 		lkb->lkb_flags |= DLM_IFL_DEAD;
6257 		dlm_put_lkb(lkb);
6258 	}
6259 	spin_unlock(&proc->locks_spin);
6260 
6261 	spin_lock(&proc->asts_spin);
6262 	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6263 		memset(&lkb->lkb_callbacks, 0,
6264 		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6265 		list_del_init(&lkb->lkb_cb_list);
6266 		dlm_put_lkb(lkb);
6267 	}
6268 	spin_unlock(&proc->asts_spin);
6269 }
6270 
6271 /* pid of 0 means purge all orphans */
6272 
do_purge(struct dlm_ls * ls,int nodeid,int pid)6273 static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
6274 {
6275 	struct dlm_lkb *lkb, *safe;
6276 
6277 	mutex_lock(&ls->ls_orphans_mutex);
6278 	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
6279 		if (pid && lkb->lkb_ownpid != pid)
6280 			continue;
6281 		unlock_proc_lock(ls, lkb);
6282 		list_del_init(&lkb->lkb_ownqueue);
6283 		dlm_put_lkb(lkb);
6284 	}
6285 	mutex_unlock(&ls->ls_orphans_mutex);
6286 }
6287 
send_purge(struct dlm_ls * ls,int nodeid,int pid)6288 static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
6289 {
6290 	struct dlm_message *ms;
6291 	struct dlm_mhandle *mh;
6292 	int error;
6293 
6294 	error = _create_message(ls, sizeof(struct dlm_message), nodeid,
6295 				DLM_MSG_PURGE, &ms, &mh);
6296 	if (error)
6297 		return error;
6298 	ms->m_nodeid = nodeid;
6299 	ms->m_pid = pid;
6300 
6301 	return send_message(mh, ms);
6302 }
6303 
dlm_user_purge(struct dlm_ls * ls,struct dlm_user_proc * proc,int nodeid,int pid)6304 int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
6305 		   int nodeid, int pid)
6306 {
6307 	int error = 0;
6308 
6309 	if (nodeid && (nodeid != dlm_our_nodeid())) {
6310 		error = send_purge(ls, nodeid, pid);
6311 	} else {
6312 		dlm_lock_recovery(ls);
6313 		if (pid == current->pid)
6314 			purge_proc_locks(ls, proc);
6315 		else
6316 			do_purge(ls, nodeid, pid);
6317 		dlm_unlock_recovery(ls);
6318 	}
6319 	return error;
6320 }
6321 
6322