/*
 * Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/slab.h>

#include "dlm_internal.h"
#include "lockspace.h"

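/*
 * ops_lock protects both lists below.  send_list holds requests waiting
 * to be read by the userspace daemon (typically dlm_controld) through
 * the plock misc device; recv_list holds requests that have been handed
 * to userspace and are waiting for a result to be written back.
 */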
static spinlock_t ops_lock;
static struct list_head send_list;
static struct list_head recv_list;
static wait_queue_head_t send_wq;
static wait_queue_head_t recv_wq;

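/*
 * Extra state for asynchronous requests, i.e. those that provide an
 * lm_grant callback (the lockd/NFS path).  The caller is notified through
 * the callback instead of sleeping in dlm_posix_lock().
 */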
struct plock_async_data {
	void *fl;
	void *file;
	struct file_lock flc;
	int (*callback)(struct file_lock *fl, int result);
};

struct plock_op {
	struct list_head list;
	int done;
	struct dlm_plock_info info;
	/* if set indicates async handling */
	struct plock_async_data *data;
};

static inline void set_version(struct dlm_plock_info *info)
{
	info->version[0] = DLM_PLOCK_VERSION_MAJOR;
	info->version[1] = DLM_PLOCK_VERSION_MINOR;
	info->version[2] = DLM_PLOCK_VERSION_PATCH;
}

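/* the major version must match exactly; the user's minor version may not
   be newer than the kernel's */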
static int check_version(struct dlm_plock_info *info)
{
	if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
	    (DLM_PLOCK_VERSION_MINOR < info->version[1])) {
		log_print("plock device version mismatch: "
			  "kernel (%u.%u.%u), user (%u.%u.%u)",
			  DLM_PLOCK_VERSION_MAJOR,
			  DLM_PLOCK_VERSION_MINOR,
			  DLM_PLOCK_VERSION_PATCH,
			  info->version[0],
			  info->version[1],
			  info->version[2]);
		return -EINVAL;
	}
	return 0;
}

static void dlm_release_plock_op(struct plock_op *op)
{
	kfree(op->data);
	kfree(op);
}

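/* queue a request for userspace and wake up any reader waiting on the
   plock device */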
static void send_op(struct plock_op *op)
{
	set_version(&op->info);
	INIT_LIST_HEAD(&op->list);
	spin_lock(&ops_lock);
	list_add_tail(&op->list, &send_list);
	spin_unlock(&ops_lock);
	wake_up(&send_wq);
}

/* If a process was killed while waiting for the only plock on a file,
   locks_remove_posix will not see any lock on the file so it won't
   send an unlock-close to us to pass on to userspace to clean up the
   abandoned waiter.  So, we have to insert the unlock-close when the
   lock call is interrupted. */

static void do_unlock_close(const struct dlm_plock_info *info)
{
	struct plock_op *op;

	op = kzalloc(sizeof(*op), GFP_NOFS);
	if (!op)
		return;

	op->info.optype = DLM_PLOCK_OP_UNLOCK;
	op->info.pid = info->pid;
	op->info.fsid = info->fsid;
	op->info.number = info->number;
	op->info.start = 0;
	op->info.end = OFFSET_MAX;
	op->info.owner = info->owner;

	op->info.flags |= DLM_PLOCK_FL_CLOSE;
	send_op(op);
}

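/* Handle an F_SETLK/F_SETLKW request from the filesystem.  The request is
   passed to userspace; unless an lm_grant callback is provided, we block
   here until the result is written back through dev_write(). */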
int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
		   int cmd, struct file_lock *fl)
{
	struct plock_async_data *op_data;
	struct dlm_ls *ls;
	struct plock_op *op;
	int rv;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	op = kzalloc(sizeof(*op), GFP_NOFS);
	if (!op) {
		rv = -ENOMEM;
		goto out;
	}

	op->info.optype = DLM_PLOCK_OP_LOCK;
	op->info.pid = fl->fl_pid;
	op->info.ex = (fl->fl_type == F_WRLCK);
	op->info.wait = IS_SETLKW(cmd);
	op->info.fsid = ls->ls_global_id;
	op->info.number = number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	/* async handling */
	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
		if (!op_data) {
			dlm_release_plock_op(op);
			rv = -ENOMEM;
			goto out;
		}

		/* fl_owner is lockd which doesn't distinguish
		   processes on the nfs client */
		op->info.owner = (__u64) fl->fl_pid;
		op_data->callback = fl->fl_lmops->lm_grant;
		locks_init_lock(&op_data->flc);
		locks_copy_lock(&op_data->flc, fl);
		op_data->fl = fl;
		op_data->file = file;

		op->data = op_data;

		send_op(op);
		rv = FILE_LOCK_DEFERRED;
		goto out;
	} else {
		op->info.owner = (__u64)(long) fl->fl_owner;
	}

	send_op(op);

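	/*
	 * Wait for userspace to write back a result.  If the waiting
	 * process is killed, drop the op and queue an unlock-close so
	 * userspace can clean up the abandoned waiter (see the comment
	 * above do_unlock_close()).
	 */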
	rv = wait_event_killable(recv_wq, (op->done != 0));
	if (rv == -ERESTARTSYS) {
		spin_lock(&ops_lock);
		list_del(&op->list);
		spin_unlock(&ops_lock);
		log_debug(ls, "%s: wait interrupted %x %llx pid %d",
			  __func__, ls->ls_global_id,
			  (unsigned long long)number, op->info.pid);
		dlm_release_plock_op(op);
		do_unlock_close(&op->info);
		goto out;
	}

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		log_error(ls, "dlm_posix_lock: op on list %llx",
			  (unsigned long long)number);
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	rv = op->info.rv;

	if (!rv) {
		if (locks_lock_file_wait(file, fl) < 0)
			log_error(ls, "dlm_posix_lock: vfs lock error %llx",
				  (unsigned long long)number);
	}

	dlm_release_plock_op(op);
out:
	dlm_put_lockspace(ls);
	return rv;
}
EXPORT_SYMBOL_GPL(dlm_posix_lock);

/* Returns failure iff a successful lock operation should be canceled */
static int dlm_plock_callback(struct plock_op *op)
{
	struct plock_async_data *op_data = op->data;
	struct file *file;
	struct file_lock *fl;
	struct file_lock *flc;
	int (*notify)(struct file_lock *fl, int result) = NULL;
	int rv = 0;

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		log_print("dlm_plock_callback: op on list %llx",
			  (unsigned long long)op->info.number);
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	/* check if the following 2 are still valid or make a copy */
	file = op_data->file;
	flc = &op_data->flc;
	fl = op_data->fl;
	notify = op_data->callback;

	if (op->info.rv) {
		notify(fl, op->info.rv);
		goto out;
	}

	/* got fs lock; bookkeep locally as well: */
	flc->fl_flags &= ~FL_SLEEP;
	if (posix_lock_file(file, flc, NULL)) {
		/*
		 * This can only happen in the case of kmalloc() failure.
		 * The filesystem's own lock is the authoritative lock,
		 * so a failure to get the lock locally is not a disaster.
		 * As long as the fs cannot reliably cancel locks (especially
		 * in a low-memory situation), we're better off ignoring
		 * this failure than trying to recover.
		 */
		log_print("dlm_plock_callback: vfs lock error %llx file %p fl %p",
			  (unsigned long long)op->info.number, file, fl);
	}

	rv = notify(fl, 0);
	if (rv) {
		/* XXX: We need to cancel the fs lock here: */
		log_print("dlm_plock_callback: lock granted after lock request "
			  "failed; dangling lock!\n");
		goto out;
	}

out:
	dlm_release_plock_op(op);
	return rv;
}

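/* Handle an F_UNLCK request.  The local vfs lock is dropped first, then
   the unlock is passed to userspace; close-time unlocks are sent without
   waiting for a reply. */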
int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
		     struct file_lock *fl)
{
	struct dlm_ls *ls;
	struct plock_op *op;
	int rv;
	unsigned char fl_flags = fl->fl_flags;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	op = kzalloc(sizeof(*op), GFP_NOFS);
	if (!op) {
		rv = -ENOMEM;
		goto out;
	}

	/* cause the vfs unlock to return ENOENT if lock is not found */
	fl->fl_flags |= FL_EXISTS;

	rv = locks_lock_file_wait(file, fl);
	if (rv == -ENOENT) {
		rv = 0;
		goto out_free;
	}
	if (rv < 0) {
		log_error(ls, "dlm_posix_unlock: vfs unlock error %d %llx",
			  rv, (unsigned long long)number);
	}

	op->info.optype = DLM_PLOCK_OP_UNLOCK;
	op->info.pid = fl->fl_pid;
	op->info.fsid = ls->ls_global_id;
	op->info.number = number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	if (fl->fl_lmops && fl->fl_lmops->lm_grant)
		op->info.owner = (__u64) fl->fl_pid;
	else
		op->info.owner = (__u64)(long) fl->fl_owner;

	if (fl->fl_flags & FL_CLOSE) {
		op->info.flags |= DLM_PLOCK_FL_CLOSE;
		send_op(op);
		rv = 0;
		goto out;
	}

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		log_error(ls, "dlm_posix_unlock: op on list %llx",
			  (unsigned long long)number);
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	rv = op->info.rv;

	if (rv == -ENOENT)
		rv = 0;

out_free:
	dlm_release_plock_op(op);
out:
	dlm_put_lockspace(ls);
	fl->fl_flags = fl_flags;
	return rv;
}
EXPORT_SYMBOL_GPL(dlm_posix_unlock);

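/* Handle an F_GETLK request: ask userspace whether a conflicting lock
   exists and, if one does, fill in *fl with its details. */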
int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
		  struct file_lock *fl)
{
	struct dlm_ls *ls;
	struct plock_op *op;
	int rv;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	op = kzalloc(sizeof(*op), GFP_NOFS);
	if (!op) {
		rv = -ENOMEM;
		goto out;
	}

	op->info.optype = DLM_PLOCK_OP_GET;
	op->info.pid = fl->fl_pid;
	op->info.ex = (fl->fl_type == F_WRLCK);
	op->info.fsid = ls->ls_global_id;
	op->info.number = number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	if (fl->fl_lmops && fl->fl_lmops->lm_grant)
		op->info.owner = (__u64) fl->fl_pid;
	else
		op->info.owner = (__u64)(long) fl->fl_owner;

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		log_error(ls, "dlm_posix_get: op on list %llx",
			  (unsigned long long)number);
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	/* info.rv from userspace is 1 for conflict, 0 for no-conflict,
	   -ENOENT if there are no locks on the file */

	rv = op->info.rv;

	fl->fl_type = F_UNLCK;
	if (rv == -ENOENT)
		rv = 0;
	else if (rv > 0) {
		locks_init_lock(fl);
		fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
		fl->fl_flags = FL_POSIX;
		fl->fl_pid = op->info.pid;
		if (op->info.nodeid != dlm_our_nodeid())
			fl->fl_pid = -fl->fl_pid;
		fl->fl_start = op->info.start;
		fl->fl_end = op->info.end;
		rv = 0;
	}

	dlm_release_plock_op(op);
out:
	dlm_put_lockspace(ls);
	return rv;
}
EXPORT_SYMBOL_GPL(dlm_posix_get);

/* a read copies out one plock request from the send list */
static ssize_t dev_read(struct file *file, char __user *u, size_t count,
			loff_t *ppos)
{
	struct dlm_plock_info info;
	struct plock_op *op = NULL;

	if (count < sizeof(info))
		return -EINVAL;

	spin_lock(&ops_lock);
	if (!list_empty(&send_list)) {
		op = list_entry(send_list.next, struct plock_op, list);
		if (op->info.flags & DLM_PLOCK_FL_CLOSE)
			list_del(&op->list);
		else
			list_move_tail(&op->list, &recv_list);
		memcpy(&info, &op->info, sizeof(info));
	}
	spin_unlock(&ops_lock);

	if (!op)
		return -EAGAIN;

	/* there is no need to get a reply from userspace for unlocks
	   that were generated by the vfs cleaning up for a close
	   (the process did not make an unlock call). */

	if (op->info.flags & DLM_PLOCK_FL_CLOSE)
		dlm_release_plock_op(op);

	if (copy_to_user(u, &info, sizeof(info)))
		return -EFAULT;
	return sizeof(info);
}

/* a write copies in one plock result that should match a plock_op
   on the recv list */
static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
			 loff_t *ppos)
{
	struct plock_op *op = NULL, *iter;
	struct dlm_plock_info info;
	int do_callback = 0;

	if (count != sizeof(info))
		return -EINVAL;

	if (copy_from_user(&info, u, sizeof(info)))
		return -EFAULT;

	if (check_version(&info))
		return -EINVAL;

	/*
	 * The results for waiting ops (SETLKW) can be returned in any
	 * order, so match all fields to find the op. The results for
	 * non-waiting ops are returned in the order that they were sent
	 * to userspace, so match the result with the first non-waiting op.
	 */
	spin_lock(&ops_lock);
	if (info.wait) {
		list_for_each_entry(iter, &recv_list, list) {
			if (iter->info.fsid == info.fsid &&
			    iter->info.number == info.number &&
			    iter->info.owner == info.owner &&
			    iter->info.pid == info.pid &&
			    iter->info.start == info.start &&
			    iter->info.end == info.end &&
			    iter->info.ex == info.ex &&
			    iter->info.wait) {
				op = iter;
				break;
			}
		}
	} else {
		list_for_each_entry(iter, &recv_list, list) {
			if (!iter->info.wait &&
			    iter->info.fsid == info.fsid) {
				op = iter;
				break;
			}
		}
	}

	if (op) {
		/* Sanity check that op and info match. */
		if (info.wait)
			WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
		else
			WARN_ON(op->info.number != info.number ||
				op->info.owner != info.owner ||
				op->info.optype != info.optype);

		list_del_init(&op->list);
		memcpy(&op->info, &info, sizeof(info));
		if (op->data)
			do_callback = 1;
		else
			op->done = 1;
	}
	spin_unlock(&ops_lock);

	if (op) {
		if (do_callback)
			dlm_plock_callback(op);
		else
			wake_up(&recv_wq);
	} else
		log_print("%s: no op %x %llx", __func__,
			  info.fsid, (unsigned long long)info.number);
	return count;
}

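/* the device is readable whenever requests are waiting on the send list */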
static __poll_t dev_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &send_wq, wait);

	spin_lock(&ops_lock);
	if (!list_empty(&send_list))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&ops_lock);

	return mask;
}

static const struct file_operations dev_fops = {
	.read = dev_read,
	.write = dev_write,
	.poll = dev_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice plock_dev_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DLM_PLOCK_MISC_NAME,
	.fops = &dev_fops
};

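/* set up the op lists and waitqueues, then register the plock misc device */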
int dlm_plock_init(void)
{
	int rv;

	spin_lock_init(&ops_lock);
	INIT_LIST_HEAD(&send_list);
	INIT_LIST_HEAD(&recv_list);
	init_waitqueue_head(&send_wq);
	init_waitqueue_head(&recv_wq);

	rv = misc_register(&plock_dev_misc);
	if (rv)
		log_print("dlm_plock_init: misc_register failed %d", rv);
	return rv;
}

void dlm_plock_exit(void)
{
	misc_deregister(&plock_dev_misc);
}