// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

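/*
 * CB_GETATTR: the server is querying the attributes of a delegated
 * file. Look up the inode by filehandle and, if this client holds a
 * write delegation for it, report the locally cached size, change
 * attribute and timestamps.
 */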
__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->ctime = timespec64_to_timespec(inode->i_ctime);
	res->mtime = timespec64_to_timespec(inode->i_mtime);
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

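/*
 * CB_RECALL: the server wants a delegation back. Find the delegated
 * inode and hand the actual return off to a helper thread, so the
 * callback thread itself never blocks on I/O to the server.
 */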
__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			     &args->stateid, -ntohl(res));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (!pnfs_layout_is_valid(lo))
				continue;
			if (stateid != NULL &&
			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				return ERR_PTR(-EAGAIN);
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				return ERR_PTR(-EAGAIN);
			}
			return inode;
		}
	}

	return ERR_PTR(-ENOENT);
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				return ERR_PTR(-EAGAIN);
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				return ERR_PTR(-EAGAIN);
			}
			return inode;
		}
	}

	return ERR_PTR(-ENOENT);
}

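/*
 * Lookup a layout inode, trying the stateid first and falling back to
 * the filehandle. Callers map an -EAGAIN result to NFS4ERR_DELAY so
 * that the server retries the callback later.
 */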
static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (inode == ERR_PTR(-ENOENT))
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_NOMATCHING_LAYOUT;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}

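/*
 * Handle CB_LAYOUTRECALL for a single file: flush any outstanding
 * layoutcommit, validate the recall stateid, then mark the matching
 * layout segments for return.
 */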
static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (IS_ERR(ino)) {
		if (ino == ERR_PTR(-EAGAIN))
			rv = NFS4ERR_DELAY;
		goto out_noput;
	}

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
	switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
	case 0:
	case -EBUSY:
		/* There are layout segments that need to be returned */
		rv = NFS4_OK;
		break;
	case -ENOENT:
		/* Embrace your forgetfulness! */
		rv = NFS4ERR_NOMATCHING_LAYOUT;

		if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
			NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
				&args->cbl_range);
		}
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	nfs_iput_and_deactive(ino);
out_noput:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	return rv;
}

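/*
 * Handle CB_LAYOUTRECALL for an entire fsid or client: destroy all
 * matching layouts. A failure becomes NFS4ERR_DELAY so the server
 * retries; otherwise answer NFS4ERR_NOMATCHING_LAYOUT, since this
 * client forgets recalled layouts rather than returning them.
 */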
static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

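/* Dispatch a CB_LAYOUTRECALL to the single-file or bulk (fsid/all) handler */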
static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args);
	return initiate_bulk_draining(clp, args);
}

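/*
 * CB_LAYOUTRECALL: the server is recalling one or more pNFS layouts.
 */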
__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	return cpu_to_be32(res);
}

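/*
 * Drop every layout held from this server. Used to satisfy the pNFS
 * layout portion of CB_RECALL_ANY below.
 */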
static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

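/*
 * CB_NOTIFY_DEVICEID: the server is changing or deleting device IDs.
 * Purge each notified device ID from the layout driver's deviceid
 * cache, reusing the layout driver reference across consecutive
 * entries of the same layout type.
 */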
__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	const struct pnfs_layoutdriver_type *ld = NULL;
	uint32_t i;
	__be32 res = 0;

	if (!cps->clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!ld || ld->id != dev->cbd_layout_type) {
			pnfs_put_layoutdriver(ld);
			ld = pnfs_find_layoutdriver(dev->cbd_layout_type);
			if (!ld)
				continue;
		}
		nfs4_delete_deviceid(ld, cps->clp, &dev->cbd_dev_id);
	}
	pnfs_put_layoutdriver(ld);
out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound. On success the caller updates
 * the slot's sequence number.
 *
 * We don't yet implement a duplicate request cache; instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now,
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single-slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table. The lower layer
 * guarantees a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs *args)
{
	if (args->csa_slotid > tbl->server_highest_slotid)
		return htonl(NFS4ERR_BADSLOT);

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			return htonl(NFS4ERR_DELAY);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		return htonl(NFS4ERR_SEQ_FALSE_RETRY);
	}

	/* Note: wraparound relies on seq_nr being of type u32 */
	if (likely(args->csa_sequenceid == slot->seq_nr + 1))
		return htonl(NFS4_OK);

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
}

/*
 * For each referring call triple, check the session's slot table for
 * a match. If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static int referring_call_exists(struct nfs_client *clp,
				 uint32_t nrclists,
				 struct referring_call_list *rclists,
				 spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	int status = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
			spin_unlock(lock);
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			spin_lock(lock);
			if (status)
				goto out;
		}
	}

out:
	return status;
}

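/*
 * CB_SEQUENCE: establish the callback context. Locate the client by
 * session ID, validate the slot and sequence number, and check any
 * referring call lists before the remaining operations in the
 * compound are processed.
 */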
__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls. If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
				  &tbl->slot_tbl_lock) < 0) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

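/* Reject a CB_RECALL_ANY type mask containing bits we don't recognise */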
static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

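/*
 * CB_RECALL_ANY: the server asks the client to give back some number
 * of recallable objects. Expire unused read and/or write delegations
 * and, if requested, recall all pNFS layouts.
 */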
__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);

	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

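/*
 * CB_NOTIFY_LOCK: a previously contended lock may now be available,
 * so wake up any lock waiters for this client.
 */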
__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
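/*
 * Copy the result of a CB_OFFLOAD callback into the copy state that a
 * waiting COPY request will pick up.
 */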
static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
			      struct cb_offloadargs *args)
{
	cp_state->count = args->wr_count;
	cp_state->error = args->error;
	if (!args->error) {
		cp_state->verf.committed = args->wr_writeverf.committed;
		memcpy(&cp_state->verf.verifier.data[0],
		       &args->wr_writeverf.verifier.data[0],
		       NFS4_VERIFIER_SIZE);
	}
}

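/*
 * CB_OFFLOAD: the server reports completion of an asynchronous COPY.
 * If a matching in-flight copy is found, complete it; otherwise stash
 * the result on the client's pending list in case the callback raced
 * ahead of the COPY reply.
 */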
__be32 nfs4_callback_offload(void *data, void *dummy,
			     struct cb_process_state *cps)
{
	struct cb_offloadargs *args = data;
	struct nfs_server *server;
	struct nfs4_copy_state *copy, *tmp_copy;
	bool found = false;

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return htonl(NFS4ERR_SERVERFAULT);

	spin_lock(&cps->clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
				client_link) {
		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
			if (memcmp(args->coa_stateid.other,
				   tmp_copy->stateid.other,
				   sizeof(args->coa_stateid.other)))
				continue;
			nfs4_copy_cb_args(tmp_copy, args);
			complete(&tmp_copy->completion);
			found = true;
			goto out;
		}
	}
out:
	rcu_read_unlock();
	if (!found) {
		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
		nfs4_copy_cb_args(copy, args);
		list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
	} else
		kfree(copy);
	spin_unlock(&cps->clp->cl_lock);

	return 0;
}
#endif /* CONFIG_NFS_V4_2 */