/*
 *  Device operations for the pnfs client.
 *
 *  Copyright (c) 2002
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *  Garth Goodson   <Garth.Goodson@netapp.com>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/export.h>
#include <linux/nfs_fs.h>
#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)


static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);

#ifdef NFS_DEBUG
void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
#endif

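/*
 * Simple multiplicative hash over the raw device ID bytes, reduced to an
 * index into nfs4_deviceid_cache.
 */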
static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}

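/*
 * Find a deviceid node in one hash bucket, matching on layout driver,
 * nfs_client and device ID.  Entries whose reference count has already
 * dropped to zero are being torn down and are skipped.  The caller must
 * hold rcu_read_lock().
 */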
static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;

	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

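/*
 * Ask the server for the device description via GETDEVICEINFO and let the
 * layout driver decode it into a freshly allocated deviceid node.  Returns
 * NULL on allocation failure or if the RPC fails.
 */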
static struct nfs4_deviceid_node *
nfs4_get_device_info(struct nfs_server *server,
		const struct nfs4_deviceid *dev_id,
		struct rpc_cred *cred, gfp_t gfp_flags)
{
	struct nfs4_deviceid_node *d = NULL;
	struct pnfs_device *pdev = NULL;
	struct page **pages = NULL;
	u32 max_resp_sz;
	int max_pages;
	int rc, i;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	if (server->pnfs_curr_ld->max_deviceinfo_size &&
	    server->pnfs_curr_ld->max_deviceinfo_size < max_resp_sz)
		max_resp_sz = server->pnfs_curr_ld->max_deviceinfo_size;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s: server %p max_resp_sz %u max_pages %d\n",
		__func__, server, max_resp_sz, max_pages);

	pdev = kzalloc(sizeof(*pdev), gfp_flags);
	if (!pdev)
		return NULL;

	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_free_pdev;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_free_pages;
	}

	memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
	pdev->layout_type = server->pnfs_curr_ld->id;
	pdev->pages = pages;
	pdev->pgbase = 0;
	pdev->pglen = max_resp_sz;
	pdev->mincount = 0;
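	/* maxcount is bounded by the reply size minus the GETDEVICEINFO XDR overhead */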
	pdev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	rc = nfs4_proc_getdeviceinfo(server, pdev, cred);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free_pages;

	/*
	 * Found new device, need to decode it and then add it to the
	 * list of known devices for this mountpoint.
	 */
	d = server->pnfs_curr_ld->alloc_deviceid_node(server, pdev,
			gfp_flags);
	if (d && pdev->nocache)
		set_bit(NFS_DEVICEID_NOCACHE, &d->flags);

out_free_pages:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
out_free_pdev:
	kfree(pdev);
	dprintk("<-- %s d %p\n", __func__, d);
	return d;
}

/*
 * Lookup a deviceid in cache and get a reference count on it if found
 *
 * @server nfs_server whose layout driver and nfs_client identify the deviceid
 * @id deviceid to look up
 * @hash hash bucket index computed from @id
 */
static struct nfs4_deviceid_node *
__nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(server->pnfs_curr_ld, server->nfs_client, id,
			hash);
	if (d != NULL && !atomic_inc_not_zero(&d->ref))
		d = NULL;
	rcu_read_unlock();
	return d;
}

struct nfs4_deviceid_node *
nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, struct rpc_cred *cred,
		gfp_t gfp_mask)
{
	long hash = nfs4_deviceid_hash(id);
	struct nfs4_deviceid_node *d, *new;

	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d)
		return d;

	new = nfs4_get_device_info(server, id, cred, gfp_mask);
	if (!new)
		return new;

	spin_lock(&nfs4_deviceid_lock);
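	/*
	 * Recheck under the lock: another task may have added the same
	 * device ID to the cache while we were waiting for GETDEVICEINFO.
	 */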
	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		server->pnfs_curr_ld->free_deviceid_node(new);
		return d;
	}
	hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
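	/*
	 * The cache holds its own reference on top of the initial one set
	 * in nfs4_init_deviceid_node(); the caller keeps that initial ref.
	 */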
	atomic_inc(&new->ref);
	spin_unlock(&nfs4_deviceid_lock);

	return new;
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);

/*
 * Remove a deviceid from the cache and drop the cache's reference to it.
 *
 * @ld layout driver that owns the deviceid
 * @clp nfs_client associated with the deviceid
 * @id the deviceid to unhash
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
			 const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
	spin_unlock(&nfs4_deviceid_lock);

	/* balance the initial ref set in nfs4_init_deviceid_node() */
	nfs4_put_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);

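/*
 * Initialize a layout-driver-allocated deviceid node with the server's
 * layout driver, its nfs_client, the device ID and an initial reference.
 */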
void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, struct nfs_server *server,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = server->pnfs_curr_ld;
	d->nfs_client = server->nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that the test for d->ref == 0 is sufficient to establish that the
 * node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (test_bit(NFS_DEVICEID_NOCACHE, &d->flags)) {
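		/*
		 * For uncached device IDs the last two references are the
		 * hash table's and this caller's; once only those remain,
		 * unhash the node so both can be dropped.
		 */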
		if (atomic_add_unless(&d->ref, -1, 2))
			return false;
		nfs4_delete_deviceid(d->ld, d->nfs_client, &d->deviceid);
	}
	if (!atomic_dec_and_test(&d->ref))
		return false;
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);

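/*
 * Mark a deviceid as temporarily unavailable and record when it failed so
 * that nfs4_test_deviceid_unavailable() can expire the state later.
 */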
void
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	node->timestamp_unavailable = jiffies;
	set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_unavailable);

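/*
 * Return true while the deviceid is still within PNFS_DEVICE_RETRY_TIMEOUT
 * of being marked unavailable; once the window has passed, clear the flag
 * so the device may be tried again.
 */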
bool
nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		unsigned long start, end;

		end = jiffies;
		start = end - PNFS_DEVICE_RETRY_TIMEOUT;
		if (time_in_range(node->timestamp_unavailable, start, end))
			return true;
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	}
	return false;
}
EXPORT_SYMBOL_GPL(nfs4_test_deviceid_unavailable);

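/*
 * Unhash every deviceid in one hash bucket that belongs to @clp, collecting
 * the nodes on a temporary list under the locks, then drop the cache
 * reference on each one afterwards.
 */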
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
			clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		nfs4_put_deviceid_node(d);
	}
}

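/*
 * Purge every cached deviceid that belongs to @clp.  Clients that never
 * negotiated EXCHGID4_FLAG_USE_PNFS_MDS are skipped.
 */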
void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	int i;

	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}