/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include "scif_main.h"

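/*
 * File operations backing the SCIF misc character device. Each open file
 * is bound to a SCIF endpoint kept in f->private_data.
 */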
static int scif_fdopen(struct inode *inode, struct file *f)
{
	struct scif_endpt *priv = scif_open();

	if (!priv)
		return -ENOMEM;
	f->private_data = priv;
	return 0;
}

static int scif_fdclose(struct inode *inode, struct file *f)
{
	struct scif_endpt *priv = f->private_data;

	return scif_close(priv);
}

static int scif_fdmmap(struct file *f, struct vm_area_struct *vma)
{
	struct scif_endpt *priv = f->private_data;

	return scif_mmap(vma, priv);
}

static __poll_t scif_fdpoll(struct file *f, poll_table *wait)
{
	struct scif_endpt *priv = f->private_data;

	return __scif_pollfd(f, wait, priv);
}

static int scif_fdflush(struct file *f, fl_owner_t id)
{
	struct scif_endpt *ep = f->private_data;

	spin_lock(&ep->lock);
	/*
	 * The listening endpoint stashes its open file information before
	 * waiting for incoming connections. If the application closes the
	 * endpoint while a separate thread is waiting for connections, the
	 * release callback is never invoked, because the accept IOCTL bumps
	 * the file descriptor reference count. Call the flush routine if the
	 * id matches the endpoint's stashed open file information so that
	 * the listening endpoint can be woken up and the fd released.
	 */
	if (ep->files == id)
		__scif_flush(ep);
	spin_unlock(&ep->lock);
	return 0;
}

static __always_inline void scif_err_debug(int err, const char *str)
{
	/*
	 * ENOTCONN is a common, uninteresting error which would otherwise
	 * flood the console with debug messages.
	 */
	if (err < 0 && err != -ENOTCONN)
		dev_dbg(scif_info.mdev.this_device, "%s err %d\n", str, err);
}

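/*
 * ioctl dispatcher for the SCIF character device. Each command copies its
 * argument structure in from user space, calls the corresponding SCIF API
 * on the endpoint stored in f->private_data and, where needed, copies the
 * results back out.
 */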
static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct scif_endpt *priv = f->private_data;
	void __user *argp = (void __user *)arg;
	int err = 0;
	struct scifioctl_msg request;
	bool non_block = false;

	non_block = !!(f->f_flags & O_NONBLOCK);

	switch (cmd) {
	case SCIF_BIND:
	{
		int pn;

		if (copy_from_user(&pn, argp, sizeof(pn)))
			return -EFAULT;

		pn = scif_bind(priv, pn);
		if (pn < 0)
			return pn;

		if (copy_to_user(argp, &pn, sizeof(pn)))
			return -EFAULT;

		return 0;
	}
	case SCIF_LISTEN:
		return scif_listen(priv, arg);
	case SCIF_CONNECT:
	{
		struct scifioctl_connect req;
		struct scif_endpt *ep = (struct scif_endpt *)priv;

		if (copy_from_user(&req, argp, sizeof(req)))
			return -EFAULT;

		err = __scif_connect(priv, &req.peer, non_block);
		if (err < 0)
			return err;

		req.self.node = ep->port.node;
		req.self.port = ep->port.port;

		if (copy_to_user(argp, &req, sizeof(req)))
			return -EFAULT;

		return 0;
	}
	/*
	 * Accept is done in two halves. The request ioctl accepts the
	 * incoming connection and returns information about it, including
	 * the internal ID of the endpoint. The register ioctl is then issued
	 * with that internal ID on a new file descriptor opened by the
	 * requesting process (see the usage sketch after the SCIF_ACCEPTREG
	 * case below).
	 */
	case SCIF_ACCEPTREQ:
	{
		struct scifioctl_accept request;
		scif_epd_t *ep = (scif_epd_t *)&request.endpt;

		if (copy_from_user(&request, argp, sizeof(request)))
			return -EFAULT;

		err = scif_accept(priv, &request.peer, ep, request.flags);
		if (err < 0)
			return err;

		if (copy_to_user(argp, &request, sizeof(request))) {
			scif_close(*ep);
			return -EFAULT;
		}
		/*
		 * Add to the list of user mode eps where the second half
		 * of the accept is not yet completed.
		 */
		mutex_lock(&scif_info.eplock);
		list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept);
		list_add_tail(&((*ep)->liacceptlist), &priv->li_accept);
		(*ep)->listenep = priv;
		priv->acceptcnt++;
		mutex_unlock(&scif_info.eplock);

		return 0;
	}
	case SCIF_ACCEPTREG:
	{
		struct scif_endpt *priv = f->private_data;
		struct scif_endpt *newep;
		struct scif_endpt *lisep;
		struct scif_endpt *fep = NULL;
		struct scif_endpt *tmpep;
		struct list_head *pos, *tmpq;

		/* Copy in the pointer to the accepted endpoint */
		if (copy_from_user(&newep, argp, sizeof(void *)))
			return -EFAULT;

		/* Remove from the user accept queue */
		mutex_lock(&scif_info.eplock);
		list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
			tmpep = list_entry(pos,
					   struct scif_endpt, miacceptlist);
			if (tmpep == newep) {
				list_del(pos);
				fep = tmpep;
				break;
			}
		}

		if (!fep) {
			mutex_unlock(&scif_info.eplock);
			return -ENOENT;
		}

		lisep = newep->listenep;
		list_for_each_safe(pos, tmpq, &lisep->li_accept) {
			tmpep = list_entry(pos,
					   struct scif_endpt, liacceptlist);
			if (tmpep == newep) {
				list_del(pos);
				lisep->acceptcnt--;
				break;
			}
		}

		mutex_unlock(&scif_info.eplock);

		/* Free the resources automatically created from the open. */
		scif_anon_inode_fput(priv);
		scif_teardown_ep(priv);
		scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD);
		/* Finally replace the listening endpoint with the accepted one */
		f->private_data = newep;
		return 0;
	}
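	/*
	 * Illustrative user-space flow for the two-half accept above (a
	 * sketch only; error handling is omitted and the scifioctl_accept
	 * layout is the one declared in scif_ioctl.h). The listening fd is
	 * assumed to have already been bound and marked listening via
	 * SCIF_BIND and SCIF_LISTEN, and the device node is assumed to be
	 * /dev/scif:
	 *
	 *	struct scifioctl_accept req = { .flags = SCIF_ACCEPT_SYNC };
	 *
	 *	ioctl(listen_fd, SCIF_ACCEPTREQ, &req);      // first half
	 *	new_fd = open("/dev/scif", O_RDWR);          // fresh endpoint fd
	 *	ioctl(new_fd, SCIF_ACCEPTREG, &req.endpt);   // second half
	 */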
	case SCIF_SEND:
	{
		struct scif_endpt *priv = f->private_data;

		if (copy_from_user(&request, argp,
				   sizeof(struct scifioctl_msg))) {
			err = -EFAULT;
			goto send_err;
		}
		err = scif_user_send(priv, (void __user *)request.msg,
				     request.len, request.flags);
		if (err < 0)
			goto send_err;
		if (copy_to_user(&
				 ((struct scifioctl_msg __user *)argp)->out_len,
				 &err, sizeof(err))) {
			err = -EFAULT;
			goto send_err;
		}
		err = 0;
send_err:
		scif_err_debug(err, "scif_send");
		return err;
	}
	case SCIF_RECV:
	{
		struct scif_endpt *priv = f->private_data;

		if (copy_from_user(&request, argp,
				   sizeof(struct scifioctl_msg))) {
			err = -EFAULT;
			goto recv_err;
		}

		err = scif_user_recv(priv, (void __user *)request.msg,
				     request.len, request.flags);
		if (err < 0)
			goto recv_err;

		if (copy_to_user(&
				 ((struct scifioctl_msg __user *)argp)->out_len,
				 &err, sizeof(err))) {
			err = -EFAULT;
			goto recv_err;
		}
		err = 0;
recv_err:
		scif_err_debug(err, "scif_recv");
		return err;
	}
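	/*
	 * SCIF_GET_NODEIDS: report the IDs of the SCIF nodes that are
	 * online, copying out at most the caller-supplied number of
	 * entries, along with the ID of the local node.
	 */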
	case SCIF_GET_NODEIDS:
	{
		struct scifioctl_node_ids node_ids;
		int entries;
		u16 *nodes;
		void __user *unodes, *uself;
		u16 self;

		if (copy_from_user(&node_ids, argp, sizeof(node_ids))) {
			err = -EFAULT;
			goto getnodes_err2;
		}

		entries = min_t(int, scif_info.maxid, node_ids.len);
		nodes = kmalloc_array(entries, sizeof(u16), GFP_KERNEL);
		if (entries && !nodes) {
			err = -ENOMEM;
			goto getnodes_err2;
		}
		node_ids.len = scif_get_node_ids(nodes, entries, &self);

		unodes = (void __user *)node_ids.nodes;
		if (copy_to_user(unodes, nodes, sizeof(u16) * entries)) {
			err = -EFAULT;
			goto getnodes_err1;
		}

		uself = (void __user *)node_ids.self;
		if (copy_to_user(uself, &self, sizeof(u16))) {
			err = -EFAULT;
			goto getnodes_err1;
		}

		if (copy_to_user(argp, &node_ids, sizeof(node_ids))) {
			err = -EFAULT;
			goto getnodes_err1;
		}
getnodes_err1:
		kfree(nodes);
getnodes_err2:
		return err;
	}
	case SCIF_REG:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_reg reg;
		off_t ret;

		if (copy_from_user(&reg, argp, sizeof(reg))) {
			err = -EFAULT;
			goto reg_err;
		}
		if (reg.flags & SCIF_MAP_KERNEL) {
			err = -EINVAL;
			goto reg_err;
		}
		ret = scif_register(priv, (void *)reg.addr, reg.len,
				    reg.offset, reg.prot, reg.flags);
		if (ret < 0) {
			err = (int)ret;
			goto reg_err;
		}

		if (copy_to_user(&((struct scifioctl_reg __user *)argp)
				 ->out_offset, &ret, sizeof(reg.out_offset))) {
			err = -EFAULT;
			goto reg_err;
		}
		err = 0;
reg_err:
		scif_err_debug(err, "scif_register");
		return err;
	}
	case SCIF_UNREG:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_unreg unreg;

		if (copy_from_user(&unreg, argp, sizeof(unreg))) {
			err = -EFAULT;
			goto unreg_err;
		}
		err = scif_unregister(priv, unreg.offset, unreg.len);
unreg_err:
		scif_err_debug(err, "scif_unregister");
		return err;
	}
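	/*
	 * The four copy ioctls below drive SCIF RMA transfers against a
	 * registered window. READFROM/WRITETO operate on registered local
	 * offsets, while VREADFROM/VWRITETO take a local virtual address.
	 * All of them share the scifioctl_copy argument layout.
	 */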
	case SCIF_READFROM:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_copy copy;

		if (copy_from_user(&copy, argp, sizeof(copy))) {
			err = -EFAULT;
			goto readfrom_err;
		}
		err = scif_readfrom(priv, copy.loffset, copy.len, copy.roffset,
				    copy.flags);
readfrom_err:
		scif_err_debug(err, "scif_readfrom");
		return err;
	}
	case SCIF_WRITETO:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_copy copy;

		if (copy_from_user(&copy, argp, sizeof(copy))) {
			err = -EFAULT;
			goto writeto_err;
		}
		err = scif_writeto(priv, copy.loffset, copy.len, copy.roffset,
				   copy.flags);
writeto_err:
		scif_err_debug(err, "scif_writeto");
		return err;
	}
	case SCIF_VREADFROM:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_copy copy;

		if (copy_from_user(&copy, argp, sizeof(copy))) {
			err = -EFAULT;
			goto vreadfrom_err;
		}
		err = scif_vreadfrom(priv, (void __force *)copy.addr, copy.len,
				     copy.roffset, copy.flags);
vreadfrom_err:
		scif_err_debug(err, "scif_vreadfrom");
		return err;
	}
	case SCIF_VWRITETO:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_copy copy;

		if (copy_from_user(&copy, argp, sizeof(copy))) {
			err = -EFAULT;
			goto vwriteto_err;
		}
		err = scif_vwriteto(priv, (void __force *)copy.addr, copy.len,
				    copy.roffset, copy.flags);
vwriteto_err:
		scif_err_debug(err, "scif_vwriteto");
		return err;
	}
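	/*
	 * The fence ioctls below order the RMA transfers above: MARK places
	 * a mark in the RMA stream, WAIT blocks until the RMAs covered by a
	 * mark complete, and SIGNAL writes values to local/remote offsets
	 * once the preceding RMAs have completed.
	 */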
	case SCIF_FENCE_MARK:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_fence_mark mark;
		int tmp_mark = 0;

		if (copy_from_user(&mark, argp, sizeof(mark))) {
			err = -EFAULT;
			goto fence_mark_err;
		}
		err = scif_fence_mark(priv, mark.flags, &tmp_mark);
		if (err)
			goto fence_mark_err;
		if (copy_to_user((void __user *)mark.mark, &tmp_mark,
				 sizeof(tmp_mark))) {
			err = -EFAULT;
			goto fence_mark_err;
		}
fence_mark_err:
		scif_err_debug(err, "scif_fence_mark");
		return err;
	}
	case SCIF_FENCE_WAIT:
	{
		struct scif_endpt *priv = f->private_data;

		err = scif_fence_wait(priv, arg);
		scif_err_debug(err, "scif_fence_wait");
		return err;
	}
	case SCIF_FENCE_SIGNAL:
	{
		struct scif_endpt *priv = f->private_data;
		struct scifioctl_fence_signal signal;

		if (copy_from_user(&signal, argp, sizeof(signal))) {
			err = -EFAULT;
			goto fence_signal_err;
		}

		err = scif_fence_signal(priv, signal.loff, signal.lval,
					signal.roff, signal.rval, signal.flags);
fence_signal_err:
		scif_err_debug(err, "scif_fence_signal");
		return err;
	}
	}
	return -EINVAL;
}

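/* File operations installed on the SCIF misc character device. */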
const struct file_operations scif_fops = {
	.open = scif_fdopen,
	.release = scif_fdclose,
	.unlocked_ioctl = scif_fdioctl,
	.mmap = scif_fdmmap,
	.poll = scif_fdpoll,
	.flush = scif_fdflush,
	.owner = THIS_MODULE,
};