/*
 * User-space I/O driver support for HID subsystem
 * Copyright (c) 2012 David Herrmann
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/uhid.h>
#include <linux/wait.h>

#define UHID_NAME	"uhid"
#define UHID_BUFSIZE	32

struct uhid_device {
	struct mutex devlock;

	/* This flag tracks whether the HID device is usable for commands from
	 * userspace. The flag is already set before hid_add_device(), which
	 * runs in workqueue context, to allow hid_add_device() to communicate
	 * with userspace.
	 * However, if hid_add_device() fails, the flag is cleared without
	 * holding devlock.
	 * We guarantee that if @running changes from true to false while you're
	 * holding @devlock, it's still fine to access @hid.
	 */
	bool running;

	__u8 *rd_data;
	uint rd_size;

	/* When this is NULL, userspace may use UHID_CREATE/UHID_CREATE2. */
	struct hid_device *hid;
	struct uhid_event input_buf;

	wait_queue_head_t waitq;
	spinlock_t qlock;
	__u8 head;
	__u8 tail;
	struct uhid_event *outq[UHID_BUFSIZE];

	/* blocking GET_REPORT support; state changes protected by qlock */
	struct mutex report_lock;
	wait_queue_head_t report_wait;
	bool report_running;
	u32 report_id;
	u32 report_type;
	struct uhid_event report_buf;
	struct work_struct worker;
};

static struct miscdevice uhid_misc;

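/*
 * Workqueue callback scheduled by uhid_dev_create2(): registers the newly
 * allocated HID device with the HID core outside of devlock. On failure the
 * device is left allocated (it is cleaned up on destroy/release); only the
 * ->running flag is cleared and report waiters are woken.
 */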
static void uhid_device_add_worker(struct work_struct *work)
{
	struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
	int ret;

	ret = hid_add_device(uhid->hid);
	if (ret) {
		hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);

		/* We used to call hid_destroy_device() here, but that's really
		 * messy to get right because we have to coordinate with
		 * concurrent writes from userspace that might be in the middle
		 * of using uhid->hid.
		 * Just leave uhid->hid as-is for now, and clean it up when
		 * userspace tries to close or reinitialize the uhid instance.
		 *
		 * However, we do have to clear the ->running flag and do a
		 * wakeup to make sure userspace knows that the device is gone.
		 */
		uhid->running = false;
		wake_up_interruptible(&uhid->report_wait);
	}
}

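/*
 * Caller must hold uhid->qlock. Takes ownership of @ev: the event is either
 * placed on the output queue (waking up readers) or freed if the queue is
 * full.
 */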
static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
{
	__u8 newhead;

	newhead = (uhid->head + 1) % UHID_BUFSIZE;

	if (newhead != uhid->tail) {
		uhid->outq[uhid->head] = ev;
		uhid->head = newhead;
		wake_up_interruptible(&uhid->waitq);
	} else {
		hid_warn(uhid->hid, "Output queue is full\n");
		kfree(ev);
	}
}

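/* Allocate an empty event of type @event and push it onto the output queue. */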
static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
{
	unsigned long flags;
	struct uhid_event *ev;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = event;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

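/*
 * hid_ll_driver ->start() callback: queue a UHID_START event, advertising in
 * dev_flags whether feature/output/input reports on this device are numbered.
 */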
static int uhid_hid_start(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	unsigned long flags;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_START;

	if (hid->report_enum[HID_FEATURE_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS;
	if (hid->report_enum[HID_OUTPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS;
	if (hid->report_enum[HID_INPUT_REPORT].numbered)
		ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

static void uhid_hid_stop(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	hid->claimed = 0;
	uhid_queue_event(uhid, UHID_STOP);
}

static int uhid_hid_open(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return uhid_queue_event(uhid, UHID_OPEN);
}

static void uhid_hid_close(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	uhid_queue_event(uhid, UHID_CLOSE);
}

static int uhid_hid_parse(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
}

/* must be called with report_lock held */
static int __uhid_report_queue_and_wait(struct uhid_device *uhid,
					struct uhid_event *ev,
					__u32 *report_id)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&uhid->qlock, flags);
	*report_id = ++uhid->report_id;
	uhid->report_type = ev->type + 1;
	uhid->report_running = true;
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	ret = wait_event_interruptible_timeout(uhid->report_wait,
				!uhid->report_running || !uhid->running,
				5 * HZ);
	if (!ret || !uhid->running || uhid->report_running)
		ret = -EIO;
	else if (ret < 0)
		ret = -ERESTARTSYS;
	else
		ret = 0;

	uhid->report_running = false;

	return ret;
}

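/*
 * Called when userspace delivers a GET/SET_REPORT reply. If the id and type
 * match the request currently in flight, copy the reply into report_buf and
 * wake up the waiter in __uhid_report_queue_and_wait(); stale replies are
 * dropped silently.
 */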
static void uhid_report_wake_up(struct uhid_device *uhid, u32 id,
				const struct uhid_event *ev)
{
	unsigned long flags;

	spin_lock_irqsave(&uhid->qlock, flags);

	/* id for old report; drop it silently */
	if (uhid->report_type != ev->type || uhid->report_id != id)
		goto unlock;
	if (!uhid->report_running)
		goto unlock;

	memcpy(&uhid->report_buf, ev, sizeof(*ev));
	uhid->report_running = false;
	wake_up_interruptible(&uhid->report_wait);

unlock:
	spin_unlock_irqrestore(&uhid->qlock, flags);
}

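/*
 * Blocking GET_REPORT: queue a UHID_GET_REPORT event towards userspace and
 * wait (up to 5 seconds) for the matching UHID_GET_REPORT_REPLY, then copy
 * the reply data into @buf.
 */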
static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum,
			       u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_get_report_reply_req *req;
	struct uhid_event *ev;
	int ret;

	if (!uhid->running)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_GET_REPORT;
	ev->u.get_report.rnum = rnum;
	ev->u.get_report.rtype = rtype;

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id);
	if (ret)
		goto unlock;

	req = &uhid->report_buf.u.get_report_reply;
	if (req->err) {
		ret = -EIO;
	} else {
		ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX);
		memcpy(buf, req->data, ret);
	}

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum,
			       const u8 *buf, size_t count, u8 rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	struct uhid_event *ev;
	int ret;

	if (!uhid->running || count > UHID_DATA_MAX)
		return -EIO;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_SET_REPORT;
	ev->u.set_report.rnum = rnum;
	ev->u.set_report.rtype = rtype;
	ev->u.set_report.size = count;
	memcpy(ev->u.set_report.data, buf, count);

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret) {
		kfree(ev);
		return ret;
	}

	/* this _always_ takes ownership of @ev */
	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id);
	if (ret)
		goto unlock;

	if (uhid->report_buf.u.set_report_reply.err)
		ret = -EIO;
	else
		ret = count;

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret;
}

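/*
 * hid_ll_driver ->raw_request() callback: translate the HID core report and
 * request types into their uhid counterparts and dispatch to the blocking
 * GET/SET_REPORT helpers above.
 */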
static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
				__u8 *buf, size_t len, unsigned char rtype,
				int reqtype)
{
	u8 u_rtype;

	switch (rtype) {
	case HID_FEATURE_REPORT:
		u_rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		u_rtype = UHID_OUTPUT_REPORT;
		break;
	case HID_INPUT_REPORT:
		u_rtype = UHID_INPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	switch (reqtype) {
	case HID_REQ_GET_REPORT:
		return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype);
	case HID_REQ_SET_REPORT:
		return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype);
	default:
		return -EIO;
	}
}

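/*
 * Fire-and-forget output path: queue a UHID_OUTPUT event carrying the report
 * data. Unlike GET/SET_REPORT, no reply from userspace is awaited.
 */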
static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
			       unsigned char report_type)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 rtype;
	unsigned long flags;
	struct uhid_event *ev;

	switch (report_type) {
	case HID_FEATURE_REPORT:
		rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		rtype = UHID_OUTPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	if (count < 1 || count > UHID_DATA_MAX)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_OUTPUT;
	ev->u.output.size = count;
	ev->u.output.rtype = rtype;
	memcpy(ev->u.output.data, buf, count);

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return count;
}

static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
				  size_t count)
{
	return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
}

struct hid_ll_driver uhid_hid_driver = {
	.start = uhid_hid_start,
	.stop = uhid_hid_stop,
	.open = uhid_hid_open,
	.close = uhid_hid_close,
	.parse = uhid_hid_parse,
	.raw_request = uhid_hid_raw_request,
	.output_report = uhid_hid_output_report,
	.max_buffer_size = UHID_DATA_MAX,
};
EXPORT_SYMBOL_GPL(uhid_hid_driver);

#ifdef CONFIG_COMPAT

/* Apparently we haven't stepped on these rakes enough times yet. */
struct uhid_create_req_compat {
	__u8 name[128];
	__u8 phys[64];
	__u8 uniq[64];

	compat_uptr_t rd_data;
	__u16 rd_size;

	__u16 bus;
	__u32 vendor;
	__u32 product;
	__u32 version;
	__u32 country;
} __attribute__((__packed__));

static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (in_compat_syscall()) {
		u32 type;

		if (get_user(type, buffer))
			return -EFAULT;

		if (type == UHID_CREATE) {
			/*
			 * This is our messed up request with compat pointer.
			 * It is largish (more than 256 bytes) so we better
			 * allocate it from the heap.
			 */
			struct uhid_create_req_compat *compat;

			compat = kzalloc(sizeof(*compat), GFP_KERNEL);
			if (!compat)
				return -ENOMEM;

			buffer += sizeof(type);
			len -= sizeof(type);
			if (copy_from_user(compat, buffer,
					   min(len, sizeof(*compat)))) {
				kfree(compat);
				return -EFAULT;
			}

			/* Shuffle the data over to proper structure */
			event->type = type;

			memcpy(event->u.create.name, compat->name,
				sizeof(compat->name));
			memcpy(event->u.create.phys, compat->phys,
				sizeof(compat->phys));
			memcpy(event->u.create.uniq, compat->uniq,
				sizeof(compat->uniq));

			event->u.create.rd_data = compat_ptr(compat->rd_data);
			event->u.create.rd_size = compat->rd_size;

			event->u.create.bus = compat->bus;
			event->u.create.vendor = compat->vendor;
			event->u.create.product = compat->product;
			event->u.create.version = compat->version;
			event->u.create.country = compat->country;

			kfree(compat);
			return 0;
		}
		/* All others can be copied directly */
	}

	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#else
static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#endif

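/*
 * Handle UHID_CREATE2: validate and duplicate the report descriptor, allocate
 * and populate the hid_device, and hand registration off to the worker.
 */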
static int uhid_dev_create2(struct uhid_device *uhid,
			    const struct uhid_event *ev)
{
	struct hid_device *hid;
	size_t rd_size, len;
	void *rd_data;
	int ret;

	if (uhid->hid)
		return -EALREADY;

	rd_size = ev->u.create2.rd_size;
	if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;

	rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL);
	if (!rd_data)
		return -ENOMEM;

	uhid->rd_size = rd_size;
	uhid->rd_data = rd_data;

	hid = hid_allocate_device();
	if (IS_ERR(hid)) {
		ret = PTR_ERR(hid);
		goto err_free;
	}

	/* @hid is zero-initialized, strncpy() is correct, strlcpy() not */
	len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
	strncpy(hid->name, ev->u.create2.name, len);
	len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
	strncpy(hid->phys, ev->u.create2.phys, len);
	len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
	strncpy(hid->uniq, ev->u.create2.uniq, len);

	hid->ll_driver = &uhid_hid_driver;
	hid->bus = ev->u.create2.bus;
	hid->vendor = ev->u.create2.vendor;
	hid->product = ev->u.create2.product;
	hid->version = ev->u.create2.version;
	hid->country = ev->u.create2.country;
	hid->driver_data = uhid;
	hid->dev.parent = uhid_misc.this_device;

	uhid->hid = hid;
	uhid->running = true;

	/* Adding of a HID device is done through a worker, to allow HID
	 * drivers which use feature requests during .probe to work; otherwise
	 * they would be blocked on devlock, which is held by uhid_char_write().
	 */
	schedule_work(&uhid->worker);

	return 0;

err_free:
	kfree(uhid->rd_data);
	uhid->rd_data = NULL;
	uhid->rd_size = 0;
	return ret;
}

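/*
 * Handle the legacy UHID_CREATE request: its report descriptor is passed as a
 * user-space pointer, so copy it in and convert the request to the
 * UHID_CREATE2 layout before handing it to uhid_dev_create2().
 */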
static int uhid_dev_create(struct uhid_device *uhid,
			   struct uhid_event *ev)
{
	struct uhid_create_req orig;

	orig = ev->u.create;

	if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;
	if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size))
		return -EFAULT;

	memcpy(ev->u.create2.name, orig.name, sizeof(orig.name));
	memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys));
	memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq));
	ev->u.create2.rd_size = orig.rd_size;
	ev->u.create2.bus = orig.bus;
	ev->u.create2.vendor = orig.vendor;
	ev->u.create2.product = orig.product;
	ev->u.create2.version = orig.version;
	ev->u.create2.country = orig.country;

	return uhid_dev_create2(uhid, ev);
}

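/*
 * Tear down the device: stop accepting requests, wake up any waiter blocked
 * on a report reply, flush the add worker and destroy the HID device.
 */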
static int uhid_dev_destroy(struct uhid_device *uhid)
{
	if (!uhid->hid)
		return -EINVAL;

	uhid->running = false;
	wake_up_interruptible(&uhid->report_wait);

	cancel_work_sync(&uhid->worker);

	hid_destroy_device(uhid->hid);
	uhid->hid = NULL;
	kfree(uhid->rd_data);

	return 0;
}

static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
			 min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data,
			 min_t(size_t, ev->u.input2.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_get_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev);
	return 0;
}

static int uhid_dev_set_report_reply(struct uhid_device *uhid,
				     struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev);
	return 0;
}

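/*
 * Each open of the uhid character device gets its own uhid_device instance;
 * one open file corresponds to at most one emulated HID device.
 */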
static int uhid_char_open(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid;

	uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
	if (!uhid)
		return -ENOMEM;

	mutex_init(&uhid->devlock);
	mutex_init(&uhid->report_lock);
	spin_lock_init(&uhid->qlock);
	init_waitqueue_head(&uhid->waitq);
	init_waitqueue_head(&uhid->report_wait);
	uhid->running = false;
	INIT_WORK(&uhid->worker, uhid_device_add_worker);

	file->private_data = uhid;
	nonseekable_open(inode, file);

	return 0;
}

static int uhid_char_release(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid = file->private_data;
	unsigned int i;

	uhid_dev_destroy(uhid);

	for (i = 0; i < UHID_BUFSIZE; ++i)
		kfree(uhid->outq[i]);

	kfree(uhid);

	return 0;
}

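/*
 * Read the next queued event into the userspace buffer. Readers block unless
 * O_NONBLOCK is set; devlock serializes concurrent readers, and only the tail
 * index update needs qlock.
 */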
static ssize_t uhid_char_read(struct file *file, char __user *buffer,
			      size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	unsigned long flags;
	size_t len;

	/* they need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

try_again:
	if (file->f_flags & O_NONBLOCK) {
		if (uhid->head == uhid->tail)
			return -EAGAIN;
	} else {
		ret = wait_event_interruptible(uhid->waitq,
					       uhid->head != uhid->tail);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	if (uhid->head == uhid->tail) {
		mutex_unlock(&uhid->devlock);
		goto try_again;
	} else {
		len = min(count, sizeof(**uhid->outq));
		if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
			ret = -EFAULT;
		} else {
			kfree(uhid->outq[uhid->tail]);
			uhid->outq[uhid->tail] = NULL;

			spin_lock_irqsave(&uhid->qlock, flags);
			uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
			spin_unlock_irqrestore(&uhid->qlock, flags);
		}
	}

	mutex_unlock(&uhid->devlock);
	return ret ? ret : len;
}

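/*
 * Parse one uhid_event from the userspace buffer and dispatch it by type.
 * devlock serializes writers against readers and against device teardown.
 */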
static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
			       size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	size_t len;

	/* we need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
	len = min(count, sizeof(uhid->input_buf));

	ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
	if (ret)
		goto unlock;

	switch (uhid->input_buf.type) {
	case UHID_CREATE:
		/*
		 * 'struct uhid_create_req' contains a __user pointer which is
		 * copied from, so it's unsafe to allow this with elevated
		 * privileges (e.g. from a setuid binary) or via kernel_write().
		 */
		if (file->f_cred != current_cred() || uaccess_kernel()) {
			pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
				    task_tgid_vnr(current), current->comm);
			ret = -EACCES;
			goto unlock;
		}
		ret = uhid_dev_create(uhid, &uhid->input_buf);
		break;
	case UHID_CREATE2:
		ret = uhid_dev_create2(uhid, &uhid->input_buf);
		break;
	case UHID_DESTROY:
		ret = uhid_dev_destroy(uhid);
		break;
	case UHID_INPUT:
		ret = uhid_dev_input(uhid, &uhid->input_buf);
		break;
	case UHID_INPUT2:
		ret = uhid_dev_input2(uhid, &uhid->input_buf);
		break;
	case UHID_GET_REPORT_REPLY:
		ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf);
		break;
	case UHID_SET_REPORT_REPLY:
		ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

unlock:
	mutex_unlock(&uhid->devlock);

	/* return "count" not "len" to not confuse the caller */
	return ret ? ret : count;
}

static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
	struct uhid_device *uhid = file->private_data;
	__poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */

	poll_wait(file, &uhid->waitq, wait);

	if (uhid->head != uhid->tail)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

static const struct file_operations uhid_fops = {
	.owner		= THIS_MODULE,
	.open		= uhid_char_open,
	.release	= uhid_char_release,
	.read		= uhid_char_read,
	.write		= uhid_char_write,
	.poll		= uhid_char_poll,
	.llseek		= no_llseek,
};

static struct miscdevice uhid_misc = {
	.fops		= &uhid_fops,
	.minor		= UHID_MINOR,
	.name		= UHID_NAME,
};
module_misc_device(uhid_misc);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
MODULE_ALIAS_MISCDEV(UHID_MINOR);
MODULE_ALIAS("devname:" UHID_NAME);