// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <linux/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)
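
/*
 * Worked example (illustrative): a normal task with a 2 s timeout gets
 * 2 * NSEC_PER_SEC / 1000 = 2,000,000 ns (2 ms) of slack. Timeouts beyond
 * ~100 s would compute to more than 100 ms and are capped at MAX_SLACK.
 * A "nice" task uses divfactor 200 (0.5%), so the same 2 s timeout
 * yields 10 ms of slack.
 */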

static long __estimate_accuracy(struct timespec64 *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

u64 select_estimate_accuracy(struct timespec64 *tv)
{
	u64 ret;
	struct timespec64 now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts64(&now);
	now = timespec64_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}



struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
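
/*
 * Illustrative reading of POLL_TABLE_FULL: each poll_table_page occupies
 * exactly one page, so the table is "full" once bumping ->entry would point
 * past the end of that page. Assuming a 4 KiB page, a 16-byte header and a
 * poll_table_entry on the order of 64 bytes (sizes are arch- and
 * config-dependent; these are assumptions for the example), a few dozen
 * entries fit before a new page must be chained in.
 */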

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work. poll_wait() is an inline function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */

static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !(key_to_poll(key) & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

static int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
				 ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following smp_store_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	smp_store_mb(pwq->triggered, 0);

	return rc;
}

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec64 variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec64_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts64(to);
		*to = timespec64_add_safe(*to, ts);
	}
	return 0;
}

static int poll_select_copy_remaining(struct timespec64 *end_time,
				      void __user *p,
				      int timeval, int ret)
{
	struct timespec64 rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&rts);
	rts = timespec64_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;


	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!put_timespec64(&rts, p))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

/*
 * Scalable version of the fd_set.
 */

typedef struct {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG	(8*sizeof(long))
#define FDS_LONGS(nr)	(((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr)	(FDS_LONGS(nr)*sizeof(long))
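
/*
 * Worked example: on a 64-bit machine (BITS_PER_LONG == 64), nr = 1000 bits
 * need FDS_LONGS(1000) = (1000 + 63) / 64 = 16 longwords, i.e.
 * FDS_BYTES(1000) = 128 bytes, rounded up from the 125 bytes the raw bits
 * would occupy.
 */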

/*
 * We do a VERIFY_WRITE here even though we are only reading this time:
 * we'll write to it eventually..
 *
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 */
static inline
int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	nr = FDS_BYTES(nr);
	if (ufdset)
		return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;

	memset(fdset, 0, nr);
	return 0;
}

static inline unsigned long __must_check
set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	if (ufdset)
		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
	return 0;
}

static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	memset(fdset, 0, FDS_BYTES(nr));
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle the last incomplete longword first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
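	/*
	 * Example: on 64-bit, n = 68 leaves n & 63 = 4 valid bits in the
	 * final longword, so set = ~(~0UL << 4) = 0xf selects just those.
	 */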
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR |\
			EPOLLNVAL)
#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR |\
			 EPOLLNVAL)
#define POLLEX_SET (EPOLLPRI | EPOLLNVAL)
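
/*
 * Example of the mapping: a socket that has seen EOF typically reports
 * EPOLLIN | EPOLLHUP from its ->poll method; both bits are in POLLIN_SET,
 * so select() marks the descriptor readable even though the caller only
 * asked about "read". EPOLLNVAL is part of every set, so a descriptor
 * that becomes invalid while we wait is still reported.
 */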

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				__poll_t ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}

static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	u64 slack = 0;
	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			__poll_t mask;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				mask = EPOLLNVAL;
				f = fdget(i);
				if (f.file) {
					wait_key_set(wait, in, out, bit,
						     busy_flag);
					mask = vfs_poll(f.file, wait);

					fdput(f);
				}
				if ((mask & POLLIN_SET) && (in & bit)) {
					res_in |= bit;
					retval++;
					wait->_qproc = NULL;
				}
				if ((mask & POLLOUT_SET) && (out & bit)) {
					res_out |= bit;
					retval++;
					wait->_qproc = NULL;
				}
				if ((mask & POLLEX_SET) && (ex & bit)) {
					res_ex |= bit;
					retval++;
					wait->_qproc = NULL;
				}
				/* got something, stop busy polling */
				if (retval) {
					can_busy_loop = false;
					busy_flag = 0;

				/*
				 * only remember a returned
				 * POLL_BUSY_LOOP if we asked for it
				 */
				} else if (busy_flag & mask)
					can_busy_loop = true;

			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want it to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
		    fd_set __user *exp, struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	size_t size, alloc_size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
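	/*
	 * Rough sizing (assuming SELECT_STACK_ALLOC is 256): each of the six
	 * bitmaps may use up to sizeof(stack_fds) / 6 = 42 bytes, i.e. five
	 * longwords on 64-bit, so selects covering up to ~320 descriptors
	 * avoid the kvmalloc() below.
	 */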

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we use the fd_set layout we have to allocate memory
	 * in units of long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		if (size > (SIZE_MAX / 6))
			goto out_nofds;

		alloc_size = 6 * size;
		bits = kvmalloc(alloc_size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kvfree(bits);
out_nofds:
	return ret;
}

static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timeval __user *tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	return kern_select(n, inp, outp, exp, tvp);
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
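
/*
 * A sketch of the user-space side of that convention (the struct name is
 * made up for illustration):
 *
 *	struct sigset_argpack {
 *		const sigset_t *ss;	   pointer to the signal mask
 *		size_t ss_len;		   sizeof(*ss)
 *	};
 *
 * with a pointer to such a pack passed as the sixth pselect6() argument.
 */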
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return kern_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
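
/*
 * Example (assuming 4 KiB pages and typical 64-bit sizes of a 16-byte
 * poll_list header and an 8-byte pollfd; both are arch-dependent):
 * (4096 - 16) / 8 = 510 pollfds fit in each chained page.
 */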

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				 bool *can_busy_poll,
				 __poll_t busy_flag)
{
	int fd = pollfd->fd;
	__poll_t mask = 0, filter;
	struct fd f;

	if (fd < 0)
		goto out;
	mask = EPOLLNVAL;
	f = fdget(fd);
	if (!f.file)
		goto out;

	/* userland u16 ->events contains POLL... bitmap */
	filter = demangle_poll(pollfd->events) | EPOLLERR | EPOLLHUP;
	pwait->_key = filter | busy_flag;
	mask = vfs_poll(f.file, pwait);
	if (mask & busy_flag)
		*can_busy_poll = true;
	mask &= filter;		/* Mask out unneeded events. */
	fdput(f);

out:
	/* ... and so does ->revents */
	pollfd->revents = mangle_poll(mask);
	return mask;
}

static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
		   struct timespec64 *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	u64 slack = 0;
	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))
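
/*
 * With the sizes assumed in the POLLFD_PER_PAGE example above and a
 * POLL_STACK_ALLOC of 256 bytes, N_STACK_PPS works out to
 * (256 - 16) / 8 = 30 pollfds handled without any heap allocation.
 */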

static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		       struct timespec64 *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec64 *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR)
		ret = set_restart_fn(restart_block, do_restart_poll);

	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}
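	/*
	 * Example: timeout_msecs = 2500 splits into 2 s plus
	 * 500 * NSEC_PER_MSEC, which poll_select_set_timeout() adds to
	 * the current time to form the absolute expiry.
	 */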

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current->restart_block;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = set_restart_fn(restart_block, do_restart_poll);
	}
	return ret;
}

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}

#ifdef CONFIG_COMPAT
#define __COMPAT_NFDBITS       (8 * sizeof(compat_ulong_t))

static
int compat_poll_select_copy_remaining(struct timespec64 *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec64 ts;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&ts);
	ts = timespec64_sub(*end_time, ts);
	if (ts.tv_sec < 0)
		ts.tv_sec = ts.tv_nsec = 0;

	if (timeval) {
		struct compat_timeval rtv;

		rtv.tv_sec = ts.tv_sec;
		rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;
	} else {
		if (!compat_put_timespec64(&ts, p))
			return ret;
	}
	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

/*
 * Ooo, nasty.  We need to frob 32-bit unsigned longs into
 * 64-bit unsigned longs here.
 */
static
int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
		      unsigned long *fdset)
{
	if (ufdset) {
		return compat_get_bitmap(fdset, ufdset, nr);
	} else {
		zero_fd_set(nr, fdset);
		return 0;
	}
}

static
int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
		      unsigned long *fdset)
{
	if (!ufdset)
		return 0;
	return compat_put_bitmap(ufdset, fdset, nr);
}


/*
 * This is a virtual copy of sys_select from fs/select.c and probably
 * should be compared to it from time to time
 */

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want it to.
 */
static int compat_core_sys_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int size, max_fds, ret = -EINVAL;
	struct fdtable *fdt;
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we use the fd_set layout we have to allocate memory
	 * in units of long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		bits = kmalloc_array(6, size, GFP_KERNEL);
		ret = -ENOMEM;
		if (!bits)
			goto out_nofds;
	}
	fds.in      = (unsigned long *)  bits;
	fds.out     = (unsigned long *) (bits +   size);
	fds.ex      = (unsigned long *) (bits + 2*size);
	fds.res_in  = (unsigned long *) (bits + 3*size);
	fds.res_out = (unsigned long *) (bits + 4*size);
	fds.res_ex  = (unsigned long *) (bits + 5*size);

	if ((ret = compat_get_fd_set(n, inp, fds.in)) ||
	    (ret = compat_get_fd_set(n, outp, fds.out)) ||
	    (ret = compat_get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (compat_set_fd_set(n, inp, fds.res_in) ||
	    compat_set_fd_set(n, outp, fds.res_out) ||
	    compat_set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;
out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

static int do_compat_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct compat_timeval __user *tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct compat_timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	ret = compat_poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct compat_timeval __user *, tvp)
{
	return do_compat_select(n, inp, outp, exp, tvp);
}

struct compat_sel_arg_struct {
	compat_ulong_t n;
	compat_uptr_t inp;
	compat_uptr_t outp;
	compat_uptr_t exp;
	compat_uptr_t tvp;
};

COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
{
	struct compat_sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return do_compat_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
				compat_ptr(a.exp), compat_ptr(a.tvp));
}

static long do_compat_pselect(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
	compat_size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (compat_get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (get_compat_sigset(&ksigmask, sigmask))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	ret = compat_poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct compat_timespec __user *, tsp, void __user *, sig)
{
	compat_size_t sigsetsize = 0;
	compat_uptr_t up = 0;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig,
				sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
		    __get_user(up, (compat_uptr_t __user *)sig) ||
		    __get_user(sigsetsize,
				(compat_size_t __user *)(sig+sizeof(up))))
			return -EFAULT;
	}
	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
				 sigsetsize);
}

COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
	unsigned int,  nfds, struct compat_timespec __user *, tsp,
	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (compat_get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (get_compat_sigset(&ksigmask, sigmask))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = compat_poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
#endif
