// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
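/*
 * Note on the expression above: the & -BITS_PER_LONG rounds the smaller
 * of the two bounds *down* to a multiple of BITS_PER_LONG (with
 * BITS_PER_LONG == 64 it masks with ~63), so the maximum keeps the fd
 * bitmaps whole words long.
 */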

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

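/*
 * full_fds_bits is a second-level bitmap: bit N is set when word N of
 * open_fds is completely full.  BITBIT_NR/BITBIT_SIZE below size that
 * summary bitmap.  Illustrative numbers, assuming BITS_PER_LONG == 64:
 * for nr == 4096 fds, open_fds needs 64 words and full_fds_bits needs
 * BITS_TO_LONGS(64) == 1 word, i.e. BITBIT_SIZE(4096) == 8 bytes.
 */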
#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
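	/*
	 * Worked example (illustrative; assumes 8-byte file pointers, so
	 * 1024B holds 128 slots): nr == 300 gives 300/128 == 2, then
	 * roundup_pow_of_two(3) == 4, then 4*128 == 512 supported fds.
	 */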
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
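	/*
	 * The clamp below rounds sysctl_nr_open up to a multiple of
	 * BITS_PER_LONG; e.g. with BITS_PER_LONG == 64 and nr_open == 100,
	 * ((100 - 1) | 63) + 1 == 128.
	 */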
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			      GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all __fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_sched();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in __fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}
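
/*
 * A brief sketch of the resize protocol implemented above: writers
 * serialize on ->resize_in_progress under ->file_lock and publish the
 * new table with rcu_assign_pointer(); lockless __fd_install() runs
 * under rcu_read_lock_sched(), so expand_fdtable()'s synchronize_sched()
 * lets any installer that missed ->resize_in_progress drain before the
 * old table is copied.  See the smp_wmb()/smp_rmb() pairing noted above
 * and in __fd_install().
 */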

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

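/*
 * Note: count_open_files() rounds its result up to a whole word, so the
 * value is a bitmap-friendly upper bound (a multiple of BITS_PER_LONG
 * covering the last open fd), not the exact number of open files.
 */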
static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table; the
		 * table may have been replaced with a new, bigger one in the
		 * meantime, so we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct *files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file *file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct *files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

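/*
 * Two-level search: full_fds_bits lets us skip whole words of open_fds
 * that are already completely populated, then a plain find_next_zero_bit()
 * over open_fds finds the free slot within the first non-full word.
 */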
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

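/*
 * Typical usage of the allocation/installation API (a sketch of the
 * common pattern; cf. ksys_dup() below): reserve a slot first, create
 * the struct file, and only publish it once it is fully set up:
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = ...;		(e.g. anon_inode_getfile())
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */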
/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat.  'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen.  Normally you want to use
 * fd_install() instead.
 */

void __fd_install(struct files_struct *files, unsigned int fd,
		  struct file *file)
{
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);

/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	fd = array_index_nospec(fd, fdt->max_fds);
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
EXPORT_SYMBOL(__close_fd); /* for ksys_close() */

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}

	}
	spin_unlock(&files->file_lock);
}

static inline struct file *__fget_files_rcu(struct files_struct *files,
	unsigned int fd, fmode_t mask, unsigned int refs)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;

		if (unlikely(fd >= fdt->max_fds))
			return NULL;

		fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
		file = rcu_dereference_raw(*fdentry);
		if (unlikely(!file))
			return NULL;

		if (unlikely(file->f_mode & mask))
			return NULL;

		/*
		 * Ok, we have a file pointer. However, because we do
		 * this all locklessly under RCU, we may be racing with
		 * that file being closed.
		 *
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero,
		 *      and get_file_rcu_many() fails. Just try
		 *      again:
		 */
		if (unlikely(!get_file_rcu_many(file, refs)))
			continue;

		/*
		 *  (b) the file table entry has changed under us.
		 *      Note that we don't need to re-check the 'fdt->fd'
		 *      pointer having changed, because it always goes
		 *      hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our refs and try again.
		 */
		if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
		    unlikely(rcu_dereference_raw(*fdentry) != file)) {
			fput_many(file, refs);
			continue;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}


static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
{
	struct files_struct *files = current->files;
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask, refs);
	rcu_read_unlock();

	return file;
}

struct file *fget_many(unsigned int fd, unsigned int refs)
{
	return __fget(fd, FMODE_PATH, refs);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH, 1);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0, 1);
}
EXPORT_SYMBOL(fget_raw);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = __fcheck_files(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask, 1);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}
unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

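/*
 * Callers normally use the fdget_pos()/fdput_pos() wrappers from
 * <linux/file.h>: they decode the FDPUT_FPUT and FDPUT_POS_UNLOCK bits
 * that __fdget_pos() packed into the low bits of its return value, and
 * drop the reference and/or f_pos_lock as needed.
 */
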
void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over an allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing a descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, the locking implications
	 * and the amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD's "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

int ksys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	return ksys_dup(fildes);
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;
	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
	       int (*f)(const void *, struct file *, unsigned),
	       const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);