// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/tty.h>

#include "internal.h"

static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);

#define CHRDEV_MAJOR_HASH_SIZE 255

static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	char name[64];
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

/* index in the above */
static inline int major_to_index(unsigned major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}
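
/*
 * Editorial note (not in the original source): majors at or above
 * CHRDEV_MAJOR_HASH_SIZE alias into lower slots, e.g. a hypothetical major
 * 300 hashes to 300 % 255 == 45 and shares a chain with major 45.  This is
 * why the lookups below still compare cd->major rather than trusting the
 * slot alone.
 */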

#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
	struct char_device_struct *cd;

	mutex_lock(&chrdevs_lock);
	for (cd = chrdevs[major_to_index(offset)]; cd; cd = cd->next) {
		if (cd->major == offset)
			seq_printf(f, "%3d %s\n", cd->major, cd->name);
	}
	mutex_unlock(&chrdevs_lock);
}

#endif /* CONFIG_PROC_FS */

static int find_dynamic_major(void)
{
	int i;
	struct char_device_struct *cd;

	for (i = ARRAY_SIZE(chrdevs)-1; i >= CHRDEV_MAJOR_DYN_END; i--) {
		if (chrdevs[i] == NULL)
			return i;
	}

	for (i = CHRDEV_MAJOR_DYN_EXT_START;
	     i >= CHRDEV_MAJOR_DYN_EXT_END; i--) {
		for (cd = chrdevs[major_to_index(i)]; cd; cd = cd->next)
			if (cd->major == i)
				break;

		if (cd == NULL)
			return i;
	}

	return -EBUSY;
}
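
/*
 * Editorial note (not in the original source): in the plain dynamic range an
 * empty chrdevs[] slot is proof that the major is free, because those majors
 * hash to themselves.  Majors in the extended range are at or above
 * CHRDEV_MAJOR_HASH_SIZE and alias into slots that may already be populated,
 * so the second loop above must walk the chain and match cd->major before
 * treating a major as unused.
 */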

/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate an unused major and
 * use it.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors on that major.
 *
 * Returns the new char_device_struct on success, or an ERR_PTR() encoding a
 * -ve errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			 int minorct, const char *name)
{
	struct char_device_struct *cd, **cp;
	int ret = 0;
	int i;

	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&chrdevs_lock);

	if (major == 0) {
		ret = find_dynamic_major();
		if (ret < 0) {
			pr_err("CHRDEV \"%s\" dynamic allocation region is full\n",
			       name);
			goto out;
		}
		major = ret;
	}

	if (major >= CHRDEV_MAJOR_MAX) {
		pr_err("CHRDEV \"%s\" major requested (%u) is greater than the maximum (%u)\n",
		       name, major, CHRDEV_MAJOR_MAX-1);
		ret = -EINVAL;
		goto out;
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	strlcpy(cd->name, name, sizeof(cd->name));

	i = major_to_index(major);

	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major > major ||
		    ((*cp)->major == major &&
		     (((*cp)->baseminor >= baseminor) ||
		      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
			break;

	/* Check for overlapping minor ranges. */
	if (*cp && (*cp)->major == major) {
		int old_min = (*cp)->baseminor;
		int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
		int new_min = baseminor;
		int new_max = baseminor + minorct - 1;

		/* New driver overlaps from the left. */
		if (new_max >= old_min && new_max <= old_max) {
			ret = -EBUSY;
			goto out;
		}

		/* New driver overlaps from the right. */
		if (new_min <= old_max && new_min >= old_min) {
			ret = -EBUSY;
			goto out;
		}

		if (new_min < old_min && new_max > old_max) {
			ret = -EBUSY;
			goto out;
		}
	}

	cd->next = *cp;
	*cp = cd;
	mutex_unlock(&chrdevs_lock);
	return cd;
out:
	mutex_unlock(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}
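
/*
 * Worked example (editorial, not in the original source): with an existing
 * entry covering minors 8..15 (baseminor 8, minorct 8) on the same major, a
 * request for baseminor 12, minorct 8 gives new_min 12 and new_max 19;
 * new_min falls inside 8..15, so the "overlaps from the right" test above
 * rejects it with -EBUSY.  A request for baseminor 0, minorct 8 (minors
 * 0..7) passes all three tests and is linked ahead of the existing entry.
 */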

static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
	struct char_device_struct *cd = NULL, **cp;
	int i = major_to_index(major);

	mutex_lock(&chrdevs_lock);
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major == major &&
		    (*cp)->baseminor == baseminor &&
		    (*cp)->minorct == minorct)
			break;
	if (*cp) {
		cd = *cp;
		*cp = cd->next;
	}
	mutex_unlock(&chrdevs_lock);
	return cd;
}

/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver.
 *
 * Return value is zero on success, a negative error code on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
			       next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
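
/*
 * Usage sketch (editorial example, not part of this file; the major, count
 * and name below are made up): a driver that owns a fixed major reserves its
 * minors up front and releases them on exit.
 *
 *	#define MYDRV_MAJOR	240
 *
 *	static int __init mydrv_reserve(void)
 *	{
 *		return register_chrdev_region(MKDEV(MYDRV_MAJOR, 0), 16,
 *					      "mydrv");
 *	}
 *
 *	static void __exit mydrv_release(void)
 *	{
 *		unregister_chrdev_region(MKDEV(MYDRV_MAJOR, 0), 16);
 *	}
 */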

/**
 * alloc_chrdev_region() - register a range of char device numbers
 * @dev: output parameter for first assigned number
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: the name of the associated device or driver
 *
 * Allocates a range of char device numbers.  The major number will be
 * chosen dynamically, and returned (along with the first minor number)
 * in @dev.  Returns zero or a negative error code.
 */
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
			const char *name)
{
	struct char_device_struct *cd;
	cd = __register_chrdev_region(0, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	*dev = MKDEV(cd->major, cd->baseminor);
	return 0;
}
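
/*
 * Usage sketch (editorial example; "my_fops", "my_cdev" and the region name
 * are hypothetical): the common pattern is to allocate a dynamic region and
 * then expose it through a cdev set up with cdev_init()/cdev_add().
 *
 *	static dev_t my_devt;
 *	static struct cdev my_cdev;
 *
 *	static int __init my_init(void)
 *	{
 *		int err = alloc_chrdev_region(&my_devt, 0, 4, "mydev");
 *
 *		if (err)
 *			return err;
 *		cdev_init(&my_cdev, &my_fops);
 *		my_cdev.owner = THIS_MODULE;
 *		err = cdev_add(&my_cdev, my_devt, 4);
 *		if (err)
 *			unregister_chrdev_region(my_devt, 4);
 *		return err;
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		cdev_del(&my_cdev);
 *		unregister_chrdev_region(my_devt, 4);
 *	}
 */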

/**
 * __register_chrdev() - create and register a cdev occupying a range of minors
 * @major: major device number or 0 for dynamic allocation
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: name of this range of devices
 * @fops: file operations associated with these devices
 *
 * If @major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If @major > 0 this function will attempt to reserve a device with the given
 * major number and will return zero on success.
 *
 * Returns a -ve errno on failure.
 *
 * The name of this device has nothing to do with the name of the device in
 * /dev. It only helps to keep track of the different owners of devices. If
 * your module provides only one type of device it's ok to use e.g. the name
 * of the module here.
 */
int __register_chrdev(unsigned int major, unsigned int baseminor,
		      unsigned int count, const char *name,
		      const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
	if (err)
		goto out;

	cd->cdev = cdev;

	return major ? 0 : cd->major;
out:
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, baseminor, count));
	return err;
}
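
/*
 * Usage sketch (editorial example; the names are made up): most callers reach
 * this through the register_chrdev()/unregister_chrdev() wrappers, but it can
 * also be used directly to claim a sub-range of minors behind a single fops.
 *
 *	static int my_major;
 *
 *	static int __init my_init(void)
 *	{
 *		my_major = __register_chrdev(0, 0, 8, "mydrv", &my_fops);
 *		return my_major < 0 ? my_major : 0;
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		__unregister_chrdev(my_major, 0, 8, "mydrv");
 *	}
 */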

/**
 * unregister_chrdev_region() - unregister a range of device numbers
 * @from: the first in the range of numbers to unregister
 * @count: the number of device numbers to unregister
 *
 * This function will unregister a range of @count device numbers,
 * starting with @from.  The caller should normally be the one who
 * allocated those numbers in the first place...
 */
void unregister_chrdev_region(dev_t from, unsigned count)
{
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
}

/**
 * __unregister_chrdev() - unregister and destroy a cdev
 * @major: major device number
 * @baseminor: first of the range of minor numbers
 * @count: the number of minor numbers this cdev is occupying
 * @name: name of this range of devices
 *
 * Unregister and destroy the cdev occupying the region described by
 * @major, @baseminor and @count.  This function undoes what
 * __register_chrdev() did.
 */
void __unregister_chrdev(unsigned int major, unsigned int baseminor,
			 unsigned int count, const char *name)
{
	struct char_device_struct *cd;

	cd = __unregister_chrdev_region(major, baseminor, count);
	if (cd && cd->cdev)
		cdev_del(cd->cdev);
	kfree(cd);
}

static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get_unless_zero(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}

void cdev_put(struct cdev *p)
{
	if (p) {
		struct module *owner = p->owner;
		kobject_put(&p->kobj);
		module_put(owner);
	}
}

/*
 * Called every time a character special file is opened
 */
static int chrdev_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *fops;
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		/* Check i_cdev again in case somebody beat us to it while
		   we dropped the lock. */
		p = inode->i_cdev;
		if (!p) {
			inode->i_cdev = p = new;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	cdev_put(new);
	if (ret)
		return ret;

	ret = -ENXIO;
	fops = fops_get(p->ops);
	if (!fops)
		goto out_cdev_put;

	replace_fops(filp, fops);
	if (filp->f_op->open) {
		ret = filp->f_op->open(inode, filp);
		if (ret)
			goto out_cdev_put;
	}

	return 0;

 out_cdev_put:
	cdev_put(p);
	return ret;
}

void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	inode->i_mapping = &inode->i_data;
	spin_unlock(&cdev_lock);
}

static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}

/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
	.llseek = noop_llseek,
};

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct cdev *p = data;
	return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
	struct cdev *p = data;
	return cdev_get(p) ? 0 : -1;
}

/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number for which this device is responsible
 * @count: the number of consecutive minor numbers corresponding to this
 *         device
 *
 * cdev_add() adds the device represented by @p to the system, making it
 * live immediately.  A negative error code is returned on failure.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	int error;

	p->dev = dev;
	p->count = count;

	error = kobj_map(cdev_map, dev, count, NULL,
			 exact_match, exact_lock, p);
	if (error)
		return error;

	kobject_get(p->kobj.parent);

	return 0;
}
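
/*
 * Editorial note: because kobj_map() publishes the range at once, the device
 * is open()-able the moment cdev_add() returns; any driver state that the
 * fops depend on must therefore be fully initialised before calling it.
 */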

/**
 * cdev_set_parent() - set the parent kobject for a char device
 * @p: the cdev structure
 * @kobj: the kobject to take a reference to
 *
 * cdev_set_parent() sets a parent kobject which will be referenced
 * appropriately so the parent is not freed before the cdev.  This
 * should be called before cdev_add().
 */
void cdev_set_parent(struct cdev *p, struct kobject *kobj)
{
	WARN_ON(!kobj->state_initialized);
	p->kobj.parent = kobj;
}
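
/*
 * Usage sketch (editorial example; "mydev" and "my_fops" are hypothetical,
 * and mydev->dev is assumed to have been set up with device_initialize()):
 * this is the open-coded equivalent of what cdev_device_add() does for an
 * embedded cdev, pinning the owning device until the cdev's last reference
 * is dropped.
 *
 *	cdev_init(&mydev->cdev, &my_fops);
 *	cdev_set_parent(&mydev->cdev, &mydev->dev.kobj);
 *	err = cdev_add(&mydev->cdev, mydev->dev.devt, 1);
 */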

/**
 * cdev_device_add() - add a char device and its corresponding
 *	struct device, linking them together
 * @cdev: the cdev structure
 * @dev: the device structure
 *
 * cdev_device_add() adds the char device represented by @cdev to the system,
 * just as cdev_add() does. It then adds @dev to the system using device_add().
 * The dev_t for the char device will be taken from the struct device, which
 * needs to be initialized first. This helper function correctly takes a
 * reference to the parent device so the parent will not get released until
 * all references to the cdev are released.
 *
 * This helper uses dev->devt for the device number. If it is not set
 * it will not add the cdev and it will be equivalent to device_add().
 *
 * This function should be used whenever the struct cdev and the
 * struct device are members of the same structure whose lifetime is
 * managed by the struct device.
 *
 * NOTE: Callers must assume that userspace was able to open the cdev and
 * can call cdev fops callbacks at any time, even if this function fails.
 */
int cdev_device_add(struct cdev *cdev, struct device *dev)
{
	int rc = 0;

	if (dev->devt) {
		cdev_set_parent(cdev, &dev->kobj);

		rc = cdev_add(cdev, dev->devt, 1);
		if (rc)
			return rc;
	}

	rc = device_add(dev);
	if (rc && dev->devt)
		cdev_del(cdev);

	return rc;
}
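
/*
 * Usage sketch (editorial example; struct my_device and its fields are made
 * up): the typical caller embeds both objects in one structure whose lifetime
 * is managed by the struct device.
 *
 *	struct my_device {
 *		struct device dev;
 *		struct cdev cdev;
 *	};
 *
 *	device_initialize(&md->dev);
 *	md->dev.devt = MKDEV(my_major, index);
 *	cdev_init(&md->cdev, &my_fops);
 *	err = cdev_device_add(&md->cdev, &md->dev);
 *	...
 *	cdev_device_del(&md->cdev, &md->dev);
 */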

/**
 * cdev_device_del() - inverse of cdev_device_add()
 * @cdev: the cdev structure
 * @dev: the device structure
 *
 * cdev_device_del() is a helper function to call cdev_del() and device_del().
 * It should be used whenever cdev_device_add() is used.
 *
 * If dev->devt is not set it will not remove the cdev and will be equivalent
 * to device_del().
 *
 * NOTE: This guarantees that associated sysfs callbacks are not running
 * or runnable, however any cdevs already open will remain and their fops
 * will still be callable even after this function returns.
 */
void cdev_device_del(struct cdev *cdev, struct device *dev)
{
	device_del(dev);
	if (dev->devt)
		cdev_del(cdev);
}

static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}

/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * cdev_del() removes @p from the system, possibly freeing the structure
 * itself.
 *
 * NOTE: This guarantees that the cdev device will no longer be able to be
 * opened, however any cdevs already open will remain and their fops will
 * still be callable even after cdev_del() returns.
 */
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}

static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	struct kobject *parent = kobj->parent;

	cdev_purge(p);
	kobject_put(parent);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	struct kobject *parent = kobj->parent;

	cdev_purge(p);
	kfree(p);
	kobject_put(parent);
}

static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};

/**
 * cdev_alloc() - allocate a cdev structure
 *
 * Allocates and returns a cdev structure, or NULL on failure.
 */
struct cdev *cdev_alloc(void)
{
	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
	if (p) {
		INIT_LIST_HEAD(&p->list);
		kobject_init(&p->kobj, &ktype_cdev_dynamic);
	}
	return p;
}
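
/*
 * Usage sketch (editorial example; "my_fops" and "devt" are hypothetical): a
 * cdev obtained from cdev_alloc() is freed by cdev_dynamic_release() when its
 * last reference is dropped, so cdev_del() (or kobject_put() on an error
 * path, as __register_chrdev() does above) is how it is disposed of.
 *
 *	struct cdev *c = cdev_alloc();
 *
 *	if (!c)
 *		return -ENOMEM;
 *	c->ops = &my_fops;
 *	c->owner = THIS_MODULE;
 *	err = cdev_add(c, devt, 1);
 *	if (err) {
 *		kobject_put(&c->kobj);
 *		return err;
 *	}
 */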

/**
 * cdev_init() - initialize a cdev structure
 * @cdev: the structure to initialize
 * @fops: the file_operations for this device
 *
 * Initializes @cdev, remembering @fops, making it ready to add to the
 * system with cdev_add().
 */
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	kobject_init(&cdev->kobj, &ktype_cdev_default);
	cdev->ops = fops;
}

static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}

void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}


/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(cdev_set_parent);
EXPORT_SYMBOL(cdev_device_add);
EXPORT_SYMBOL(cdev_device_del);
EXPORT_SYMBOL(__register_chrdev);
EXPORT_SYMBOL(__unregister_chrdev);