// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

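/*
 * RCU callback that frees an icq.  The owning slab cache is taken from
 * icq->__rcu_icq_cache, which ioc_destroy_icq() records because icq->q
 * (and with it the elevator's icq_cache) may already be gone by the time
 * this callback runs.
 */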
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq.  Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->uses_mq && et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
		et->ops.sq.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq.  Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting the lookup hint to @icq and clearing it from @icq
	 * are done under queue_lock.  If it's not pointing to @icq now, it
	 * never will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time the RCU callback runs,
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	icq->flags |= ICQ_DESTROYED;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting an icq may call into put_io_context() through the
	 * elevator, which would trigger a lockdep warning.  The iocs are
	 * guaranteed to be different, so use a different locking subclass
	 * here.  Use the irqsave variant as there's no
	 * spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

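	/*
	 * Take each icq's queue_lock with a trylock only; if it cannot be
	 * taken, drop the ioc lock, relax, and retry so the two locks are
	 * never acquired in a deadlock-prone order.
	 */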
	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference on an io_context
 * @ioc: io_context to put
 *
 * Decrement the reference count of @ioc and release it if the count
 * reaches zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);
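/*
 * Example (illustrative sketch only, not taken verbatim from any caller):
 * a scheduler path that caches an icq in a request typically pins the
 * owning io_context for the request's lifetime with this get/put pair:
 *
 *	get_io_context(icq->ioc);
 *	rq->elv.icq = icq;
 *	...
 *	(and when the request is freed)
 *	put_io_context(rq->elv.icq->ioc);
 */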

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If the active reference reaches zero
 * after the put, @ioc can never issue further IOs and ioscheds are
 * notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct elevator_type *et;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read the comment in ioc_release_fn()
	 * for an explanation of the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		et = icq->q->elevator->type;
		if (et->uses_mq) {
			ioc_exit_icq(icq);
		} else {
			if (spin_trylock(icq->q->queue_lock)) {
				ioc_exit_icq(icq);
				spin_unlock(icq->q->queue_lock);
			} else {
				spin_unlock_irqrestore(&ioc->lock, flags);
				cpu_relax();
				goto retry;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}
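/*
 * Note: the task exit path (do_exit()) is expected to call
 * exit_io_context() only when ->io_context is set, so @ioc above is
 * assumed to be non-NULL.
 */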
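/*
 * Helper for ioc_clear_queue(): exit and release every icq on an
 * icq_list that was previously spliced off a queue.  Runs under
 * rcu_read_lock() and skips icqs that the release work has already
 * marked ICQ_DESTROYED.
 */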
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	rcu_read_lock();
	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		if (icq->flags & ICQ_DESTROYED) {
			spin_unlock_irqrestore(&ioc->lock, flags);
			continue;
		}
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
	rcu_read_unlock();
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);

	if (q->mq_ops) {
		spin_unlock_irq(q->queue_lock);
		__ioc_clear_queue(&icq_list);
	} else {
		__ioc_clear_queue(&icq_list);
		spin_unlock_irq(q->queue_lock);
	}
}
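/*
 * Note: ioc_clear_queue() is typically invoked from elevator-switch and
 * queue-teardown paths, so that no io_cq keeps referencing the old
 * elevator afterwards.
 */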
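/**
 * create_task_io_context - allocate and install an io_context for a task
 * @task: task to install the io_context on
 * @gfp_flags: allocation flags
 * @node: allocation node
 *
 * Returns 0 if @task ends up with an io_context (either the one
 * allocated here or one installed concurrently by someone else),
 * -EBUSY if @task is exiting and no io_context could be installed, or
 * -ENOMEM if the allocation fails.
 */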
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  The ioc shouldn't be installed if someone else
	 * already did so, or if @task, which isn't %current, is exiting.
	 * Note that we need to allow ioc creation on exiting %current as
	 * the exit path may issue IOs from e.g. exit_files().  The exit
	 * path is responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
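/*
 * Example (illustrative sketch, assuming an ioprio-style caller): fetch
 * or create @task's io_context, update it, then drop the reference taken
 * here:
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 *	if (ioc) {
 *		ioc->ioprio = ioprio;
 *		put_io_context(ioc);
 *	}
 */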

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the @ioc - @q pair from @ioc.  Must
 * be called with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure an io_cq linking @ioc and @q exists.  If the icq doesn't
 * exist, it will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->uses_mq && et->ops.mq.init_icq)
			et->ops.mq.init_icq(icq);
		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
			et->ops.sq.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
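/*
 * Example (illustrative sketch, assuming an elevator/scheduler caller):
 * the usual pattern is to look up the icq under queue_lock and fall back
 * to creating one:
 *
 *	spin_lock_irq(q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 *	if (!icq)
 *		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
 */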

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);