Lines matching refs:dio — every reference to the identifier dio in fs/direct-io.c (Linux kernel). In each entry the leading number is the source line, the fragment is the matching code, and the trailing note names the enclosing function and how dio is used there (argument, local, struct).

115 struct dio {  struct
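Taken together, the references below touch most of struct dio. A partial reconstruction, inferred from the members actually used in this listing (kernel types kept as in the source, so treat it as a reading aid rather than a compilable unit; the trailing union follows the source of this era and explains why the setup memset stops at offsetof(struct dio, pages)):

    /* Reconstructed from the reference listing; unreferenced members and
     * exact packing are not guaranteed. */
    struct dio {
            int flags;                      /* DIO_LOCKING, DIO_SKIP_HOLES, ... */
            int op;                         /* REQ_OP_READ or REQ_OP_WRITE */
            int op_flags;                   /* REQ_SYNC | REQ_IDLE, REQ_NOWAIT */
            blk_qc_t bio_cookie;            /* cookie handed to blk_poll() */
            struct gendisk *bio_disk;
            struct inode *inode;
            loff_t i_size;                  /* i_size snapshot taken at setup */
            dio_iodone_t *end_io;           /* filesystem completion hook */
            void *private;                  /* copied from map_bh->b_private */

            spinlock_t bio_lock;            /* protects the completion state below */
            int page_errors;                /* first error while pinning pages */
            int is_async;
            bool defer_completion;          /* finish via sb->s_dio_done_wq */
            bool should_dirty;              /* redirty pages after a READ */
            int io_error;                   /* first bio error (-EIO or -EAGAIN) */
            unsigned long refcount;         /* submitter + one per in-flight bio */
            struct bio *bio_list;           /* completed bios, linked via bi_private */
            struct task_struct *waiter;     /* task sleeping in dio_await_one() */

            struct kiocb *iocb;
            ssize_t result;                 /* bytes transferred */

            union {                         /* never both live at once */
                    struct page *pages[DIO_PAGES];    /* batch from dio_refill_pages() */
                    struct work_struct complete_work; /* deferred dio_complete() */
            };
    };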
166 static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio) in dio_refill_pages() argument
170 ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES, in dio_refill_pages()
173 if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) { in dio_refill_pages()
180 if (dio->page_errors == 0) in dio_refill_pages()
181 dio->page_errors = ret; in dio_refill_pages()
183 dio->pages[0] = page; in dio_refill_pages()
208 static inline struct page *dio_get_page(struct dio *dio, in dio_get_page() argument
214 ret = dio_refill_pages(dio, sdio); in dio_get_page()
219 return dio->pages[sdio->head]; in dio_get_page()
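dio_refill_pages() pins up to DIO_PAGES user pages in one iov_iter_get_pages() call; dio_get_page() then serves them one at a time from sdio->head, refilling only when the batch runs dry, and on a failed pin during a write that still has mapped blocks it falls back to the zero page while recording the first error in dio->page_errors. A minimal, compilable userspace sketch of the batch-and-cursor pattern (all names here are hypothetical):

    #include <stdio.h>

    #define BATCH 8   /* stands in for DIO_PAGES */

    /* Batch-and-cursor pattern: refill() fetches a batch (here it just
     * fabricates numbers), get_item() serves from 'head' and refills when
     * the batch is exhausted, mirroring dio_get_page(). */
    struct batcher {
            int items[BATCH];
            int head, tail;     /* valid entries live in [head, tail) */
            int next;           /* stands in for the iov_iter cursor */
    };

    static int refill(struct batcher *b)
    {
            for (int i = 0; i < BATCH; i++)
                    b->items[i] = b->next++;
            b->head = 0;
            b->tail = BATCH;
            return 0;
    }

    static int get_item(struct batcher *b, int *out)
    {
            if (b->head == b->tail) {       /* cache empty: refill */
                    int ret = refill(b);
                    if (ret)
                            return ret;
            }
            *out = b->items[b->head];       /* like dio->pages[sdio->head]; the   */
            return 0;                       /* caller advances head on consuming */
    }

    int main(void)
    {
            struct batcher b = {0};
            for (int i = 0; i < 12; i++) {
                    int v;
                    if (get_item(&b, &v) == 0) {
                            b.head++;       /* consume the item */
                            printf("%d ", v);
                    }
            }
            putchar('\n');
            return 0;
    }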
255 static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags) in dio_complete() argument
257 loff_t offset = dio->iocb->ki_pos; in dio_complete()
270 if (dio->result) { in dio_complete()
271 transferred = dio->result; in dio_complete()
274 if ((dio->op == REQ_OP_READ) && in dio_complete()
275 ((offset + transferred) > dio->i_size)) in dio_complete()
276 transferred = dio->i_size - offset; in dio_complete()
283 ret = dio->page_errors; in dio_complete()
285 ret = dio->io_error; in dio_complete()
289 if (dio->end_io) { in dio_complete()
291 err = dio->end_io(dio->iocb, offset, ret, dio->private); in dio_complete()
309 ret > 0 && dio->op == REQ_OP_WRITE && in dio_complete()
310 dio->inode->i_mapping->nrpages) { in dio_complete()
311 err = invalidate_inode_pages2_range(dio->inode->i_mapping, in dio_complete()
315 dio_warn_stale_pagecache(dio->iocb->ki_filp); in dio_complete()
318 inode_dio_end(dio->inode); in dio_complete()
326 dio->iocb->ki_pos += transferred; in dio_complete()
328 if (ret > 0 && dio->op == REQ_OP_WRITE) in dio_complete()
329 ret = generic_write_sync(dio->iocb, ret); in dio_complete()
330 dio->iocb->ki_complete(dio->iocb, ret, 0); in dio_complete()
333 kmem_cache_free(dio_cache, dio); in dio_complete()
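The dio_complete() fragments fix the final result in a specific order: take the byte count from dio->result, clamp a READ so it never reports data past the i_size snapshot, then let the first recorded page-pinning error, and after that the first bio error, override the count. Only then does the write path invalidate cached pages over the written range (warning about stale pagecache on failure), advance ki_pos, run generic_write_sync(), and, for async I/O, call ki_complete() before freeing the dio. The selection arithmetic alone, as a compilable userspace sketch (the helper name is hypothetical):

    #include <stdio.h>
    #include <errno.h>

    /* Result selection as seen in dio_complete(): clamp reads at the i_size
     * snapshot, then let page_errors, then io_error, override the count. */
    static long dio_pick_result(int is_read, long long offset, long transferred,
                                long long i_size, int page_errors, int io_error)
    {
            if (is_read && offset + transferred > i_size)
                    transferred = i_size - offset;  /* never report bytes past EOF */
            if (page_errors)
                    return page_errors;             /* e.g. -EFAULT while pinning pages */
            if (io_error)
                    return io_error;                /* e.g. -EIO reported by a bio */
            return transferred;
    }

    int main(void)
    {
            /* 4096-byte read at offset 8192 in a 10000-byte file: 1808 bytes */
            printf("%ld\n", dio_pick_result(1, 8192, 4096, 10000, 0, 0));
            /* same read after a pinning fault: -14 (-EFAULT) wins */
            printf("%ld\n", dio_pick_result(1, 8192, 4096, 10000, -EFAULT, 0));
            return 0;
    }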
339 struct dio *dio = container_of(work, struct dio, complete_work); in dio_aio_complete_work() local
341 dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE); in dio_aio_complete_work()
344 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
351 struct dio *dio = bio->bi_private; in dio_bio_end_aio() local
357 dio_bio_complete(dio, bio); in dio_bio_end_aio()
359 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_end_aio()
360 remaining = --dio->refcount; in dio_bio_end_aio()
361 if (remaining == 1 && dio->waiter) in dio_bio_end_aio()
362 wake_up_process(dio->waiter); in dio_bio_end_aio()
363 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_end_aio()
374 if (dio->result) in dio_bio_end_aio()
375 defer_completion = dio->defer_completion || in dio_bio_end_aio()
376 (dio->op == REQ_OP_WRITE && in dio_bio_end_aio()
377 dio->inode->i_mapping->nrpages); in dio_bio_end_aio()
379 INIT_WORK(&dio->complete_work, dio_aio_complete_work); in dio_bio_end_aio()
380 queue_work(dio->inode->i_sb->s_dio_done_wq, in dio_bio_end_aio()
381 &dio->complete_work); in dio_bio_end_aio()
383 dio_complete(dio, 0, DIO_COMPLETE_ASYNC); in dio_bio_end_aio()
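Reassembled from the fragments above, the async completion path is: finish the bio, drop one reference under bio_lock (waking a parked synchronous submitter when only its reference remains), and on the final put either hand dio_complete() to the superblock's dio workqueue, when finishing may need to sleep (deferred completion requested, or a write into a mapping that still has cached pages to invalidate), or run it inline. A condensed reconstruction, not standalone-compilable kernel code:

    /* Reconstructed from the reference listing; simplified. */
    static void dio_bio_end_aio(struct bio *bio)
    {
            struct dio *dio = bio->bi_private;
            unsigned long remaining, flags;
            bool defer_completion = false;

            dio_bio_complete(dio, bio);             /* record errors, release pages */

            spin_lock_irqsave(&dio->bio_lock, flags);
            remaining = --dio->refcount;
            if (remaining == 1 && dio->waiter)
                    wake_up_process(dio->waiter);   /* sync submitter in dio_await_one() */
            spin_unlock_irqrestore(&dio->bio_lock, flags);

            if (remaining == 0) {
                    if (dio->result)
                            defer_completion = dio->defer_completion ||
                                               (dio->op == REQ_OP_WRITE &&
                                                dio->inode->i_mapping->nrpages);
                    if (defer_completion) {
                            INIT_WORK(&dio->complete_work, dio_aio_complete_work);
                            queue_work(dio->inode->i_sb->s_dio_done_wq,
                                       &dio->complete_work);
                    } else {
                            dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
                    }
            }
    }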
397 struct dio *dio = bio->bi_private; in dio_bio_end_io() local
400 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_end_io()
401 bio->bi_private = dio->bio_list; in dio_bio_end_io()
402 dio->bio_list = bio; in dio_bio_end_io()
403 if (--dio->refcount == 1 && dio->waiter) in dio_bio_end_io()
404 wake_up_process(dio->waiter); in dio_bio_end_io()
405 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_end_io()
418 struct dio *dio = bio->bi_private; in dio_end_io() local
420 if (dio->is_async) in dio_end_io()
428 dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, in dio_bio_alloc() argument
442 bio_set_op_attrs(bio, dio->op, dio->op_flags); in dio_bio_alloc()
443 if (dio->is_async) in dio_bio_alloc()
448 bio->bi_write_hint = dio->iocb->ki_hint; in dio_bio_alloc()
461 static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) in dio_bio_submit() argument
466 bio->bi_private = dio; in dio_bio_submit()
468 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_submit()
469 dio->refcount++; in dio_bio_submit()
470 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_submit()
472 if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) in dio_bio_submit()
475 dio->bio_disk = bio->bi_disk; in dio_bio_submit()
478 sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); in dio_bio_submit()
479 dio->bio_cookie = BLK_QC_T_NONE; in dio_bio_submit()
481 dio->bio_cookie = submit_bio(bio); in dio_bio_submit()
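The submit side of the same protocol: dio_bio_submit() takes the bio's reference under bio_lock before the bio leaves, marks the pages dirty up front for async reads into user memory, and records the disk and submission cookie so a IOCB_HIPRI waiter can poll. A condensed reconstruction from the fragments (the sdio bookkeeping after submission is elided):

    /* Reconstructed from the reference listing; simplified. */
    static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
    {
            struct bio *bio = sdio->bio;
            unsigned long flags;

            bio->bi_private = dio;                  /* the end_io handlers need the dio */

            spin_lock_irqsave(&dio->bio_lock, flags);
            dio->refcount++;                        /* the in-flight bio's reference */
            spin_unlock_irqrestore(&dio->bio_lock, flags);

            if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
                    bio_set_pages_dirty(bio);       /* data lands in user memory */

            dio->bio_disk = bio->bi_disk;           /* remembered for blk_poll() */
            if (sdio->submit_io) {
                    sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
                    dio->bio_cookie = BLK_QC_T_NONE;
            } else {
                    dio->bio_cookie = submit_bio(bio);
            }
            sdio->bio = NULL;
    }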
491 static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio) in dio_cleanup() argument
494 put_page(dio->pages[sdio->head++]); in dio_cleanup()
503 static struct bio *dio_await_one(struct dio *dio) in dio_await_one() argument
508 spin_lock_irqsave(&dio->bio_lock, flags); in dio_await_one()
516 while (dio->refcount > 1 && dio->bio_list == NULL) { in dio_await_one()
518 dio->waiter = current; in dio_await_one()
519 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_await_one()
520 if (!(dio->iocb->ki_flags & IOCB_HIPRI) || in dio_await_one()
521 !blk_poll(dio->bio_disk->queue, dio->bio_cookie)) in dio_await_one()
524 spin_lock_irqsave(&dio->bio_lock, flags); in dio_await_one()
525 dio->waiter = NULL; in dio_await_one()
527 if (dio->bio_list) { in dio_await_one()
528 bio = dio->bio_list; in dio_await_one()
529 dio->bio_list = bio->bi_private; in dio_await_one()
531 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_await_one()
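dio_await_one() is the consumer half of the handshake: under bio_lock it waits while bios are still in flight (refcount > 1) and none have been queued on bio_list, parking itself as dio->waiter; for IOCB_HIPRI requests it spins in blk_poll() instead of sleeping. Reconstructed from the fragments (the __set_current_state()/io_schedule() pair is in the source but not in this listing):

    /* Reconstructed from the reference listing; simplified. */
    static struct bio *dio_await_one(struct dio *dio)
    {
            unsigned long flags;
            struct bio *bio = NULL;

            spin_lock_irqsave(&dio->bio_lock, flags);
            while (dio->refcount > 1 && dio->bio_list == NULL) {
                    __set_current_state(TASK_UNINTERRUPTIBLE);
                    dio->waiter = current;
                    spin_unlock_irqrestore(&dio->bio_lock, flags);
                    if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
                        !blk_poll(dio->bio_disk->queue, dio->bio_cookie))
                            io_schedule();          /* woken by the end_io handlers */
                    spin_lock_irqsave(&dio->bio_lock, flags);
                    dio->waiter = NULL;
            }
            if (dio->bio_list) {                    /* pop one completed bio */
                    bio = dio->bio_list;
                    dio->bio_list = bio->bi_private;
            }
            spin_unlock_irqrestore(&dio->bio_lock, flags);
            return bio;
    }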
538 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio) in dio_bio_complete() argument
546 dio->io_error = -EAGAIN; in dio_bio_complete()
548 dio->io_error = -EIO; in dio_bio_complete()
551 if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) { in dio_bio_complete()
557 if (dio->op == REQ_OP_READ && !PageCompound(page) && in dio_bio_complete()
558 dio->should_dirty) in dio_bio_complete()
574 static void dio_await_completion(struct dio *dio) in dio_await_completion() argument
578 bio = dio_await_one(dio); in dio_await_completion()
580 dio_bio_complete(dio, bio); in dio_await_completion()
591 static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio) in dio_bio_reap() argument
596 while (dio->bio_list) { in dio_bio_reap()
601 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_reap()
602 bio = dio->bio_list; in dio_bio_reap()
603 dio->bio_list = bio->bi_private; in dio_bio_reap()
604 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_reap()
605 ret2 = blk_status_to_errno(dio_bio_complete(dio, bio)); in dio_bio_reap()
638 static int dio_set_defer_completion(struct dio *dio) in dio_set_defer_completion() argument
640 struct super_block *sb = dio->inode->i_sb; in dio_set_defer_completion()
642 if (dio->defer_completion) in dio_set_defer_completion()
644 dio->defer_completion = true; in dio_set_defer_completion()
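dio_set_defer_completion() just records that the final dio_complete() must run from process context and makes sure the per-superblock workqueue used for that exists. A reconstruction from the fragments plus the sb_init_dio_done_wq() call visible in the do_blockdev_direct_IO() setup below:

    /* Reconstructed from the reference listing; simplified. */
    static int dio_set_defer_completion(struct dio *dio)
    {
            struct super_block *sb = dio->inode->i_sb;

            if (dio->defer_completion)
                    return 0;                        /* already arranged */
            dio->defer_completion = true;
            if (!sb->s_dio_done_wq)
                    return sb_init_dio_done_wq(sb);  /* lazily create the workqueue */
            return 0;
    }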
673 static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, in get_more_blocks() argument
688 ret = dio->page_errors; in get_more_blocks()
710 create = dio->op == REQ_OP_WRITE; in get_more_blocks()
711 if (dio->flags & DIO_SKIP_HOLES) { in get_more_blocks()
712 i_size = i_size_read(dio->inode); in get_more_blocks()
717 ret = (*sdio->get_block)(dio->inode, fs_startblk, in get_more_blocks()
721 dio->private = map_bh->b_private; in get_more_blocks()
724 ret = dio_set_defer_completion(dio); in get_more_blocks()
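get_more_blocks() asks the filesystem's get_block callback to map, and for writes to allocate, a run of blocks; under DIO_SKIP_HOLES a write that starts inside i_size may not allocate, so only overwrites proceed there, and a mapping flagged for deferred completion (unwritten extents being the classic case) flips the dio onto the workqueue path via dio_set_defer_completion(). The create decision as plain arithmetic in a compilable sketch (hypothetical helper; the comparison against (i_size - 1) >> i_blkbits follows the kernel source of this era):

    #include <stdio.h>

    /* Decide whether get_block() may allocate: writes may, unless
     * DIO_SKIP_HOLES applies and the request starts inside i_size. */
    static int want_create(int is_write, int skip_holes, long long i_size,
                           unsigned long long fs_startblk, unsigned int i_blkbits)
    {
            int create = is_write;

            if (skip_holes && i_size &&
                fs_startblk <= (unsigned long long)((i_size - 1) >> i_blkbits))
                    create = 0;     /* inside i_size: overwrite only */
            return create;
    }

    int main(void)
    {
            /* 4 KiB fs blocks (i_blkbits = 12), 1 MiB file */
            printf("%d\n", want_create(1, 1, 1 << 20, 100, 12)); /* 0: inside i_size */
            printf("%d\n", want_create(1, 1, 1 << 20, 500, 12)); /* 1: past i_size  */
            return 0;
    }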
732 static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio, in dio_new_bio() argument
738 ret = dio_bio_reap(dio, sdio); in dio_new_bio()
744 dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages); in dio_new_bio()
789 static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio, in dio_send_cur_page() argument
815 dio_bio_submit(dio, sdio); in dio_send_cur_page()
819 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); in dio_send_cur_page()
825 dio_bio_submit(dio, sdio); in dio_send_cur_page()
826 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); in dio_send_cur_page()
854 submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, in submit_page_section() argument
861 if (dio->op == REQ_OP_WRITE) { in submit_page_section()
883 ret = dio_send_cur_page(dio, sdio, map_bh); in submit_page_section()
902 ret = dio_send_cur_page(dio, sdio, map_bh); in submit_page_section()
904 dio_bio_submit(dio, sdio); in submit_page_section()
920 static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio, in dio_zero_block() argument
948 if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes, in dio_zero_block()
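dio_zero_block() zero-fills the sub-block pieces of a freshly allocated filesystem block that the I/O itself does not cover, at the front (end == 0) or the back (end == 1) of the request, by feeding the zero page through submit_page_section(). The chunk computation as a compilable sketch (hypothetical helper, following the kernel source of this era):

    #include <stdio.h>

    /* How many device-sized sub-blocks to zero at the front (end == 0) or
     * back (end == 1) of a newly allocated fs block. */
    static unsigned chunk_to_zero(unsigned block_in_file,
                                  unsigned dio_blocks_per_fs_block, int end)
    {
            /* offset of the I/O boundary within the fs block */
            unsigned n = block_in_file & (dio_blocks_per_fs_block - 1);

            if (n == 0)
                    return 0;                        /* aligned: nothing to zero */
            if (end)
                    n = dio_blocks_per_fs_block - n; /* zero the tail, not the head */
            return n;
    }

    int main(void)
    {
            /* 4 KiB fs blocks over 512-byte device blocks: 8 sub-blocks each */
            printf("%u\n", chunk_to_zero(3, 8, 0));  /* 3 leading sub-blocks */
            printf("%u\n", chunk_to_zero(3, 8, 1));  /* 5 trailing sub-blocks */
            return 0;
    }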
971 static int do_direct_IO(struct dio *dio, struct dio_submit *sdio, in do_direct_IO() argument
982 page = dio_get_page(dio, sdio); in do_direct_IO()
1003 ret = get_more_blocks(dio, sdio, map_bh); in do_direct_IO()
1049 if (dio->op == REQ_OP_WRITE) { in do_direct_IO()
1058 i_size_aligned = ALIGN(i_size_read(dio->inode), in do_direct_IO()
1069 dio->result += 1 << blkbits; in do_direct_IO()
1079 dio_zero_block(dio, sdio, 0, map_bh); in do_direct_IO()
1097 ret = submit_page_section(dio, sdio, page, in do_direct_IO()
1110 dio->result += this_chunk_bytes; in do_direct_IO()
1125 static inline int drop_refcount(struct dio *dio) in drop_refcount() argument
1141 spin_lock_irqsave(&dio->bio_lock, flags); in drop_refcount()
1142 ret2 = --dio->refcount; in drop_refcount()
1143 spin_unlock_irqrestore(&dio->bio_lock, flags); in drop_refcount()
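drop_refcount() closes the lifetime protocol that runs through this file: the submitter holds one reference, every submitted bio holds another, the end_io handlers drop theirs and wake the waiter once only the submitter's remains, and whoever drops the count to zero runs dio_complete() and frees the dio. A runnable pthread analogue of that protocol (hypothetical names; the mutex stands in for bio_lock, the condvar for the waiter wakeup):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
    static unsigned long refcount = 1;          /* the submitter's reference */

    static void *bio_complete_thread(void *arg)
    {
            usleep(1000);                       /* simulated device latency */
            pthread_mutex_lock(&lock);
            if (--refcount == 1)                /* only the submitter's ref left */
                    pthread_cond_signal(&done); /* like wake_up_process(dio->waiter) */
            pthread_mutex_unlock(&lock);
            return arg;
    }

    int main(void)
    {
            pthread_t bios[4];

            for (int i = 0; i < 4; i++) {       /* dio_bio_submit(): take a ref, submit */
                    pthread_mutex_lock(&lock);
                    refcount++;
                    pthread_mutex_unlock(&lock);
                    pthread_create(&bios[i], NULL, bio_complete_thread, NULL);
            }

            pthread_mutex_lock(&lock);          /* dio_await_completion() analogue */
            while (refcount > 1)
                    pthread_cond_wait(&done, &lock);
            refcount--;                         /* drop_refcount(): the final put */
            pthread_mutex_unlock(&lock);

            printf("all I/O complete, refcount %lu -> free the dio\n", refcount);

            for (int i = 0; i < 4; i++)
                    pthread_join(bios[i], NULL);
            return 0;
    }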
1185 struct dio *dio; in do_blockdev_direct_IO() local
1208 dio = kmem_cache_alloc(dio_cache, GFP_KERNEL); in do_blockdev_direct_IO()
1210 if (!dio) in do_blockdev_direct_IO()
1217 memset(dio, 0, offsetof(struct dio, pages)); in do_blockdev_direct_IO()
1219 dio->flags = flags; in do_blockdev_direct_IO()
1220 if (dio->flags & DIO_LOCKING) { in do_blockdev_direct_IO()
1232 kmem_cache_free(dio_cache, dio); in do_blockdev_direct_IO()
1239 dio->i_size = i_size_read(inode); in do_blockdev_direct_IO()
1240 if (iov_iter_rw(iter) == READ && offset >= dio->i_size) { in do_blockdev_direct_IO()
1241 if (dio->flags & DIO_LOCKING) in do_blockdev_direct_IO()
1243 kmem_cache_free(dio_cache, dio); in do_blockdev_direct_IO()
1255 dio->is_async = false; in do_blockdev_direct_IO()
1257 dio->is_async = false; in do_blockdev_direct_IO()
1259 dio->is_async = true; in do_blockdev_direct_IO()
1261 dio->inode = inode; in do_blockdev_direct_IO()
1263 dio->op = REQ_OP_WRITE; in do_blockdev_direct_IO()
1264 dio->op_flags = REQ_SYNC | REQ_IDLE; in do_blockdev_direct_IO()
1266 dio->op_flags |= REQ_NOWAIT; in do_blockdev_direct_IO()
1268 dio->op = REQ_OP_READ; in do_blockdev_direct_IO()
1275 if (dio->is_async && iov_iter_rw(iter) == WRITE) { in do_blockdev_direct_IO()
1278 retval = dio_set_defer_completion(dio); in do_blockdev_direct_IO()
1279 else if (!dio->inode->i_sb->s_dio_done_wq) { in do_blockdev_direct_IO()
1285 retval = sb_init_dio_done_wq(dio->inode->i_sb); in do_blockdev_direct_IO()
1292 kmem_cache_free(dio_cache, dio); in do_blockdev_direct_IO()
1308 dio->end_io = end_io; in do_blockdev_direct_IO()
1313 dio->iocb = iocb; in do_blockdev_direct_IO()
1315 spin_lock_init(&dio->bio_lock); in do_blockdev_direct_IO()
1316 dio->refcount = 1; in do_blockdev_direct_IO()
1318 dio->should_dirty = (iter->type == ITER_IOVEC); in do_blockdev_direct_IO()
1333 retval = do_direct_IO(dio, &sdio, &map_bh); in do_blockdev_direct_IO()
1335 dio_cleanup(dio, &sdio); in do_blockdev_direct_IO()
1348 dio_zero_block(dio, &sdio, 1, &map_bh); in do_blockdev_direct_IO()
1353 ret2 = dio_send_cur_page(dio, &sdio, &map_bh); in do_blockdev_direct_IO()
1360 dio_bio_submit(dio, &sdio); in do_blockdev_direct_IO()
1368 dio_cleanup(dio, &sdio); in do_blockdev_direct_IO()
1375 if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING)) in do_blockdev_direct_IO()
1376 inode_unlock(dio->inode); in do_blockdev_direct_IO()
1386 if (dio->is_async && retval == 0 && dio->result && in do_blockdev_direct_IO()
1387 (iov_iter_rw(iter) == READ || dio->result == count)) in do_blockdev_direct_IO()
1390 dio_await_completion(dio); in do_blockdev_direct_IO()
1392 if (drop_refcount(dio) == 0) { in do_blockdev_direct_IO()
1393 retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE); in do_blockdev_direct_IO()
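Finally, the do_blockdev_direct_IO() fragments line up into the whole submission sequence. An annotated outline built from the references above (plus the -EIOCBQUEUED return, which is in the source but not in this listing); a reading map, not compilable code, with error unwinding and the sdio bookkeeping elided:

    /*
     *   dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
     *   memset(dio, 0, offsetof(struct dio, pages));   // pages[] left uninitialised
     *   dio->flags = flags;                // DIO_LOCKING: take inode_lock for reads
     *   dio->i_size = i_size_read(inode);  // snapshot; a READ at/past it frees the
     *                                      // dio and returns early
     *   dio->is_async = ...;               // false for sync iocbs and for writes
     *                                      // that must complete synchronously
     *   dio->op/op_flags: REQ_OP_WRITE with REQ_SYNC | REQ_IDLE (plus REQ_NOWAIT
     *                     when requested), or REQ_OP_READ
     *   async WRITE: dio_set_defer_completion() or sb_init_dio_done_wq(), so a
     *                process context exists for the final completion
     *   dio->end_io = end_io; dio->iocb = iocb;
     *   spin_lock_init(&dio->bio_lock); dio->refcount = 1;
     *   dio->should_dirty = (iter->type == ITER_IOVEC);
     *
     *   retval = do_direct_IO(dio, &sdio, &map_bh);    // map blocks, build, submit bios
     *   dio_zero_block(dio, &sdio, 1, &map_bh);        // pad a trailing partial block
     *   dio_send_cur_page(); dio_bio_submit();         // flush the last page and bio
     *   dio_cleanup(dio, &sdio);                       // put unused pinned pages
     *   READ + DIO_LOCKING: inode_unlock(dio->inode);
     *
     *   fully-queued async I/O: retval = -EIOCBQUEUED;
     *   otherwise:              dio_await_completion(dio);
     *   if (drop_refcount(dio) == 0)
     *           retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
     */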
1427 dio_cache = KMEM_CACHE(dio, SLAB_PANIC); in dio_init()