Lines Matching refs:dio: cross-reference hits for the identifier dio in the iomap direct I/O code. Each entry shows the original source line number, the matching line, and the enclosing function; the trailing "argument" and "local" tags mark lines where dio is declared as a function parameter or a local variable.

1481 static ssize_t iomap_dio_complete(struct iomap_dio *dio)  in iomap_dio_complete()  argument
1483 struct kiocb *iocb = dio->iocb; in iomap_dio_complete()
1488 if (dio->end_io) { in iomap_dio_complete()
1489 ret = dio->end_io(iocb, in iomap_dio_complete()
1490 dio->error ? dio->error : dio->size, in iomap_dio_complete()
1491 dio->flags); in iomap_dio_complete()
1493 ret = dio->error; in iomap_dio_complete()
1497 ret = dio->size; in iomap_dio_complete()
1499 if (offset + ret > dio->i_size && in iomap_dio_complete()
1500 !(dio->flags & IOMAP_DIO_WRITE)) in iomap_dio_complete()
1501 ret = dio->i_size - offset; in iomap_dio_complete()
1517 if (!dio->error && in iomap_dio_complete()
1518 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) { in iomap_dio_complete()
1522 (offset + dio->size - 1) >> PAGE_SHIFT); in iomap_dio_complete()
1531 if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC)) in iomap_dio_complete()
1535 kfree(dio); in iomap_dio_complete()
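
The iomap_dio_complete() hits above select the return value (the end_io callback's result, the recorded error, or dio->size) and clamp a read that would otherwise report bytes past the file size sampled at submission. A runnable userspace sketch of just that clamp; the names are hypothetical, and only the rule on lines 1499-1501 is taken from the listing:

    #include <stdio.h>

    /* Hypothetical stand-ins for offset, dio->size and dio->i_size. */
    static long long clamp_read_result(long long offset, long long transferred,
                                       long long i_size, int is_write)
    {
        /* cf. lines 1499-1501: a read must not report bytes beyond EOF
         * as it was sampled when the dio was set up. */
        if (offset + transferred > i_size && !is_write)
            return i_size - offset;
        return transferred;
    }

    int main(void)
    {
        /* A 4096-byte read that starts 1024 bytes before EOF reports 1024. */
        printf("%lld\n", clamp_read_result(7168, 4096, 8192, 0));
        return 0;
    }
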
1542 struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work); in iomap_dio_complete_work() local
1543 struct kiocb *iocb = dio->iocb; in iomap_dio_complete_work()
1545 iocb->ki_complete(iocb, iomap_dio_complete(dio), 0); in iomap_dio_complete_work()
1553 static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret) in iomap_dio_set_error() argument
1555 cmpxchg(&dio->error, 0, ret); in iomap_dio_set_error()
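
iomap_dio_set_error() records only the first failure: cmpxchg(&dio->error, 0, ret) succeeds only while no error has been stored yet, so later errors from other bios are dropped. A minimal userspace sketch of the same first-error-wins idea using C11 atomics; the names are illustrative, not the kernel's:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int dio_error;                /* 0 means "no error recorded yet" */

    /* Only the first non-zero error sticks, like cmpxchg(&dio->error, 0, ret)
     * on line 1555; a later call finds a non-zero value and does nothing. */
    static void set_error(int ret)
    {
        int expected = 0;

        atomic_compare_exchange_strong(&dio_error, &expected, ret);
    }

    int main(void)
    {
        set_error(-5);                           /* first failure (-EIO) is recorded */
        set_error(-22);                          /* later failure (-EINVAL) is ignored */
        printf("%d\n", atomic_load(&dio_error)); /* prints -5 */
        return 0;
    }
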
1560 struct iomap_dio *dio = bio->bi_private; in iomap_dio_bio_end_io() local
1561 bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY); in iomap_dio_bio_end_io()
1564 iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status)); in iomap_dio_bio_end_io()
1566 if (atomic_dec_and_test(&dio->ref)) { in iomap_dio_bio_end_io()
1567 if (dio->wait_for_completion) { in iomap_dio_bio_end_io()
1568 struct task_struct *waiter = dio->submit.waiter; in iomap_dio_bio_end_io()
1569 WRITE_ONCE(dio->submit.waiter, NULL); in iomap_dio_bio_end_io()
1571 } else if (dio->flags & IOMAP_DIO_WRITE) { in iomap_dio_bio_end_io()
1572 struct inode *inode = file_inode(dio->iocb->ki_filp); in iomap_dio_bio_end_io()
1574 INIT_WORK(&dio->aio.work, iomap_dio_complete_work); in iomap_dio_bio_end_io()
1575 queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work); in iomap_dio_bio_end_io()
1577 iomap_dio_complete_work(&dio->aio.work); in iomap_dio_bio_end_io()
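
The iomap_dio_bio_end_io() hits show the completion scheme: each in-flight bio holds a reference on the dio, and whoever drops the last one finishes the I/O, either by waking a synchronous waiter or by completing it (via a workqueue for writes, inline for reads). A compressed userspace sketch of that last-reference-completes pattern; the struct, the flag, and the threads standing in for bios are illustrative only:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct fake_dio {
        _Atomic int ref;    /* one reference per in-flight "bio", plus the submitter's */
        _Atomic int done;   /* set by whoever drops the last reference */
    };

    /* Stand-in for a bio completion: drop one reference and, if it was the
     * last, complete the whole request (cf. atomic_dec_and_test(&dio->ref)). */
    static void *bio_end_io(void *arg)
    {
        struct fake_dio *dio = arg;

        if (atomic_fetch_sub(&dio->ref, 1) == 1)
            atomic_store(&dio->done, 1);
        return NULL;
    }

    int main(void)
    {
        struct fake_dio dio;
        pthread_t t[3];

        atomic_init(&dio.ref, 1);                /* the submitter's own reference */
        atomic_init(&dio.done, 0);

        for (int i = 0; i < 3; i++) {
            atomic_fetch_add(&dio.ref, 1);       /* cf. atomic_inc(&dio->ref) per submitted bio */
            pthread_create(&t[i], NULL, bio_end_io, &dio);
        }
        for (int i = 0; i < 3; i++)
            pthread_join(t[i], NULL);

        /* All "bios" have finished, so the submitter's drop is the last one and
         * it completes the request itself (cf. the atomic_dec_and_test() near
         * the end of iomap_dio_rw()). */
        if (atomic_fetch_sub(&dio.ref, 1) == 1)
            atomic_store(&dio.done, 1);

        printf("done=%d\n", atomic_load(&dio.done));  /* prints done=1 */
        return 0;
    }
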
1594 iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos, in iomap_dio_zero() argument
1603 bio->bi_private = dio; in iomap_dio_zero()
1610 atomic_inc(&dio->ref); in iomap_dio_zero()
1616 struct iomap_dio *dio, struct iomap *iomap) in iomap_dio_bio_actor() argument
1620 unsigned int align = iov_iter_alignment(dio->submit.iter); in iomap_dio_bio_actor()
1632 dio->flags |= IOMAP_DIO_UNWRITTEN; in iomap_dio_bio_actor()
1637 dio->flags |= IOMAP_DIO_COW; in iomap_dio_bio_actor()
1650 (dio->flags & IOMAP_DIO_WRITE_FUA) && in iomap_dio_bio_actor()
1659 iter = *dio->submit.iter; in iomap_dio_bio_actor()
1670 iomap_dio_zero(dio, iomap, pos - pad, pad); in iomap_dio_bio_actor()
1675 if (dio->error) { in iomap_dio_bio_actor()
1676 iov_iter_revert(dio->submit.iter, copied); in iomap_dio_bio_actor()
1683 bio->bi_write_hint = dio->iocb->ki_hint; in iomap_dio_bio_actor()
1684 bio->bi_ioprio = dio->iocb->ki_ioprio; in iomap_dio_bio_actor()
1685 bio->bi_private = dio; in iomap_dio_bio_actor()
1701 if (dio->flags & IOMAP_DIO_WRITE) { in iomap_dio_bio_actor()
1706 dio->flags &= ~IOMAP_DIO_WRITE_FUA; in iomap_dio_bio_actor()
1710 if (dio->flags & IOMAP_DIO_DIRTY) in iomap_dio_bio_actor()
1714 iov_iter_advance(dio->submit.iter, n); in iomap_dio_bio_actor()
1716 dio->size += n; in iomap_dio_bio_actor()
1722 atomic_inc(&dio->ref); in iomap_dio_bio_actor()
1724 dio->submit.last_queue = bdev_get_queue(iomap->bdev); in iomap_dio_bio_actor()
1725 dio->submit.cookie = submit_bio(bio); in iomap_dio_bio_actor()
1736 ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) { in iomap_dio_bio_actor()
1740 iomap_dio_zero(dio, iomap, pos, fs_block_size - pad); in iomap_dio_bio_actor()
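
The two iomap_dio_zero() calls above handle writes that are not aligned to the filesystem block: the head of the first block is zeroed from pos - pad for pad bytes, and the tail of the last block from pos for fs_block_size - pad bytes. A small arithmetic sketch; the pad computation itself is not among the matched lines and is assumed here to be the offset within the filesystem block:

    #include <stdio.h>

    int main(void)
    {
        unsigned int fs_block_size = 4096;
        long long pos = 4096 + 512;                    /* write begins 512 bytes into a block */
        unsigned int pad = pos & (fs_block_size - 1);  /* assumed: offset within the block */

        /* Head zeroing, cf. iomap_dio_zero(dio, iomap, pos - pad, pad) on line 1670. */
        printf("zero head: start=%lld len=%u\n", pos - pad, pad);

        pos = 2 * 4096 + 1024;                         /* write ends 1024 bytes into the last block */
        pad = pos & (fs_block_size - 1);
        /* Tail zeroing, cf. iomap_dio_zero(dio, iomap, pos, fs_block_size - pad) on line 1740. */
        printf("zero tail: start=%lld len=%u\n", pos, fs_block_size - pad);
        return 0;
    }
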
1746 iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio) in iomap_dio_hole_actor() argument
1748 length = iov_iter_zero(length, dio->submit.iter); in iomap_dio_hole_actor()
1749 dio->size += length; in iomap_dio_hole_actor()
1755 struct iomap_dio *dio, struct iomap *iomap) in iomap_dio_inline_actor() argument
1757 struct iov_iter *iter = dio->submit.iter; in iomap_dio_inline_actor()
1762 if (dio->flags & IOMAP_DIO_WRITE) { in iomap_dio_inline_actor()
1776 dio->size += copied; in iomap_dio_inline_actor()
1784 struct iomap_dio *dio = data; in iomap_dio_actor() local
1788 if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE)) in iomap_dio_actor()
1790 return iomap_dio_hole_actor(length, dio); in iomap_dio_actor()
1792 if (!(dio->flags & IOMAP_DIO_WRITE)) in iomap_dio_actor()
1793 return iomap_dio_hole_actor(length, dio); in iomap_dio_actor()
1794 return iomap_dio_bio_actor(inode, pos, length, dio, iomap); in iomap_dio_actor()
1796 return iomap_dio_bio_actor(inode, pos, length, dio, iomap); in iomap_dio_actor()
1798 return iomap_dio_inline_actor(inode, pos, length, dio, iomap); in iomap_dio_actor()
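
iomap_dio_actor() fans out by mapping type: a hole may only be read (zero-filled via the hole actor, with a WARN for writes), an unwritten extent is zero-filled on reads but does real bio I/O on writes, a mapped extent always goes through the bio actor, and inline data is copied directly. The case labels themselves are not among the matched lines, so the grouping is inferred; a schematic dispatcher in the same shape, using hypothetical types rather than the kernel's iomap constants:

    #include <stdio.h>

    /* Hypothetical extent classification, loosely modelled on the cases
     * visible in iomap_dio_actor(); not the kernel's iomap type constants. */
    enum extent_type { EXT_HOLE, EXT_UNWRITTEN, EXT_MAPPED, EXT_INLINE };

    static const char *dispatch(enum extent_type type, int is_write)
    {
        switch (type) {
        case EXT_HOLE:
            return "zero-fill (hole actor)";    /* a write to a hole would be a bug */
        case EXT_UNWRITTEN:
            return is_write ? "bio I/O (bio actor)" : "zero-fill (hole actor)";
        case EXT_MAPPED:
            return "bio I/O (bio actor)";
        case EXT_INLINE:
            return "copy to/from inline data (inline actor)";
        }
        return "unexpected mapping";
    }

    int main(void)
    {
        printf("%s\n", dispatch(EXT_UNWRITTEN, 0));    /* read of unwritten: zero-fill */
        printf("%s\n", dispatch(EXT_MAPPED, 1));       /* write of mapped: bio I/O */
        return 0;
    }
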
1826 struct iomap_dio *dio; in iomap_dio_rw() local
1833 dio = kmalloc(sizeof(*dio), GFP_KERNEL); in iomap_dio_rw()
1834 if (!dio) in iomap_dio_rw()
1837 dio->iocb = iocb; in iomap_dio_rw()
1838 atomic_set(&dio->ref, 1); in iomap_dio_rw()
1839 dio->size = 0; in iomap_dio_rw()
1840 dio->i_size = i_size_read(inode); in iomap_dio_rw()
1841 dio->end_io = end_io; in iomap_dio_rw()
1842 dio->error = 0; in iomap_dio_rw()
1843 dio->flags = 0; in iomap_dio_rw()
1845 dio->submit.iter = iter; in iomap_dio_rw()
1846 dio->submit.waiter = current; in iomap_dio_rw()
1847 dio->submit.cookie = BLK_QC_T_NONE; in iomap_dio_rw()
1848 dio->submit.last_queue = NULL; in iomap_dio_rw()
1851 if (pos >= dio->i_size) in iomap_dio_rw()
1855 dio->flags |= IOMAP_DIO_DIRTY; in iomap_dio_rw()
1858 dio->flags |= IOMAP_DIO_WRITE; in iomap_dio_rw()
1862 dio->flags |= IOMAP_DIO_NEED_SYNC; in iomap_dio_rw()
1871 dio->flags |= IOMAP_DIO_WRITE_FUA; in iomap_dio_rw()
1909 ret = iomap_apply(inode, pos, count, flags, ops, dio, in iomap_dio_rw()
1921 if (iov_iter_rw(iter) == READ && pos >= dio->i_size) { in iomap_dio_rw()
1927 iov_iter_revert(iter, pos - dio->i_size); in iomap_dio_rw()
1934 iomap_dio_set_error(dio, ret); in iomap_dio_rw()
1940 if (dio->flags & IOMAP_DIO_WRITE_FUA) in iomap_dio_rw()
1941 dio->flags &= ~IOMAP_DIO_NEED_SYNC; in iomap_dio_rw()
1958 dio->wait_for_completion = wait_for_completion; in iomap_dio_rw()
1959 if (!atomic_dec_and_test(&dio->ref)) { in iomap_dio_rw()
1965 if (!READ_ONCE(dio->submit.waiter)) in iomap_dio_rw()
1969 !dio->submit.last_queue || in iomap_dio_rw()
1970 !blk_poll(dio->submit.last_queue, in iomap_dio_rw()
1971 dio->submit.cookie)) in iomap_dio_rw()
1977 return iomap_dio_complete(dio); in iomap_dio_rw()
1980 kfree(dio); in iomap_dio_rw()
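
The tail of iomap_dio_rw() decides how completion happens: the submitter drops its initial reference, and if other references are still outstanding, a synchronous caller publishes itself as dio->submit.waiter and waits for the last bio completion to clear that pointer (optionally polling the last submit queue), while an asynchronous iocb returns to the caller right away and is finished from the bio completion path. A userspace sketch of just the waiter handshake; the names are illustrative and the yield loop stands in for the kernel's sleep/poll:

    #include <pthread.h>
    #include <sched.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Analogue of dio->submit.waiter: the submitter publishes itself, the
     * final completion clears the pointer and (in the kernel) wakes it. */
    static _Atomic(void *) waiter;

    static void *completion_side(void *arg)
    {
        (void)arg;
        /* ... last bio finishes ... */
        atomic_store(&waiter, NULL);          /* cf. WRITE_ONCE(dio->submit.waiter, NULL) */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        int self;                             /* stand-in for "current" */

        atomic_store(&waiter, (void *)&self); /* cf. dio->submit.waiter = current */
        pthread_create(&t, NULL, completion_side, NULL);

        while (atomic_load(&waiter))          /* cf. the !READ_ONCE(dio->submit.waiter) check */
            sched_yield();                    /* the kernel sleeps or polls the queue instead */

        pthread_join(t, NULL);
        printf("direct I/O complete\n");
        return 0;
    }
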