/linux-4.19.296/lib/ |
D | iov_iter.c |
    16 __v.iov_len = min(n, __p->iov_len - skip); \
    17 if (likely(__v.iov_len)) { \
    20 __v.iov_len -= left; \
    21 skip += __v.iov_len; \
    22 n -= __v.iov_len; \
    28 __v.iov_len = min(n, __p->iov_len); \
    29 if (unlikely(!__v.iov_len)) \
    33 __v.iov_len -= left; \
    34 skip = __v.iov_len; \
    35 n -= __v.iov_len; \
    [all …]
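The hits above are from the iterate-style helper macros in lib/iov_iter.c, which walk an iovec array while tracking an offset into the first segment (skip) and a remaining byte count (n), clamping each step with min(). Below is a minimal userspace sketch of that bookkeeping, copying into a flat buffer; it illustrates the pattern only and is not the kernel macro itself:

#include <stddef.h>
#include <string.h>
#include <sys/uio.h>

/*
 * Copy up to n bytes out of an iovec array into dst, starting skip
 * bytes into the data, mirroring the min()/skip/n bookkeeping of the
 * iterate macros above. Returns the number of bytes copied.
 */
static size_t copy_from_iovecs(void *dst, const struct iovec *iov,
                               unsigned long nr_segs, size_t skip, size_t n)
{
    size_t copied = 0;

    for (unsigned long seg = 0; seg < nr_segs && n; seg++) {
        const struct iovec *p = &iov[seg];
        size_t len;

        if (skip >= p->iov_len) {       /* whole segment lies before the offset */
            skip -= p->iov_len;
            continue;
        }
        len = p->iov_len - skip;
        if (len > n)
            len = n;                    /* min(n, p->iov_len - skip) */
        memcpy((char *)dst + copied, (char *)p->iov_base + skip, len);
        copied += len;
        n -= len;
        skip = 0;                       /* only the first segment is offset */
    }
    return copied;
}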
|
/linux-4.19.296/fs/cifs/ |
D | smb2transport.c |
    214 if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) { in smb2_calc_signature()
    216 iov[0].iov_len); in smb2_calc_signature()
    273 label.iov_base, label.iov_len); in generate_key()
    287 context.iov_base, context.iov_len); in generate_key()
    378 d->label.iov_len = 12; in generate_smb30signingkey()
    380 d->context.iov_len = 8; in generate_smb30signingkey()
    384 d->label.iov_len = 11; in generate_smb30signingkey()
    386 d->context.iov_len = 10; in generate_smb30signingkey()
    390 d->label.iov_len = 11; in generate_smb30signingkey()
    392 d->context.iov_len = 10; in generate_smb30signingkey()
    [all …]
|
D | transport.c |
    239 rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) { in smb_rqst_len()
    249 buflen += iov[i].iov_len; in smb_rqst_len()
    311 .iov_len = 4 in __smb_send_rqst()
    331 dump_smb(iov[i].iov_base, iov[i].iov_len); in __smb_send_rqst()
    332 size += iov[i].iov_len; in __smb_send_rqst()
    420 iov.iov_len = sizeof(*tr_hdr); in smb_send_rqst()
    445 iov[0].iov_len = 4; in smb_send()
    447 iov[1].iov_len = smb_buf_length; in smb_send()
    577 if (rqst->rq_iov[0].iov_len != 4 || in cifs_setup_async_request()
    683 iov[0].iov_len = get_rfc1002_length(in_buf) + 4; in SendReceiveNoRsp()
    [all …]
|
D | sess.c |
    588 sess_data->iov[0].iov_len = be32_to_cpu(smb_buf->smb_buf_length) + 4; in sess_alloc_buffer()
    607 sess_data->iov[0].iov_len = 0; in sess_alloc_buffer()
    661 count = sess_data->iov[1].iov_len + sess_data->iov[2].iov_len; in sess_sendreceive()
    740 sess_data->iov[2].iov_len = (long) bcc_ptr - in sess_auth_lanman()
    841 if (sess_data->iov[0].iov_len % 2) { in sess_auth_ntlm()
    851 sess_data->iov[2].iov_len = (long) bcc_ptr - in sess_auth_ntlm()
    949 if (sess_data->iov[0].iov_len % 2) { in sess_auth_ntlmv2()
    959 sess_data->iov[2].iov_len = (long) bcc_ptr - in sess_auth_ntlmv2()
    1068 sess_data->iov[1].iov_len = msg->secblob_len; in sess_auth_kerberos()
    1069 pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len); in sess_auth_kerberos()
    [all …]
|
D | smb2pdu.c |
    605 iov[num].iov_len = sizeof(struct create_posix); in add_posix_context()
    609 iov[num - 1].iov_len); in add_posix_context()
    703 iov[0].iov_len = total_len; in SMB2_negotiate()
    832 rsp_iov.iov_len); in SMB2_negotiate()
    1056 sess_data->iov[0].iov_len = total_len - 1; in SMB2_sess_alloc_buffer()
    1084 req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len); in SMB2_sess_sendreceive()
    1179 sess_data->iov[1].iov_len = msg->secblob_len; in SMB2_auth_kerberos()
    1255 sess_data->iov[1].iov_len = blob_length; in SMB2_sess_auth_rawntlmssp_negotiate()
    1333 sess_data->iov[1].iov_len = blob_length; in SMB2_sess_auth_rawntlmssp_authenticate()
    1466 iov[0].iov_len = total_len; in SMB2_logoff()
    [all …]
|
D | cifsencrypt.c |
    51 if (iov[0].iov_len <= 4) in __cifs_calc_signature()
    55 if (n_vec < 2 || iov[0].iov_len != 4) in __cifs_calc_signature()
    61 if (iov[i].iov_len == 0) in __cifs_calc_signature()
    69 iov[i].iov_base, iov[i].iov_len); in __cifs_calc_signature()
    149 if (rqst->rq_iov[0].iov_len != 4 || in cifs_sign_rqst()
    197 iov[0].iov_len = 4; in cifs_sign_smb()
    199 iov[1].iov_len = be32_to_cpu(cifs_pdu->smb_buf_length); in cifs_sign_smb()
    214 if (rqst->rq_iov[0].iov_len != 4 || in cifs_verify_signature()
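__cifs_calc_signature() above feeds each non-empty iovec segment into the signing hash one piece at a time, skipping segments whose iov_len is zero. The sketch below shows that scatter-walk shape with a toy additive checksum standing in for the real HMAC, so it stays self-contained:

#include <stdint.h>
#include <stddef.h>
#include <sys/uio.h>

/*
 * Fold every non-empty iovec segment into a running checksum.
 * A real signer would call its crypto update primitive here
 * instead of this toy byte sum.
 */
static uint32_t checksum_iovecs(const struct iovec *iov, int n_vec)
{
    uint32_t sum = 0;

    for (int i = 0; i < n_vec; i++) {
        const unsigned char *p = iov[i].iov_base;

        if (iov[i].iov_len == 0)    /* skip empty segments, as the code above does */
            continue;
        for (size_t j = 0; j < iov[i].iov_len; j++)
            sum += p[j];
    }
    return sum;
}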
|
D | smbdirect.c |
    1175 data_length += iov[i].iov_len; in smbd_post_send_data()
    1176 sg_set_buf(&sgl[i], iov[i].iov_base, iov[i].iov_len); in smbd_post_send_data()
    2138 to_read = msg->msg_iter.kvec->iov_len; in smbd_recv()
    2219 dump_smb(iov[i].iov_base, iov[i].iov_len); in smbd_send()
    2230 buflen += iov[i].iov_len; in smbd_send()
    2234 (buflen-iov[i].iov_len); in smbd_send()
    2256 vec.iov_len = max_iov_size; in smbd_send()
    2258 vec.iov_len = in smbd_send()
    2261 remaining_data_length -= vec.iov_len; in smbd_send()
    2266 j, vec.iov_base, vec.iov_len, in smbd_send()
|
/linux-4.19.296/include/linux/sunrpc/ |
D | svc.h |
    190 iov->iov_len -= sizeof(__be32); in svc_getnl()
    196 __be32 *vp = iov->iov_base + iov->iov_len; in svc_putnl()
    198 iov->iov_len += sizeof(__be32); in svc_putnl()
    207 iov->iov_len -= sizeof(__be32); in svc_getu32()
    215 iov->iov_len += sizeof(*vp); in svc_ungetu32()
    220 __be32 *vp = iov->iov_base + iov->iov_len; in svc_putu32()
    222 iov->iov_len += sizeof(__be32); in svc_putu32()
    348 && cp <= (char*)vec->iov_base + vec->iov_len; in xdr_argsize_check()
    357 vec->iov_len = cp - (char*)vec->iov_base; in xdr_ressize_check()
    359 return vec->iov_len <= PAGE_SIZE; in xdr_ressize_check()
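svc_getnl()/svc_putnl() above use the kvec as a cursor: reads consume 32-bit big-endian words from the front (advancing iov_base and shrinking iov_len), writes append at iov_base + iov_len and grow iov_len. A hedged userspace sketch of that pair, with a local stand-in for struct kvec; the put side assumes the caller reserved room past iov_len:

#include <arpa/inet.h>   /* ntohl()/htonl() */
#include <stdint.h>
#include <stddef.h>

struct xdr_vec {             /* stand-in for struct kvec */
    void   *iov_base;
    size_t  iov_len;
};

/* Pull one network-order 32-bit word off the front of the vector. */
static uint32_t vec_getnl(struct xdr_vec *iov)
{
    uint32_t *vp = iov->iov_base;
    uint32_t val = ntohl(*vp);

    iov->iov_base = vp + 1;                 /* advance past the word just read */
    iov->iov_len -= sizeof(uint32_t);
    return val;
}

/* Append one 32-bit word in network order at the current end of the vector. */
static void vec_putnl(struct xdr_vec *iov, uint32_t val)
{
    uint32_t *vp = (uint32_t *)((char *)iov->iov_base + iov->iov_len);

    *vp = htonl(val);                       /* caller must have left space here */
    iov->iov_len += sizeof(uint32_t);
}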
|
D | xdr.h |
    69 buf->head[0].iov_len = len; in xdr_buf_init()
    70 buf->tail[0].iov_len = 0; in xdr_buf_init()
    153 return iov->iov_len = ((u8 *) p - (u8 *) iov->iov_base); in xdr_adjust_iovec()
|
/linux-4.19.296/fs/nfsd/ |
D | nfscache.c |
    141 drc_mem_usage -= rp->c_replvec.iov_len; in nfsd_reply_cache_free_locked()
    299 size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len, in nfsd_cache_csum()
    301 size_t len = min(buf->head[0].iov_len, csum_len); in nfsd_cache_csum()
    452 drc_mem_usage -= rp->c_replvec.iov_len; in nfsd_cache_lookup()
    530 len = resv->iov_len - ((char*)statp - (char*)resv->iov_base); in nfsd_cache_update()
    553 cachv->iov_len = bufsize; in nfsd_cache_update()
    580 if (vec->iov_len + data->iov_len > PAGE_SIZE) { in nfsd_cache_append()
    582 data->iov_len); in nfsd_cache_append()
    585 memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len); in nfsd_cache_append()
    586 vec->iov_len += data->iov_len; in nfsd_cache_append()
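nfsd_cache_append() above grows a cached reply vector by copying at iov_base + iov_len and bumping iov_len, refusing to exceed PAGE_SIZE. A minimal sketch of that bounded append, with the 4 KiB page size written out as an assumption:

#include <string.h>
#include <stddef.h>
#include <stdbool.h>

#define PAGE_SIZE 4096           /* assumed capacity of the cached buffer */

struct kvec_like {
    void   *iov_base;
    size_t  iov_len;
};

/* Append 'data' to 'vec' unless the result would exceed one page. */
static bool cache_append(struct kvec_like *vec, const struct kvec_like *data)
{
    if (vec->iov_len + data->iov_len > PAGE_SIZE)
        return false;                           /* too big to cache */
    memcpy((char *)vec->iov_base + vec->iov_len,
           data->iov_base, data->iov_len);
    vec->iov_len += data->iov_len;
    return true;
}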
|
D | nfsxdr.c |
    260 rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE); in nfssvc_decode_readargs()
    261 len -= rqstp->rq_vec[v].iov_len; in nfssvc_decode_readargs()
    294 if (hdr > head->iov_len) in nfssvc_decode_writeargs()
    296 dlen = head->iov_len + rqstp->rq_arg.page_len - hdr; in nfssvc_decode_writeargs()
    310 args->first.iov_len = head->iov_len - hdr; in nfssvc_decode_writeargs()
    383 args->first.iov_len = rqstp->rq_arg.head[0].iov_len; in nfssvc_decode_symlinkargs()
    384 args->first.iov_len -= (char *)p - base; in nfssvc_decode_symlinkargs()
    397 if (xdrlen > args->first.iov_len - (8 * sizeof(__be32))) in nfssvc_decode_symlinkargs()
    462 rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3); in nfssvc_encode_readlinkres()
    482 rqstp->rq_res.tail[0].iov_len = 4 - (resp->count&3); in nfssvc_encode_readres()
|
D | nfs3xdr.c |
    386 rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE); in nfs3svc_decode_readargs()
    387 len -= rqstp->rq_vec[v].iov_len; in nfs3svc_decode_readargs()
    411 if ((void *)p > head->iov_base + head->iov_len) in nfs3svc_decode_writeargs()
    424 dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr; in nfs3svc_decode_writeargs()
    442 args->first.iov_len = head->iov_len - hdr; in nfs3svc_decode_writeargs()
    499 args->first.iov_len = rqstp->rq_arg.head[0].iov_len; in nfs3svc_decode_symlinkargs()
    500 args->first.iov_len -= (char *)p - base; in nfs3svc_decode_symlinkargs()
    502 dlen = args->first.iov_len + rqstp->rq_arg.page_len + in nfs3svc_decode_symlinkargs()
    503 rqstp->rq_arg.tail[0].iov_len; in nfs3svc_decode_symlinkargs()
    706 rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3); in nfs3svc_encode_readlinkres()
    [all …]
|
/linux-4.19.296/include/linux/ |
D | uio.h |
    21 size_t iov_len; member
    63 ret += iov[seg].iov_len; in iov_length()
    71 .iov_len = min(iter->count, in iov_iter_iovec()
    72 iter->iov->iov_len - iter->iov_offset), in iov_iter_iovec()
    81 iov_iter_advance(&(iter), (iov).iov_len))
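iov_length() above is just a sum of iov_len over all segments, and iov_iter_iovec() clamps the current segment to min(count, iov_len - offset). The summation is easy to reproduce; note that a hardened version (compare rw_copy_check_uvector() under fs/read_write.c below) must also watch for overflow:

#include <stddef.h>
#include <sys/uio.h>

/* Total payload described by an iovec array, mirroring iov_length(). */
static size_t iovec_total(const struct iovec *iov, unsigned long nr_segs)
{
    size_t total = 0;

    for (unsigned long seg = 0; seg < nr_segs; seg++)
        total += iov[seg].iov_len;      /* a hardened version would check for wrap */
    return total;
}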
|
D | vringh.h | 129 iov->iov[iov->i].iov_len += iov->consumed; in vringh_iov_reset() 191 kiov->iov[kiov->i].iov_len += kiov->consumed; in vringh_kiov_reset()
|
/linux-4.19.296/drivers/xen/xenbus/ |
D | xenbus_xs.c |
    286 vec->iov_len = msg->len; in xenbus_dev_request_and_reply()
    328 msg.len += iovec[i].iov_len; in xs_talkv()
    363 iovec.iov_len = strlen(string) + 1; in xs_single()
    491 iovec[0].iov_len = strlen(path) + 1; in xenbus_write()
    493 iovec[1].iov_len = strlen(string); in xenbus_write()
    663 iov[0].iov_len = strlen(path) + 1; in xs_watch()
    665 iov[1].iov_len = strlen(token) + 1; in xs_watch()
    676 iov[0].iov_len = strlen(path) + 1; in xs_unwatch()
    678 iov[1].iov_len = strlen(token) + 1; in xs_unwatch()
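xs_watch(), xs_unwatch() and xenbus_write() above marshal string arguments as one vector element per string, usually with iov_len = strlen() + 1 so the terminating NUL travels with the string (the value written at line 493 is the exception). A small sketch of that packing using the POSIX struct iovec:

#include <string.h>
#include <sys/uio.h>

/*
 * Describe a (path, token) pair as two iovecs, NUL terminators included,
 * the way xs_watch()/xs_unwatch() build their request above.
 */
static void pack_watch_request(struct iovec iov[2],
                               const char *path, const char *token)
{
    iov[0].iov_base = (void *)path;
    iov[0].iov_len  = strlen(path) + 1;
    iov[1].iov_base = (void *)token;
    iov[1].iov_len  = strlen(token) + 1;
}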
|
/linux-4.19.296/fs/jffs2/ |
D | write.c |
    78 vecs[0].iov_len = sizeof(*ri); in jffs2_write_dnode()
    80 vecs[1].iov_len = datalen; in jffs2_write_dnode()
    98 jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); in jffs2_write_dnode()
    238 vecs[0].iov_len = sizeof(*rd); in jffs2_write_dirent()
    240 vecs[1].iov_len = namelen; in jffs2_write_dirent()
    256 jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); in jffs2_write_dirent()
|
D | writev.c | 43 vecs[0].iov_len = len; in jffs2_flash_direct_write()
|
/linux-4.19.296/include/uapi/linux/ |
D | uio.h | 20 __kernel_size_t iov_len; /* Must be size_t (1003.1g) */ member
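This UAPI header is where the userspace-visible struct iovec lives: a base pointer plus an iov_len that, per POSIX.1g, must be a size_t. A minimal userspace example gathering two buffers into a single writev() call:

#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
    const char hdr[]  = "header: ";
    const char body[] = "payload\n";
    struct iovec iov[2] = {
        { .iov_base = (void *)hdr,  .iov_len = strlen(hdr)  },
        { .iov_base = (void *)body, .iov_len = strlen(body) },
    };

    /* One syscall writes both segments in order. */
    return writev(STDOUT_FILENO, iov, 2) < 0 ? 1 : 0;
}

readv() uses the same structure in the other direction, scattering one read across the listed buffers.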
|
/linux-4.19.296/fs/9p/ |
D | xattr.c | 31 struct kvec kvec = {.iov_base = buffer, .iov_len = buffer_size}; in v9fs_fid_xattr_get() 106 struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len}; in v9fs_fid_xattr_set()
|
/linux-4.19.296/drivers/misc/mic/vop/ |
D | vop_vringh.c |
    695 total += iov->iov[i].iov_len; in vop_vringh_iov_consumed()
    716 partlen = min(kiov->iov_len, len); in vop_vringh_copy()
    720 kiov->iov_len, in vop_vringh_copy()
    725 kiov->iov_len, in vop_vringh_copy()
    736 kiov->iov_len -= partlen; in vop_vringh_copy()
    738 if (!kiov->iov_len) { in vop_vringh_copy()
    740 kiov->iov_len = iov->consumed; in vop_vringh_copy()
    789 len = iov.iov_len; in _vop_virtio_copy()
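vop_vringh_copy() above consumes a vector in place: each pass copies min(iov_len, len) bytes, shrinks iov_len, and steps to the next element once the current one is empty (the real code also records the consumed length so vringh_kiov_reset(), under vringh.h above, can restore it; this sketch omits that). A self-contained version of the consumption loop:

#include <string.h>
#include <stddef.h>
#include <sys/uio.h>

/*
 * Copy up to 'len' bytes out of the iovec array into 'dst', consuming
 * the array in place: partial segments are shrunk, empty ones are
 * stepped over. Returns the number of bytes copied.
 */
static size_t consume_iovecs(struct iovec *iov, unsigned long *idx,
                             unsigned long nr, void *dst, size_t len)
{
    size_t done = 0;

    while (len && *idx < nr) {
        struct iovec *v = &iov[*idx];
        size_t part = v->iov_len < len ? v->iov_len : len;

        memcpy((char *)dst + done, v->iov_base, part);
        v->iov_base = (char *)v->iov_base + part;
        v->iov_len -= part;
        if (!v->iov_len)
            (*idx)++;           /* segment fully consumed, move on */
        done += part;
        len  -= part;
    }
    return done;
}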
|
/linux-4.19.296/fs/ceph/ |
D | mds_client.c |
    337 end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head); in parse_reply_info()
    967 end = p + msg->front.iov_len; in create_session_open_msg()
    1002 msg->front.iov_len = p - msg->front.iov_base; in create_session_open_msg()
    1003 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); in create_session_open_msg()
    1711 msg->front.iov_len = sizeof(*head); in ceph_send_cap_releases()
    1724 item = msg->front.iov_base + msg->front.iov_len; in ceph_send_cap_releases()
    1729 msg->front.iov_len += sizeof(*item); in ceph_send_cap_releases()
    1735 cap_barrier = msg->front.iov_base + msg->front.iov_len; in ceph_send_cap_releases()
    1737 msg->front.iov_len += sizeof(*cap_barrier); in ceph_send_cap_releases()
    1739 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); in ceph_send_cap_releases()
    [all …]
|
D | quota.c | 51 if (msg->front.iov_len < sizeof(*h)) { in ceph_handle_quota() 53 session->s_mds, (int)msg->front.iov_len); in ceph_handle_quota()
|
/linux-4.19.296/drivers/xen/ |
D | pvcalls-back.c |
    140 vec[0].iov_len = wanted; in pvcalls_conn_back_read()
    144 vec[0].iov_len = array_size - masked_prod; in pvcalls_conn_back_read()
    146 vec[1].iov_len = wanted - vec[0].iov_len; in pvcalls_conn_back_read()
    199 vec[0].iov_len = size; in pvcalls_conn_back_write()
    203 vec[0].iov_len = array_size - pvcalls_mask(cons, array_size); in pvcalls_conn_back_write()
    205 vec[1].iov_len = size - vec[0].iov_len; in pvcalls_conn_back_write()
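pvcalls_conn_back_read()/write() above map a transfer onto a circular ring: when the span does not wrap it needs a single iovec, otherwise it is split in two, the first running to the end of the array and the second starting back at offset zero. A sketch of that split, assuming (as the pvcalls_mask() usage implies) a power-of-two ring size and a request no larger than the ring:

#include <stddef.h>
#include <sys/uio.h>

/*
 * Describe 'wanted' bytes starting at ring offset 'prod' as one or two
 * iovecs over a power-of-two sized ring. Returns the iovec count.
 * Assumes wanted <= ring_size.
 */
static int ring_to_iovecs(struct iovec vec[2], char *ring, size_t ring_size,
                          size_t prod, size_t wanted)
{
    size_t masked = prod & (ring_size - 1);

    if (masked + wanted <= ring_size) {
        vec[0].iov_base = ring + masked;        /* contiguous case */
        vec[0].iov_len  = wanted;
        return 1;
    }
    vec[0].iov_base = ring + masked;            /* up to the end of the ring */
    vec[0].iov_len  = ring_size - masked;
    vec[1].iov_base = ring;                     /* wrap back to the start */
    vec[1].iov_len  = wanted - vec[0].iov_len;
    return 2;
}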
|
/linux-4.19.296/include/scsi/ |
D | sg.h | 40 size_t iov_len; /* Length in bytes */ member
|
/linux-4.19.296/fs/ |
D | read_write.c |
    397 struct iovec iov = { .iov_base = buf, .iov_len = len }; in new_sync_read()
    465 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len }; in new_sync_write()
    702 iovec.iov_len, ppos); in do_loop_readv_writev()
    705 iovec.iov_len, ppos); in do_loop_readv_writev()
    714 if (nr != iovec.iov_len) in do_loop_readv_writev()
    805 ssize_t len = (ssize_t)iov[seg].iov_len; in rw_copy_check_uvector()
    820 iov[seg].iov_len = len; in rw_copy_check_uvector()
    877 if (__get_user(len, &uvector->iov_len) || in compat_rw_copy_check_uvector()
    893 iov->iov_len = (compat_size_t) len; in compat_rw_copy_check_uvector()
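new_sync_read()/new_sync_write() above show the degenerate single-segment case, wrapping one (buf, len) pair in an iovec, while rw_copy_check_uvector() validates user-supplied vectors, rejecting any iov_len that goes negative when viewed as a ssize_t. A hedged userspace sketch of that validation, which also guards the running total:

#include <stddef.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/uio.h>

/*
 * Validate an iovec array in the spirit of rw_copy_check_uvector():
 * every length must fit in a ssize_t, and so must the running total.
 * Returns the total on success, -1 on a bad vector.
 */
static ssize_t check_iovecs(const struct iovec *iov, unsigned long nr_segs)
{
    ssize_t total = 0;

    for (unsigned long seg = 0; seg < nr_segs; seg++) {
        ssize_t len = (ssize_t)iov[seg].iov_len;

        if (len < 0)                    /* length overflows ssize_t */
            return -1;
        if (len > SSIZE_MAX - total)    /* total would overflow */
            return -1;
        total += len;
    }
    return total;
}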
|