
Searched refs:bvec (Results 1 – 25 of 93) sorted by relevance

/linux/include/linux/
bvec.h
98 #define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) argument
125 #define bvec_iter_len(bvec, iter) \ argument
129 #define bvec_iter_page(bvec, iter) \ argument
213 bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT); in bvec_advance()
217 bvec->bv_len - iter_all->done); in bvec_advance()
235 return kmap_local_page(bvec->bv_page) + bvec->bv_offset; in bvec_kmap_local()
246 memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len); in memcpy_from_bvec()
257 memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len); in memcpy_to_bvec()
268 memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len); in memzero_bvec()
280 return page_address(bvec->bv_page) + bvec->bv_offset; in bvec_virt()
[all …]
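The bvec.h hits above are the core accessor helpers built around struct bio_vec. As a hedged illustration (not part of the search output; the function name and the caller-supplied buffer are made up), a consumer of a single segment might use them like this:

#include <linux/bvec.h>
#include <linux/highmem.h>

/* Illustrative sketch only: consume one segment with the bvec.h helpers. */
static void example_consume_bvec(struct bio_vec *bv, char *buf)
{
	/* copy bv_len bytes starting at bv_page + bv_offset into buf */
	memcpy_from_bvec(buf, bv);

	/* then clear the segment in place */
	memzero_bvec(bv);
}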
/linux/fs/erofs/
zdata.c
212 iter->bvset->bvec[iter->cur++] = *bvec; in z_erofs_bvec_enqueue()
224 *bvec = iter->bvset->bvec[iter->cur++]; in z_erofs_bvec_dequeue()
632 for (; bvec < end; ++bvec) { in z_erofs_cache_release_folio()
633 if (bvec->page && page_folio(bvec->page) == folio) { in z_erofs_cache_release_folio()
634 bvec->page = NULL; in z_erofs_cache_release_folio()
1053 bvec->offset + bvec->end == be->pcl->length)) { in z_erofs_do_decompressed_bvec()
1064 item->bvec = *bvec; in z_erofs_do_decompressed_bvec()
1080 cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0; in z_erofs_fill_other_copies()
1391 bvec->bv_offset = 0; in z_erofs_fill_bio_vec()
1422 bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset; in z_erofs_fill_bio_vec()
[all …]
/linux/block/
bio-integrity.c
215 iov_iter_bvec(&iter, direction, bvec, nr_vecs, len); in bio_integrity_copy_user()
238 bio_integrity_unpin_bvec(bvec, nr_vecs, false); in bio_integrity_copy_user()
240 memcpy(&bip->bip_vec[1], bvec, nr_vecs * sizeof(*bvec)); in bio_integrity_copy_user()
269 memcpy(bip->bip_vec, bvec, nr_vecs * sizeof(*bvec)); in bio_integrity_init_user()
334 bvec = kcalloc(nr_vecs, sizeof(*bvec), GFP_KERNEL); in bio_integrity_map_user()
335 if (!bvec) in bio_integrity_map_user()
358 if (bvec != stack_vec) in bio_integrity_map_user()
359 kfree(bvec); in bio_integrity_map_user()
364 bio_integrity_unpin_bvec(bvec, nr_bvecs, false); in bio_integrity_map_user()
366 if (bvec != stack_vec) in bio_integrity_map_user()
[all …]
blk-map.c
50 struct bio_vec *bvec; in bio_copy_from_iter() local
57 bvec->bv_offset, in bio_copy_from_iter()
58 bvec->bv_len, in bio_copy_from_iter()
64 if (ret < bvec->bv_len) in bio_copy_from_iter()
81 struct bio_vec *bvec; in bio_copy_to_iter() local
88 bvec->bv_offset, in bio_copy_to_iter()
89 bvec->bv_len, in bio_copy_to_iter()
95 if (ret < bvec->bv_len) in bio_copy_to_iter()
452 struct bio_vec *bvec; in bio_copy_kern_endio_read() local
456 memcpy_from_bvec(p, bvec); in bio_copy_kern_endio_read()
[all …]
blk-merge.c
475 unsigned nbytes = bvec->bv_len; in blk_bvec_map_sg()
479 unsigned offset = bvec->bv_offset + total; in blk_bvec_map_sg()
481 bvec_phys(bvec) + total, nbytes); in blk_bvec_map_sg()
482 struct page *page = bvec->bv_page; in blk_bvec_map_sg()
520 int nbytes = bvec->bv_len; in __blk_segment_map_sg_merge()
528 if (!biovec_phys_mergeable(q, bvprv, bvec)) in __blk_segment_map_sg_merge()
540 struct bio_vec bvec, bvprv = { NULL }; in __blk_bios_map_sg() local
546 bio_for_each_bvec(bvec, bio, iter) { in __blk_bios_map_sg()
556 if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE) in __blk_bios_map_sg()
557 nsegs += __blk_bvec_map_sg(bvec, sglist, sg); in __blk_bios_map_sg()
[all …]
bounce.c
105 struct bio_vec *bvec, orig_vec; in bounce_end_io() local
112 bio_for_each_segment_all(bvec, bio, iter_all) { in bounce_end_io()
114 if (bvec->bv_page != orig_vec.bv_page) { in bounce_end_io()
115 dec_zone_page_state(bvec->bv_page, NR_BOUNCE); in bounce_end_io()
116 mempool_free(bvec->bv_page, &page_pool); in bounce_end_io()
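The bounce.c hits show the standard completion-side walk over a finished bio with bio_for_each_segment_all(). A minimal sketch of that pattern follows; the handler name and the per-page action are illustrative assumptions:

#include <linux/bio.h>
#include <linux/highmem.h>

/* Sketch of a bi_end_io handler visiting every single-page segment. */
static void example_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		flush_dcache_page(bvec->bv_page);	/* placeholder per-page work */

	bio_put(bio);
}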
/linux/lib/
iov_iter.c
503 for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) { in iov_iter_bvec_advance()
509 i->nr_segs -= bvec - i->bvec; in iov_iter_bvec_advance()
510 i->bvec = bvec; in iov_iter_bvec_advance()
631 const struct bio_vec *bvec = i->bvec; in iov_iter_revert() local
636 i->bvec = bvec; in iov_iter_revert()
703 .bvec = bvec, in iov_iter_bvec()
818 const struct bio_vec *bvec = i->bvec; in iov_iter_aligned_bvec() local
832 bvec++; in iov_iter_aligned_bvec()
909 const struct bio_vec *bvec = i->bvec; in iov_iter_alignment_bvec() local
920 bvec++; in iov_iter_alignment_bvec()
[all …]
kunit_iov_iter.c
222 struct bio_vec *bvec, unsigned int bvmax, in iov_kunit_load_bvec() argument
243 bvec[i].bv_len += pr->to; in iov_kunit_load_bvec()
245 bvec_set_page(&bvec[i], page, pr->to - pr->from, pr->from); in iov_kunit_load_bvec()
255 iov_iter_bvec(iter, dir, bvec, i, size); in iov_kunit_load_bvec()
265 struct bio_vec bvec[8]; in iov_kunit_copy_to_bvec() local
281 iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec), in iov_kunit_copy_to_bvec()
319 struct bio_vec bvec[8]; in iov_kunit_copy_from_bvec() local
335 iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec), in iov_kunit_copy_from_bvec()
787 struct bio_vec bvec[8]; in iov_kunit_extract_pages_bvec() local
796 iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec), in iov_kunit_extract_pages_bvec()
/linux/drivers/nvme/target/
io-cmd-file.c
94 iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count); in nvmet_file_submit_bvec()
108 if (req->f.bvec != req->inline_bvec) { in nvmet_file_io_done()
110 kfree(req->f.bvec); in nvmet_file_io_done()
112 mempool_free(req->f.bvec, req->ns->bvec_pool); in nvmet_file_io_done()
142 bvec_set_page(&req->f.bvec[bv_cnt], sg_page(sg), sg->length, in nvmet_file_execute_io()
144 len += req->f.bvec[bv_cnt].bv_len; in nvmet_file_execute_io()
145 total_len += req->f.bvec[bv_cnt].bv_len; in nvmet_file_execute_io()
231 req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec), in nvmet_file_execute_rw()
234 req->f.bvec = req->inline_bvec; in nvmet_file_execute_rw()
236 if (unlikely(!req->f.bvec)) { in nvmet_file_execute_rw()
[all …]
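The nvmet hits above follow a common pattern: describe a set of pages as a bio_vec array, then wrap it in an iov_iter for the file I/O path. A minimal sketch of that setup, assuming full-page segments (the function name and parameters are illustrative):

#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/mm.h>

/* Sketch: build a bvec array over @npages pages and wrap it in an iov_iter. */
static int example_build_bvec_iter(struct page **pages, unsigned int npages,
				   size_t len, struct iov_iter *iter,
				   struct bio_vec **out)
{
	struct bio_vec *bvec;
	unsigned int i;

	bvec = kmalloc_array(npages, sizeof(*bvec), GFP_KERNEL);
	if (!bvec)
		return -ENOMEM;

	/* one full page per segment; real code trims the final segment */
	for (i = 0; i < npages; i++)
		bvec_set_page(&bvec[i], pages[i], PAGE_SIZE, 0);

	iov_iter_bvec(iter, ITER_SOURCE, bvec, npages, len);
	*out = bvec;	/* caller frees once the I/O has completed */
	return 0;
}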
/linux/fs/squashfs/
block.c
39 struct bio_vec *bvec = bvec_init_iter_all(&iter_all); in copy_bio_to_actor() local
50 int bytes_to_copy = min_t(int, bvec->bv_len - offset, in copy_bio_to_actor()
56 memcpy(actor_addr + actor_offset, bvec_virt(bvec) + in copy_bio_to_actor()
69 if (offset >= bvec->bv_len) { in copy_bio_to_actor()
286 struct bio_vec *bvec = bvec_init_iter_all(&iter_all); in squashfs_read_data() local
301 data = bvec_virt(bvec); in squashfs_read_data()
303 if (offset < bvec->bv_len - 1) { in squashfs_read_data()
310 data = bvec_virt(bvec); in squashfs_read_data()
lzo_wrapper.c
70 struct bio_vec *bvec = bvec_init_iter_all(&iter_all); in lzo_uncompress() local
77 int avail = min(bytes, ((int)bvec->bv_len) - offset); in lzo_uncompress()
79 data = bvec_virt(bvec); in lzo_uncompress()
zlib_wrapper.c
57 struct bio_vec *bvec = bvec_init_iter_all(&iter_all); in zlib_uncompress() local
83 avail = min(length, ((int)bvec->bv_len) - offset); in zlib_uncompress()
84 data = bvec_virt(bvec); in zlib_uncompress()
lz4_wrapper.c
96 struct bio_vec *bvec = bvec_init_iter_all(&iter_all); in lz4_uncompress() local
102 int avail = min(bytes, ((int)bvec->bv_len) - offset); in lz4_uncompress()
104 data = bvec_virt(bvec); in lz4_uncompress()
zstd_wrapper.c
72 struct bio_vec *bvec = bvec_init_iter_all(&iter_all); in zstd_uncompress() local
100 avail = min(length, ((int)bvec->bv_len) - offset); in zstd_uncompress()
101 data = bvec_virt(bvec); in zstd_uncompress()
xz_wrapper.c
124 struct bio_vec *bvec = bvec_init_iter_all(&iter_all); in squashfs_xz_uncompress() local
152 avail = min(length, ((int)bvec->bv_len) - offset); in squashfs_xz_uncompress()
153 data = bvec_virt(bvec); in squashfs_xz_uncompress()
/linux/mm/
page_io.c
317 struct bio_vec bvec[SWAP_CLUSTER_MAX]; member
339 struct page *page = sio->bvec[0].bv_page; in sio_write_complete()
354 page = sio->bvec[p].bv_page; in sio_write_complete()
361 end_page_writeback(sio->bvec[p].bv_page); in sio_write_complete()
393 bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0); in swap_writepage_fs()
396 if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) { in swap_writepage_fs()
473 iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len); in swap_write_unplug()
486 struct folio *folio = page_folio(sio->bvec[p].bv_page); in sio_read_complete()
494 struct folio *folio = page_folio(sio->bvec[p].bv_page); in sio_read_complete()
551 if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) { in swap_read_folio_fs()
[all …]
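The page_io.c hits batch folios into a fixed bvec array and flush them as one iov_iter per batch. A rough sketch of the queueing step, with an assumed batch size and the flush path left to the caller:

#include <linux/bvec.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#define EXAMPLE_BATCH_FOLIOS 16	/* assumed batch size, analogous to SWAP_CLUSTER_MAX */

struct example_batch {
	struct bio_vec bvec[EXAMPLE_BATCH_FOLIOS];
	unsigned int pages;
	size_t len;
};

/* Sketch: queue one folio; returns true when the caller should flush. */
static bool example_queue_folio(struct example_batch *b, struct folio *folio)
{
	bvec_set_folio(&b->bvec[b->pages++], folio, folio_size(folio), 0);
	b->len += folio_size(folio);
	return b->pages == ARRAY_SIZE(b->bvec);
}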
/linux/drivers/block/
n64cart.c
89 struct bio_vec bvec; in n64cart_submit_bio() local
94 bio_for_each_segment(bvec, bio, iter) { in n64cart_submit_bio()
95 if (!n64cart_do_bvec(dev, &bvec, pos)) { in n64cart_submit_bio()
99 pos += bvec.bv_len; in n64cart_submit_bio()
loop.c
87 struct bio_vec *bvec; member
241 iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len); in lo_write_bvec()
259 struct bio_vec bvec; in lo_write_simple() local
282 iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len); in lo_read_simple()
396 kfree(cmd->bvec); in lo_rw_aio_do_completion()
397 cmd->bvec = NULL; in lo_rw_aio_do_completion()
431 if (!bvec) in lo_rw_aio()
433 cmd->bvec = bvec; in lo_rw_aio()
442 *bvec = tmp; in lo_rw_aio()
443 bvec++; in lo_rw_aio()
[all …]
brd.c
246 struct bio_vec bvec; in brd_submit_bio() local
255 bio_for_each_segment(bvec, bio, iter) { in brd_submit_bio()
256 unsigned int len = bvec.bv_len; in brd_submit_bio()
260 WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) || in brd_submit_bio()
263 err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset, in brd_submit_bio()
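The n64cart.c and brd.c hits both show the per-segment loop at the heart of a simple ->submit_bio() implementation. A sketch of that loop; example_do_bvec() is a made-up stand-in for the driver's real transfer routine, not a kernel helper:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical per-segment transfer; a real driver does its I/O here. */
static bool example_do_bvec(struct page *page, unsigned int len,
			    unsigned int off, sector_t sector)
{
	return true;
}

/* Sketch of a ->submit_bio() walking the bio one single-page segment at a time. */
static void example_submit_bio(struct bio *bio)
{
	sector_t sector = bio->bi_iter.bi_sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter) {
		if (!example_do_bvec(bvec.bv_page, bvec.bv_len,
				     bvec.bv_offset, sector)) {
			bio_io_error(bio);
			return;
		}
		sector += bvec.bv_len >> SECTOR_SHIFT;
	}
	bio_endio(bio);
}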
/linux/arch/m68k/emu/
nfblock.c
63 struct bio_vec bvec; in nfhd_submit_bio() local
70 bio_for_each_segment(bvec, bio, iter) { in nfhd_submit_bio()
71 len = bvec.bv_len; in nfhd_submit_bio()
74 bvec_phys(&bvec)); in nfhd_submit_bio()
/linux/Documentation/block/
biovecs.rst
20 bytes completed in the current bvec.
43 As of 5.12 bvec segments with zero bv_len are not supported.
52 exactly one bvec at a time - for example, bio_copy_data() in block/bio.c,
60 coding bvec iterators before, and having common implementation considerably
65 it somewhere else if there was an error) had to save the entire bvec array
68 * Biovecs can be shared between multiple bios - a bvec iter can represent an
76 bios with more than a single bvec! Now, we can efficiently split arbitrary
88 fine to _most_ devices, but since accessing the raw bvec array was the
90 since all drivers _must_ go through the bvec iterator - and have been
146 * The following helpers iterate over multi-page bvec. The passed 'struct
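The biovecs.rst excerpt above explains the immutable bvec iterator and the multi-page helpers. As a small hedged illustration (the function is made up), summing a bio's payload with the multi-page iterator looks like this:

#include <linux/bio.h>

/* Sketch: one iteration step per contiguous (possibly multi-page) bvec. */
static unsigned int example_count_bytes(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int bytes = 0;

	bio_for_each_bvec(bv, bio, iter)
		bytes += bv.bv_len;

	return bytes;	/* equals bio->bi_iter.bi_size for an unstarted bio */
}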
/linux/fs/nfs/
localio.c
35 struct bio_vec *bvec; member
254 struct bio_vec *bvec, *p; in nfs_bvec_alloc_and_import_pagevec() local
256 bvec = kmalloc_array(npages, sizeof(*bvec), flags); in nfs_bvec_alloc_and_import_pagevec()
257 if (bvec != NULL) { in nfs_bvec_alloc_and_import_pagevec()
258 for (p = bvec; npages > 0; p++, pagevec++, npages--) { in nfs_bvec_alloc_and_import_pagevec()
264 return bvec; in nfs_bvec_alloc_and_import_pagevec()
270 kfree(iocb->bvec); in nfs_local_iocb_free()
283 iocb->bvec = nfs_bvec_alloc_and_import_pagevec(hdr->page_array.pagevec, in nfs_local_iocb_alloc()
285 if (iocb->bvec == NULL) { in nfs_local_iocb_alloc()
302 iov_iter_bvec(i, dir, iocb->bvec, hdr->page_array.npages, in nfs_local_iter_init()
/linux/drivers/target/
target_core_file.c
318 struct bio_vec *bvec; in fd_do_rw() local
323 bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL); in fd_do_rw()
324 if (!bvec) { in fd_do_rw()
334 iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len); in fd_do_rw()
378 kfree(bvec); in fd_do_rw()
435 struct bio_vec *bvec; in fd_execute_write_same() local
458 bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL); in fd_execute_write_same()
459 if (!bvec) in fd_execute_write_same()
463 bvec_set_page(&bvec[i], sg_page(&cmd->t_data_sg[0]), in fd_execute_write_same()
469 iov_iter_bvec(&iter, ITER_SOURCE, bvec, nolb, len); in fd_execute_write_same()
[all …]
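The target_core_file.c hits wire a bvec-backed iov_iter into the VFS with vfs_iter_read()/vfs_iter_write(). A sketch of the read direction, assuming the bvec array has already been filled in (the function name is illustrative):

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/bvec.h>

/* Sketch: read @len bytes from @file at @pos into the pages behind @bvec. */
static ssize_t example_file_read_bvec(struct file *file, loff_t pos,
				      struct bio_vec *bvec,
				      unsigned long nr_segs, size_t len)
{
	struct iov_iter iter;

	iov_iter_bvec(&iter, ITER_DEST, bvec, nr_segs, len);
	return vfs_iter_read(file, &iter, &pos, 0);
}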
/linux/fs/netfs/
buffered_read.c
466 struct bio_vec *bvec; in netfs_read_gaps() local
494 bvec = kmalloc_array(nr_bvec, sizeof(*bvec), GFP_KERNEL); in netfs_read_gaps()
495 if (!bvec) in netfs_read_gaps()
500 kfree(bvec); in netfs_read_gaps()
506 rreq->direct_bv = bvec; in netfs_read_gaps()
509 bvec_set_folio(&bvec[i++], folio, from, 0); in netfs_read_gaps()
514 bvec_set_folio(&bvec[i++], sink, part, 0); in netfs_read_gaps()
518 bvec_set_folio(&bvec[i++], folio, flen - to, to); in netfs_read_gaps()
519 iov_iter_bvec(&rreq->iter, ITER_DEST, bvec, i, rreq->len); in netfs_read_gaps()
/linux/io_uring/
rsrc.c
123 unpin_user_page(imu->bvec[i].bv_page); in io_buffer_unmap()
819 if (!PageCompound(imu->bvec[j].bv_page)) in headpage_already_acct()
821 if (compound_head(imu->bvec[j].bv_page) == hpage) in headpage_already_acct()
981 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL); in io_sqe_buffer_register()
1008 bvec_set_page(&imu->bvec[i], pages[i], vec_len, off); in io_sqe_buffer_register()
1127 const struct bio_vec *bvec = imu->bvec; in io_import_fixed() local
1129 if (offset < bvec->bv_len) { in io_import_fixed()
1130 iter->bvec = bvec; in io_import_fixed()
1137 offset -= bvec->bv_len; in io_import_fixed()
1140 iter->bvec = bvec + seg_skip; in io_import_fixed()
[all …]
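The io_uring rsrc.c hits seek into a pre-registered bvec table by skipping whole segments before setting up the iterator. A simplified sketch that leans on iov_iter_advance() and iov_iter_truncate() instead of the hand-rolled skip (names are illustrative):

#include <linux/bvec.h>
#include <linux/uio.h>

/* Sketch: position an iov_iter at @offset inside a pre-built bvec table. */
static void example_import_at_offset(struct iov_iter *iter,
				     const struct bio_vec *bvec,
				     unsigned long nr_segs,
				     size_t total_len,
				     size_t offset, size_t len)
{
	iov_iter_bvec(iter, ITER_DEST, bvec, nr_segs, total_len);
	iov_iter_advance(iter, offset);		/* skip whole and partial segments */
	iov_iter_truncate(iter, len);		/* cap at the requested length */
}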
