#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	size_t skip = i->iov_offset;				\
	if (unlikely(i->type & ITER_BVEC)) {			\
		struct bio_vec v;				\
		struct bvec_iter __bi;				\
		iterate_bvec(i, n, v, __bi, skip, (B))		\
	} else if (unlikely(i->type & ITER_KVEC)) {		\
		const struct kvec *kvec;			\
		struct kvec v;					\
		iterate_kvec(i, n, v, kvec, skip, (K))		\
	} else {						\
		const struct iovec *iov;			\
		struct iovec v;					\
		iterate_iovec(i, n, v, iov, skip, (I))		\
	}							\
}
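
/*
 * Note: iterate_and_advance() below is iterate_all_kinds() plus iterator
 * book-keeping: after the walk it updates i->count, i->iov_offset and the
 * segment pointer/count to reflect what STEP actually consumed.
 */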
#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
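
/*
 * With PIPE_PARANOIA defined, sanity() checks that the iterator position
 * (i->idx, i->iov_offset) is consistent with the pipe's ring state before
 * data is appended: a non-zero offset must point at the end of the last
 * occupied buffer, a zero offset at the slot right after it.
 */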
#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif
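
/* pipe->buffers is a power of two, so the mask below wraps the ring index */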
static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
					v.iov_len);
			if (unlikely(err))
			return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
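
/*
 * A sketch of the usual caller pattern (hypothetical, not from this file):
 * a buffered-write path faults the source pages in up front so that the
 * later atomic copy is unlikely to hit the short-copy slow path:
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes)))
 *		return -EFAULT;
 */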
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}
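
/*
 * Make room in the pipe for up to 'size' bytes: top up the partially
 * filled last buffer (if any), then allocate fresh pages until either
 * the ring is full or the request is covered.  Returns the number of
 * bytes of space actually made available, with (*idxp, *offp) pointing
 * at where the caller should start writing.
 */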
static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
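
/*
 * Usage sketch (hypothetical caller, not part of this file), e.g. in a
 * ->read_iter() implementation copying a kernel buffer out to whatever
 * the iterator describes:
 *
 *	size_t copied = copy_to_iter(kbuf, len, iter);
 *	if (copied != len)
 *		return -EFAULT;		(short copy: user memory went away)
 */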
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	int idx = i->idx;
	size_t off = i->iov_offset;

	if (unlikely(i->count < size))
		size = i->count;
	i->count -= size;
	if (size) {
		if (off) /* make it relative to the beginning of buffer */
			size += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (size <= buf->len)
				break;
			size -= buf->len;
			idx = next_idx(idx, pipe);
		}
		buf->len = size;
		i->idx = idx;
		off = i->iov_offset = buf->offset + size;
	}
	if (off)
		idx = next_idx(idx, pipe);
	if (pipe->nrbufs) {
		int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		/* [curbuf,unused) is in use.  Free [idx,unused) */
		while (idx != unused) {
			buf = &pipe->bufs[idx];
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		if (i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline size_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n >= PAGE_SIZE) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t n;
	int idx;
	int npages;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
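
/*
 * Usage sketch (hypothetical caller, not from this file):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV,
 *			       &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	kfree(iov);	(safe: *iov is set to NULL when the fast array
 *			 was used, and kfree(NULL) is a no-op)
 */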
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);