#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

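/*
 * The iterate_{iovec,kvec,bvec} helpers below walk one kind of segment
 * array, invoking STEP once per contiguous chunk with __v describing the
 * current chunk.  iterate_all_kinds() dispatches on the iterator type
 * without modifying the iterator; iterate_and_advance() additionally
 * consumes what was processed (count, nr_segs, iov_offset and the segment
 * pointer).  For user-space iovecs the STEP expression may evaluate to a
 * number of bytes left uncopied, which terminates the walk early; kernel
 * kvec and bvec steps are assumed not to fail.
 */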
#define iterate_iovec(i, n, __v, __p, skip, STEP) {     \
        size_t left;                                    \
        size_t wanted = n;                              \
        __p = i->iov;                                   \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        } else {                                        \
                left = 0;                               \
        }                                               \
        while (unlikely(!left && n)) {                  \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted - n;                                 \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {      \
        size_t wanted = n;                              \
        __p = i->kvec;                                  \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                (void)(STEP);                           \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        }                                               \
        while (unlikely(n)) {                           \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                (void)(STEP);                           \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted;                                     \
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {     \
        struct bvec_iter __start;                       \
        __start.bi_size = n;                            \
        __start.bi_bvec_done = skip;                    \
        __start.bi_idx = 0;                             \
        for_each_bvec(__v, i->bvec, __bi, __start) {    \
                if (!__v.bv_len)                        \
                        continue;                       \
                (void)(STEP);                           \
        }                                               \
}

#define iterate_all_kinds(i, n, v, I, B, K) {                   \
        size_t skip = i->iov_offset;                            \
        if (unlikely(i->type & ITER_BVEC)) {                    \
                struct bio_vec v;                               \
                struct bvec_iter __bi;                          \
                iterate_bvec(i, n, v, __bi, skip, (B))          \
        } else if (unlikely(i->type & ITER_KVEC)) {             \
                const struct kvec *kvec;                        \
                struct kvec v;                                  \
                iterate_kvec(i, n, v, kvec, skip, (K))          \
        } else {                                                \
                const struct iovec *iov;                        \
                struct iovec v;                                 \
                iterate_iovec(i, n, v, iov, skip, (I))          \
        }                                                       \
}

#define iterate_and_advance(i, n, v, I, B, K) {                 \
        if (unlikely(i->count < n))                             \
                n = i->count;                                   \
        if (i->count) {                                         \
                size_t skip = i->iov_offset;                    \
                if (unlikely(i->type & ITER_BVEC)) {            \
                        const struct bio_vec *bvec = i->bvec;   \
                        struct bio_vec v;                       \
                        struct bvec_iter __bi;                  \
                        iterate_bvec(i, n, v, __bi, skip, (B))  \
                        i->bvec = __bvec_iter_bvec(i->bvec, __bi);      \
                        i->nr_segs -= i->bvec - bvec;           \
                        skip = __bi.bi_bvec_done;               \
                } else if (unlikely(i->type & ITER_KVEC)) {     \
                        const struct kvec *kvec;                \
                        struct kvec v;                          \
                        iterate_kvec(i, n, v, kvec, skip, (K))  \
                        if (skip == kvec->iov_len) {            \
                                kvec++;                         \
                                skip = 0;                       \
                        }                                       \
                        i->nr_segs -= kvec - i->kvec;           \
                        i->kvec = kvec;                         \
                } else {                                        \
                        const struct iovec *iov;                \
                        struct iovec v;                         \
                        iterate_iovec(i, n, v, iov, skip, (I))  \
                        if (skip == iov->iov_len) {             \
                                iov++;                          \
                                skip = 0;                       \
                        }                                       \
                        i->nr_segs -= iov - i->iov;             \
                        i->iov = iov;                           \
                }                                               \
                i->count -= n;                                  \
                i->iov_offset = skip;                           \
        }                                                       \
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        size_t skip = i->iov_offset;
        const struct iovec *iov;
        int err;
        struct iovec v;

        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                iterate_iovec(i, bytes, v, iov, skip, ({
                        err = fault_in_pages_readable(v.iov_base, v.iov_len);
                        if (unlikely(err))
                                return err;
                        0;}))
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
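
/*
 * Illustrative sketch, not part of the original file: a buffered-write
 * style caller typically faults the source pages in up front, so that the
 * later atomic copy done under the page lock is unlikely to come up short.
 * example_prepare_copy is a hypothetical helper name.
 */
static int __maybe_unused example_prepare_copy(struct iov_iter *from,
                                               size_t bytes)
{
        if (iov_iter_fault_in_readable(from, bytes))
                return -EFAULT;
        /*
         * The caller can now kmap a page and use
         * iov_iter_copy_from_user_atomic(), retrying on a short copy.
         */
        return 0;
}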

void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better.  Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS)) {
                direction |= ITER_KVEC;
                i->type = direction;
                i->kvec = (struct kvec *)iov;
        } else {
                i->type = direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
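
/*
 * Illustrative sketch, not part of the original file: setting up an
 * iterator over a single user buffer for a read()-style path and copying
 * kernel data into it.  example_fill_user_buf is a hypothetical helper.
 */
static size_t __maybe_unused example_fill_user_buf(void __user *ubuf,
                                                   size_t len,
                                                   const void *src)
{
        struct iovec iov = { .iov_base = ubuf, .iov_len = len };
        struct iov_iter iter;

        iov_iter_init(&iter, READ, &iov, 1, len);
        /* returns the number of bytes actually copied to user space */
        return copy_to_iter(src, len, &iter);
}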

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        iterate_and_advance(i, bytes, v,
                __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
                               v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        iterate_and_advance(i, bytes, v,
                __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
                                 v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        iterate_and_advance(i, bytes, v,
                __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        iterate_and_advance(i, bytes, v,
                __clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        iterate_all_kinds(i, bytes, v,
                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
                                          v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_KVEC));
        i->type = direction;
        i->kvec = kvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_BVEC));
        i->type = direction;
        i->bvec = bvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
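
/*
 * Illustrative sketch, not part of the original file: wrapping a single
 * page in a bvec-backed iterator so page-based data can be handed to code
 * that only understands iov_iter.  Note the direction passed to
 * iov_iter_bvec() must already have ITER_BVEC set, per the BUG_ON() above.
 * example_iter_one_page is a hypothetical helper.
 */
static void __maybe_unused example_iter_one_page(struct iov_iter *iter,
                                                 struct bio_vec *bv,
                                                 struct page *page,
                                                 unsigned int len, int rw)
{
        bv->bv_page = page;
        bv->bv_offset = 0;
        bv->bv_len = len;
        iov_iter_bvec(iter, rw | ITER_BVEC, bv, 1, len);
}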

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
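
/*
 * Illustrative sketch, not part of the original file: a direct-I/O path
 * will typically reject requests whose segments are not aligned to the
 * logical block size.  The blkbits parameter is a hypothetical stand-in
 * for the device's block-size shift.
 */
static bool __maybe_unused example_dio_aligned(const struct iov_iter *iter,
                                               unsigned int blkbits)
{
        unsigned long mask = (1UL << blkbits) - 1;

        /* iov_iter_alignment() ORs together all segment bases and lengths */
        return (iov_iter_alignment(iter) & mask) == 0;
}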

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;
        if (!size)
                return 0;

        iterate_all_kinds(i, size, v,
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0), 0),
                (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
                        (size != v.bv_len ? size : 0)),
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0))
        );
        return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
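
/*
 * Illustrative sketch, not part of the original file: pinning the pages
 * that back the start of an iterator and dropping the references again.
 * Real callers keep the pages until the I/O described by them completes.
 * example_pin_pages and EXAMPLE_NPAGES are hypothetical.
 */
#define EXAMPLE_NPAGES 16
static ssize_t __maybe_unused example_pin_pages(struct iov_iter *iter)
{
        struct page *pages[EXAMPLE_NPAGES];
        size_t offset;
        ssize_t bytes;
        int n, npages;

        bytes = iov_iter_get_pages(iter, pages, EXAMPLE_NPAGES * PAGE_SIZE,
                                   EXAMPLE_NPAGES, &offset);
        if (bytes <= 0)
                return bytes;

        /* offset is into the first page; work out how many pages we hold */
        npages = DIV_ROUND_UP(offset + bytes, PAGE_SIZE);
        for (n = 0; n < npages; n++)
                put_page(pages[n]);
        return bytes;
}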

static struct page **get_pages_array(size_t n)
{
        struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        struct page **p;

        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                p = get_pages_array(n);
                if (!p)
                        return -ENOMEM;
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
                if (unlikely(res < 0)) {
                        kvfree(p);
                        return res;
                }
                *pages = p;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                *pages = p = get_pages_array(1);
                if (!p)
                        return -ENOMEM;
                get_page(*p = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
                               struct iov_iter *i)
{
        char *to = addr;
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
                                               v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck(p + v.bv_offset,
                                                 (to += v.bv_len) - v.bv_len,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck(v.iov_base,
                                                 (to += v.iov_len) - v.iov_len,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
                             struct iov_iter *i)
{
        const char *from = addr;
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
                                             v.iov_base,
                                             v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
                                                 p + v.bv_offset,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
                                                 v.iov_base,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        size_t size = i->count;
        int npages = 0;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v, ({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        0;}),({
                npages++;
                if (npages >= maxpages)
                        return maxpages;
        }),({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        })
        )
        return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
        *new = *old;
        if (new->type & ITER_BVEC)
                return new->bvec = kmemdup(new->bvec,
                                    new->nr_segs * sizeof(struct bio_vec),
                                    flags);
        else
                /* iovec and kvec have identical layout */
                return new->iov = kmemdup(new->iov,
                                   new->nr_segs * sizeof(struct iovec),
                                   flags);
}
EXPORT_SYMBOL(dup_iter);

int import_iovec(int type, const struct iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
EXPORT_SYMBOL(import_iovec);
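
/*
 * Illustrative sketch, not part of the original file: how a readv()-style
 * syscall might import the user iovec array through a small on-stack
 * buffer.  On success *iov is either NULL (the stack array was used) or
 * the allocation to free, so kfree() can be called unconditionally.
 * example_import is a hypothetical helper.
 */
static int __maybe_unused example_import(const struct iovec __user *uvec,
                                         unsigned nr_segs,
                                         struct iov_iter *iter)
{
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        int ret;

        ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
                           &iov, iter);
        if (ret < 0)
                return ret;
        /* ... consume *iter with copy_to_iter() etc. ... */
        kfree(iov);
        return 0;
}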

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i)
{
        if (len > MAX_RW_COUNT)
                len = MAX_RW_COUNT;
        if (unlikely(!access_ok(!rw, buf, len)))
                return -EFAULT;

        iov->iov_base = buf;
        iov->iov_len = len;
        iov_iter_init(i, rw, iov, 1, len);
        return 0;
}
EXPORT_SYMBOL(import_single_range);