// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
	size_t off = 0;						\
	size_t skip = i->iov_offset;				\
	do {							\
		len = min(n, __p->iov_len - skip);		\
		if (likely(len)) {				\
			base = __p->iov_base + skip;		\
			len -= (STEP);				\
			off += len;				\
			skip += len;				\
			n -= len;				\
			if (skip < __p->iov_len)		\
				break;				\
		}						\
		__p++;						\
		skip = 0;					\
	} while (n);						\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
	size_t off = 0;						\
	unsigned skip = i->iov_offset;				\
	while (n) {						\
		unsigned offset = p->bv_offset + skip;		\
		unsigned left;					\
		void *kaddr = kmap_local_page(p->bv_page +	\
					offset / PAGE_SIZE);	\
		base = kaddr + offset % PAGE_SIZE;		\
		len = min(min(n, (size_t)(p->bv_len - skip)),	\
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
		left = (STEP);					\
		kunmap_local(kaddr);				\
		len -= left;					\
		off += len;					\
		skip += len;					\
		if (skip == p->bv_len) {			\
			skip = 0;				\
			p++;					\
		}						\
		n -= len;					\
		if (left)					\
			break;					\
	}							\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_xarray(i, n, base, len, __off, STEP) {		\
	__label__ __out;					\
	size_t __off = 0;					\
	struct folio *folio;					\
	loff_t start = i->xarray_start + i->iov_offset;		\
	pgoff_t index = start / PAGE_SIZE;			\
	XA_STATE(xas, i->xarray, index);			\
								\
	len = PAGE_SIZE - offset_in_page(start);		\
	rcu_read_lock();					\
	xas_for_each(&xas, folio, ULONG_MAX) {			\
		unsigned left;					\
		size_t offset;					\
		if (xas_retry(&xas, folio))			\
			continue;				\
		if (WARN_ON(xa_is_value(folio)))		\
			break;					\
		if (WARN_ON(folio_test_hugetlb(folio)))		\
			break;					\
		offset = offset_in_folio(folio, start + __off);	\
		while (offset < folio_size(folio)) {		\
			base = kmap_local_folio(folio, offset);	\
			len = min(n, len);			\
			left = (STEP);				\
			kunmap_local(base);			\
			len -= left;				\
			__off += len;				\
			n -= len;				\
			if (left || n == 0)			\
				goto __out;			\
			offset += len;				\
			len = PAGE_SIZE;			\
		}						\
	}							\
__out:								\
	rcu_read_unlock();					\
	i->iov_offset += __off;					\
	n = __off;						\
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (likely(n)) {					\
		if (likely(iter_is_iovec(i))) {			\
			const struct iovec *iov = i->iov;	\
			void __user *base;			\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						iov, (I))	\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		} else if (iov_iter_is_bvec(i)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			void *base;				\
			size_t len;				\
			iterate_bvec(i, n, base, len, off,	\
						bvec, (K))	\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (iov_iter_is_kvec(i)) {		\
			const struct kvec *kvec = i->kvec;	\
			void *base;				\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						kvec, (K))	\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (iov_iter_is_xarray(i)) {		\
			void *base;				\
			size_t len;				\
			iterate_xarray(i, n, base, len, off,	\
							(K))	\
		}						\
		i->count -= n;					\
	}							\
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))

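/*
 * Editorial note on the STEP contract (summary added by the editor, not
 * in the original file): each STEP expression receives `base' and `len'
 * for one contiguous chunk and must evaluate to the number of bytes it
 * could NOT process; 0 means the whole chunk was consumed. A non-zero
 * result stops the walk early, which is how short user copies propagate
 * out of the iterate_*() helpers. A minimal user of the pattern:
 *
 *	size_t copied = bytes;
 *	iterate_and_advance(i, copied, base, len, off,
 *		copyout(base, addr + off, len),		// user chunk
 *		memcpy(base, addr + off, len)		// kernel chunk
 *	)
 *	// `copied' now holds the number of bytes actually transferred
 */
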
static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		i_head++;
		buf = &pipe->bufs[i_head & p_mask];
	}
	if (pipe_full(i_head, p_tail, pipe->max_usage))
		return 0;

	buf->ops = &page_cache_pipe_buf_ops;
	buf->flags = 0;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = bytes;

	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
	i->head = i_head;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size. For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
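
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the usual retry pattern around a copy done with page faults disabled,
 * as used by callers that hold locks across the copy. All names except
 * the iov_iter API calls are hypothetical.
 */
static __maybe_unused size_t example_copy_in_with_prefault(void *dst,
		size_t len, struct iov_iter *i)
{
	size_t copied = 0;

	while (copied < len) {
		size_t n;

		pagefault_disable();
		n = _copy_from_iter(dst + copied, len - copied, i);
		pagefault_enable();
		copied += n;
		if (copied == len)
			break;
		/* Stop if none of the remaining bytes can be faulted in. */
		if (fault_in_iov_iter_readable(i, len - copied) ==
		    len - copied)
			break;
	}
	return copied;
}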

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults. This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);
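
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the write-side twin of the pattern above. A nowait copy that came up
 * short is followed by gup-based prefaulting of the remainder and one
 * retry; a partial fault-in simply bails out with what was copied.
 */
static __maybe_unused size_t example_copy_out_with_prefault(const void *src,
		size_t len, struct iov_iter *i)
{
	size_t copied;

	pagefault_disable();
	copied = _copy_to_iter(src, len, i);
	pagefault_enable();
	if (copied == len ||
	    fault_in_iov_iter_writeable(i, len - copied) != 0)
		return copied;
	/* The remainder is now resident; retry just that part. */
	return copied + _copy_to_iter(src + copied, len - copied, i);
}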

void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.data_source = direction,
		.iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);
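
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the direction convention. A read(2)-style operation fills the user
 * buffers, so its iterator is initialised with READ and drained with
 * copy_to_iter(); write(2) is the mirror image. @kiov is a hypothetical
 * kernel copy of an already validated user iovec array.
 */
static __maybe_unused void example_init_directions(const struct iovec *kiov,
		unsigned long nr_segs, size_t count)
{
	struct iov_iter to_user, from_user;

	iov_iter_init(&to_user, READ, kiov, nr_segs, count);	/* data -> user */
	iov_iter_init(&from_user, WRITE, kiov, nr_segs, count);	/* data <- user */
}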

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}

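/*
 * Editorial note (not part of the original file): push_pipe() makes room
 * for @size bytes at the current iterator position. It first tops up a
 * partially filled page at the head of the ring, then keeps allocating
 * fresh GFP_USER pages until the request is covered or the pipe is full,
 * and returns how many bytes of capacity it actually secured.
 */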
static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;
	size_t off;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[iter_head & p_mask].len += size;
			return size;
		}
		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
		iter_head++;
	}
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;

		buf->ops = &default_pipe_buf_ops;
		buf->flags = 0;
		buf->page = page;
		buf->offset = 0;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
		left -= buf->len;
		iter_head++;
		pipe->head = iter_head;

		if (left == 0)
			return size;
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct iov_iter *i, __wsum *sump)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	__wsum sum = *sump;
	size_t off = 0;
	unsigned int i_head;
	size_t r;

	if (!sanity(i))
		return 0;

	bytes = push_pipe(i, bytes, &i_head, &r);
	while (bytes) {
		size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
		kunmap_local(p);
		i->head = i_head;
		i->iov_offset = r + chunk;
		bytes -= chunk;
		off += chunk;
		r = 0;
		i_head++;
	}
	*sump = sum;
	i->count -= off;
	return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off, xfer = 0;

	if (!sanity(i))
		return 0;

	n = push_pipe(i, bytes, &i_head, &off);
	while (n) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		unsigned long rem;
		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
		chunk -= rem;
		kunmap_local(p);
		i->head = i_head;
		i->iov_offset = off + chunk;
		xfer += chunk;
		if (rem)
			break;
		n -= chunk;
		off = 0;
		i_head++;
	}
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon a machine check (#MC), read(2) aborts and returns
 * EIO or the number of bytes successfully copied.
 *
 * The main differences between this and the typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
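
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a caller that maps a completely failed machine-check copy to -EIO and
 * otherwise reports the short count, roughly what dax read paths do.
 * example_mc_read() itself is hypothetical; when the architecture lacks
 * CONFIG_ARCH_HAS_COPY_MC, uio.h aliases _copy_mc_to_iter to _copy_to_iter.
 */
static __maybe_unused ssize_t example_mc_read(const void *src, size_t len,
		struct iov_iter *i)
{
	size_t copied = _copy_mc_to_iter(src, len, i);

	if (len && !copied)
		return -EIO;	/* poison hit before anything transferred */
	return copied;
}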

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
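
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a pmem-style write path might use the cache-bypassing copy.
 * example_pmem_write() and its parameters are hypothetical; when
 * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE is not set, uio.h falls back to
 * _copy_from_iter_nocache(), so the call compiles either way.
 */
static __maybe_unused size_t example_pmem_write(void *pmem_addr, size_t len,
		struct iov_iter *i)
{
	/* Copy the iterator's data into persistent memory, flushing caches. */
	return _copy_from_iter_flushcache(pmem_addr, len, i);
}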

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (likely(iter_is_iovec(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
	if (iov_iter_is_pipe(i))
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	if (unlikely(iov_iter_is_discard(i))) {
		if (unlikely(i->count < bytes))
			bytes = i->count;
		i->count -= bytes;
		return bytes;
	}
	WARN_ON(1);
	return 0;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		size_t n = __copy_page_to_iter(page, offset,
				min(bytes, (size_t)PAGE_SIZE - offset), i);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (likely(iter_is_iovec(i)))
		return copy_page_from_iter_iovec(page, offset, bytes, i);
	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
	WARN_ON(1);
	return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;

	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		memset(p + off, 0, chunk);
		kunmap_local(p);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
				  struct iov_iter *i)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy(p + off, base, len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		if (off) {
			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
			i_head++;
		}
		while (p_head != i_head) {
			p_head--;
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		}

		pipe->head = p_head;
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (size) {
		struct pipe_buffer *buf;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset, left = size;

		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[i_head & p_mask].offset;
		while (1) {
			buf = &pipe->bufs[i_head & p_mask];
			if (left <= buf->len)
				break;
			left -= buf->len;
			i_head++;
		}
		i->head = i_head;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	struct bvec_iter bi;

	bi.bi_size = i->count;
	bi.bi_bvec_done = i->iov_offset;
	bi.bi_idx = 0;
	bvec_iter_advance(i->bvec, &bi, size);

	i->bvec += bi.bi_idx;
	i->nr_segs -= bi.bi_idx;
	i->count = bi.bi_size;
	i->iov_offset = bi.bi_bvec_done;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_pipe(i)) {
		pipe_advance(i, size);
	} else if (unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
		while (1) {
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && i_head == i->start_head) {
				off = 0;
				break;
			}
			i_head--;
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		}
		i->iov_offset = off;
		i->head = i_head;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);
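
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * wrapping two kernel buffers in an ITER_KVEC iterator and draining them
 * into a third with copy_from_iter(). WRITE marks the kvec as the data
 * source. All buffers here are hypothetical.
 */
static __maybe_unused size_t example_kvec_usage(void)
{
	char hdr[16], body[64], scratch[80];
	struct kvec vec[2] = {
		{ .iov_base = hdr,  .iov_len = sizeof(hdr) },
		{ .iov_base = body, .iov_len = sizeof(body) },
	};
	struct iov_iter iter;

	iov_iter_kvec(&iter, WRITE, vec, 2, sizeof(hdr) + sizeof(body));
	return copy_from_iter(scratch, sizeof(scratch), &iter);
}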

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	*i = (struct iov_iter){
		.iter_type = ITER_PIPE,
		.data_source = false,
		.pipe = pipe,
		.head = pipe->head,
		.start_head = pipe->head,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages. The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);
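
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * draining bytes out of an inode's page cache through an ITER_XARRAY
 * iterator, in the style of netfs-type code. The mapping and range are
 * hypothetical, and the caller is assumed to already hold references on
 * the pages, per the comment above.
 */
static __maybe_unused size_t example_read_mapping(struct address_space *mapping,
		loff_t pos, void *buf, size_t len)
{
	struct iov_iter iter;

	/* WRITE: the cached pages are the source of the copy below. */
	iov_iter_xarray(&iter, WRITE, &mapping->i_pages, pos, len);
	return copy_from_iter(buf, len, &iter);
}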

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);
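
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * using a discard iterator as a sink for protocol bytes that must be
 * consumed but are not wanted. The recv_into() callback is hypothetical.
 */
static __maybe_unused void example_skip_trailer(size_t trailer_len,
		size_t (*recv_into)(struct iov_iter *))
{
	struct iov_iter sink;

	iov_iter_discard(&sink, READ, trailer_len);
	recv_into(&sink);	/* everything "received" here is dropped */
}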

static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		if (len) {
			res |= (unsigned long)i->iov[k].iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_pipe(i)) {
		unsigned int p_mask = i->pipe->ring_size - 1;
		size_t size = i->count;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
		return size;
	}

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		if (i->iov[k].iov_len) {
			unsigned long base = (unsigned long)i->iov[k].iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + i->iov[k].iov_len;
			if (size <= i->iov[k].iov_len)
				break;
			size -= i->iov[k].iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
		iter_head++;
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page **pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size || !maxpages)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	if (count > maxpages)
		count = maxpages;

	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}

/* must be done on non-empty ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i,
					 size_t *size, size_t *start,
					 size_t maxsize, unsigned maxpages)
{
	size_t skip;
	long k;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
		size_t len = i->iov[k].iov_len - skip;

		if (unlikely(!len))
			continue;
		if (len > maxsize)
			len = maxsize;
		len += (*start = addr % PAGE_SIZE);
		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		*size = len;
		return addr & PAGE_MASK;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start,
				       size_t maxsize, unsigned maxpages)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (len > maxsize)
		len = maxsize;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	len += (*start = skip % PAGE_SIZE);
	if (len > maxpages * PAGE_SIZE)
		len = maxpages * PAGE_SIZE;
	*size = len;
	return page;
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	size_t len;
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;

	if (likely(iter_is_iovec(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, gup_flags, pages);
		if (unlikely(res <= 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &len, start, maxsize, maxpages);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		while (n--)
			get_page(*pages++ = page++);
		return len - *start;
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   size_t *_start_offset)
{
	struct page **p;
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	p = get_pages_array(count);
	if (!p)
		return -ENOMEM;
	*pages = p;

	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t len;
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;

	if (likely(iter_is_iovec(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, p);
		if (unlikely(res <= 0)) {
			kvfree(p);
			*pages = NULL;
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &len, start, maxsize, ~0U);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		*pages = p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		while (n--)
			get_page(*p++ = page++);
		return len - *start;
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	if (unlikely(iov_iter_is_pipe(i)))
		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
	else iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = i->iov; size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_pipe(i)) {
		unsigned int iter_head;
		int npages;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}." Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}
1940 0 : ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1941 : unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1942 : struct iov_iter *i, bool compat)
1943 : {
1944 0 : ssize_t total_len = 0;
1945 : unsigned long seg;
1946 : struct iovec *iov;
1947 :
1948 0 : iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1949 0 : if (IS_ERR(iov)) {
1950 0 : *iovp = NULL;
1951 0 : return PTR_ERR(iov);
1952 : }
1953 :
1954 : /*
1955 : * According to the Single Unix Specification we should return EINVAL if
1956 : * an element length is < 0 when cast to ssize_t or if the total length
1957 : * would overflow the ssize_t return value of the system call.
1958 : *
1959 : * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1960 : * overflow case.
1961 : */
1962 0 : for (seg = 0; seg < nr_segs; seg++) {
1963 0 : ssize_t len = (ssize_t)iov[seg].iov_len;
1964 :
1965 0 : if (!access_ok(iov[seg].iov_base, len)) {
1966 0 : if (iov != *iovp)
1967 0 : kfree(iov);
1968 0 : *iovp = NULL;
1969 0 : return -EFAULT;
1970 : }
1971 :
1972 0 : if (len > MAX_RW_COUNT - total_len) {
1973 0 : len = MAX_RW_COUNT - total_len;
1974 0 : iov[seg].iov_len = len;
1975 : }
1976 0 : total_len += len;
1977 : }
1978 :
1979 0 : iov_iter_init(i, type, iov, nr_segs, total_len);
1980 0 : if (iov == *iovp)
1981 0 : *iovp = NULL;
1982 : else
1983 0 : *iovp = iov;
1984 : return total_len;
1985 : }
1986 :
/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
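
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the canonical import_iovec() calling convention with a small on-stack
 * array. do_example_readv() is hypothetical; note that, per the comment
 * above, kfree() is safe whether or not the stack array ended up used.
 */
static __maybe_unused ssize_t do_example_readv(const struct iovec __user *uvec,
		unsigned int nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;
	/* ... pass &iter to the actual I/O here ... */
	kfree(iov);
	return ret;
}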

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
			 !iov_iter_is_kvec(i)))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can derive
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
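
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * pairing iov_iter_save_state() with iov_iter_restore() around an
 * operation that may consume part of the iterator before failing with
 * -EAGAIN. The try_operation() callback is hypothetical.
 */
static __maybe_unused ssize_t example_retryable(struct iov_iter *i,
		ssize_t (*try_operation)(struct iov_iter *))
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(i, &state);
	ret = try_operation(i);
	if (ret == -EAGAIN) {
		/* Rewind to exactly where we started and try again. */
		iov_iter_restore(i, &state);
		ret = try_operation(i);
	}
	return ret;
}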