Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : #ifndef _LINUX_SCATTERLIST_H
3 : #define _LINUX_SCATTERLIST_H
4 :
5 : #include <linux/string.h>
6 : #include <linux/types.h>
7 : #include <linux/bug.h>
8 : #include <linux/mm.h>
9 : #include <asm/io.h>
10 :
11 : struct scatterlist {
12 : unsigned long page_link;
13 : unsigned int offset;
14 : unsigned int length;
15 : dma_addr_t dma_address;
16 : #ifdef CONFIG_NEED_SG_DMA_LENGTH
17 : unsigned int dma_length;
18 : #endif
19 : };
20 :
21 : /*
22 : * These macros should be used after a dma_map_sg call has been done
23 : * to get bus addresses of each of the SG entries and their lengths.
24 : * You should only work with the number of sg entries dma_map_sg
25 : * returns, or alternatively stop on the first sg_dma_len(sg) which
26 : * is 0.
27 : */
28 : #define sg_dma_address(sg) ((sg)->dma_address)
29 :
30 : #ifdef CONFIG_NEED_SG_DMA_LENGTH
31 : #define sg_dma_len(sg) ((sg)->dma_length)
32 : #else
33 : #define sg_dma_len(sg) ((sg)->length)
34 : #endif
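
/*
 * Illustrative sketch (not part of the upstream header): consuming the result
 * of dma_map_sg() through the accessors above. "dev", "sgl", "nents" and
 * program_hw_segment() are hypothetical; dma_map_sg(), dma_unmap_sg() and
 * DMA_TO_DEVICE come from <linux/dma-mapping.h>.
 *
 *        struct scatterlist *sg;
 *        int i, mapped;
 *
 *        mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *        if (!mapped)
 *                return -EIO;
 *
 *        for_each_sg(sgl, sg, mapped, i)
 *                program_hw_segment(dev, sg_dma_address(sg), sg_dma_len(sg));
 *
 *        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */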
35 :
36 : struct sg_table {
37 : struct scatterlist *sgl; /* the list */
38 : unsigned int nents; /* number of mapped entries */
39 : unsigned int orig_nents; /* original size of list */
40 : };
41 :
42 : struct sg_append_table {
43 : struct sg_table sgt; /* The scatter list table */
44 : struct scatterlist *prv; /* last populated sge in the table */
45 : unsigned int total_nents; /* Total entries in the table */
46 : };
47 :
48 : /*
49 : * Notes on SG table design.
50 : *
51 : * We use the unsigned long page_link field in the scatterlist struct to place
52 : * the page pointer AND encode information about the sg table as well. The two
53 : * lower bits are reserved for this information.
54 : *
55 : * If bit 0 is set, then the page_link contains a pointer to the next sg
56 : * table list. Otherwise the next entry is at sg + 1.
57 : *
58 : * If bit 1 is set, then this sg entry is the last element in a list.
59 : *
60 : * See sg_next().
61 : *
62 : */
63 :
64 : #define SG_CHAIN 0x01UL
65 : #define SG_END 0x02UL
66 :
67 : /*
68 : * We overload the LSB of the page pointer to indicate whether it's
69 : * a valid sg entry, or whether it points to the start of a new scatterlist.
70 : * Those low bits are there for everyone! (thanks mason :-)
71 : */
72 : #define SG_PAGE_LINK_MASK (SG_CHAIN | SG_END)
73 :
74 : static inline unsigned int __sg_flags(struct scatterlist *sg)
75 : {
76 0 : return sg->page_link & SG_PAGE_LINK_MASK;
77 : }
78 :
79 : static inline struct scatterlist *sg_chain_ptr(struct scatterlist *sg)
80 : {
81 0 : return (struct scatterlist *)(sg->page_link & ~SG_PAGE_LINK_MASK);
82 : }
83 :
84 : static inline bool sg_is_chain(struct scatterlist *sg)
85 : {
86 0 : return __sg_flags(sg) & SG_CHAIN;
87 : }
88 :
89 : static inline bool sg_is_last(struct scatterlist *sg)
90 : {
91 0 : return __sg_flags(sg) & SG_END;
92 : }
93 :
94 : /**
95 : * sg_assign_page - Assign a given page to an SG entry
96 : * @sg: SG entry
97 : * @page: The page
98 : *
99 : * Description:
100 : * Assign page to sg entry. Also see sg_set_page(), the most commonly used
101 : * variant.
102 : *
103 : **/
104 0 : static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
105 : {
106 0 : unsigned long page_link = sg->page_link & (SG_CHAIN | SG_END);
107 :
108 : /*
109 : * In order for the low bit stealing approach to work, pages
110 : * must be aligned at a 32-bit boundary as a minimum.
111 : */
112 0 : BUG_ON((unsigned long)page & SG_PAGE_LINK_MASK);
113 : #ifdef CONFIG_DEBUG_SG
114 : BUG_ON(sg_is_chain(sg));
115 : #endif
116 0 : sg->page_link = page_link | (unsigned long) page;
117 0 : }
118 :
119 : /**
120 : * sg_set_page - Set sg entry to point at given page
121 : * @sg: SG entry
122 : * @page: The page
123 : * @len: Length of data
124 : * @offset: Offset into page
125 : *
126 : * Description:
127 : * Use this function to set an sg entry pointing at a page, never assign
128 : * the page directly. We encode sg table information in the lower bits
129 : * of the page pointer. See sg_page() for looking up the page belonging
130 : * to an sg entry.
131 : *
132 : **/
133 : static inline void sg_set_page(struct scatterlist *sg, struct page *page,
134 : unsigned int len, unsigned int offset)
135 : {
136 0 : sg_assign_page(sg, page);
137 0 : sg->offset = offset;
138 0 : sg->length = len;
139 : }
140 :
141 : static inline struct page *sg_page(struct scatterlist *sg)
142 : {
143 : #ifdef CONFIG_DEBUG_SG
144 : BUG_ON(sg_is_chain(sg));
145 : #endif
146 0 : return (struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK);
147 : }
148 :
149 : /**
150 : * sg_set_buf - Set sg entry to point at given data
151 : * @sg: SG entry
152 : * @buf: Data
153 : * @buflen: Data length
154 : *
155 : **/
156 : static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
157 : unsigned int buflen)
158 : {
159 : #ifdef CONFIG_DEBUG_SG
160 : BUG_ON(!virt_addr_valid(buf));
161 : #endif
162 0 : sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
163 : }
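
/*
 * Illustrative sketch (not part of the upstream header): populating a small
 * on-stack list. "hdr", "page", "len" and "off" are hypothetical;
 * sg_init_table() zeroes the entries and marks the last one as the end.
 *
 *        struct scatterlist sg[2];
 *
 *        sg_init_table(sg, 2);
 *        sg_set_buf(&sg[0], hdr, sizeof(*hdr));
 *        sg_set_page(&sg[1], page, len, off);
 */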
164 :
165 : /*
166 : * Loop over each sg element, following the pointer to a new list if necessary
167 : */
168 : #define for_each_sg(sglist, sg, nr, __i) \
169 : for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
170 :
171 : /*
172 : * Loop over each sg element in the given sg_table object.
173 : */
174 : #define for_each_sgtable_sg(sgt, sg, i) \
175 : for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)
176 :
177 : /*
178 : * Loop over each sg element in the given *DMA mapped* sg_table object.
179 : * Please use sg_dma_address(sg) and sg_dma_len(sg) to extract DMA addresses
180 : * and lengths of each element.
181 : */
182 : #define for_each_sgtable_dma_sg(sgt, sg, i) \
183 : for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
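
/*
 * Illustrative sketch (not part of the upstream header): a typical sg_table
 * lifecycle around the iterators above. "dev", "nents", fill_table() (which
 * would call sg_set_page() on each entry) and queue_segment() are
 * hypothetical; dma_map_sgtable()/dma_unmap_sgtable() come from
 * <linux/dma-mapping.h>.
 *
 *        struct sg_table sgt;
 *        struct scatterlist *sg;
 *        int i, ret;
 *
 *        ret = sg_alloc_table(&sgt, nents, GFP_KERNEL);
 *        if (ret)
 *                return ret;
 *        fill_table(&sgt);
 *
 *        ret = dma_map_sgtable(dev, &sgt, DMA_TO_DEVICE, 0);
 *        if (ret) {
 *                sg_free_table(&sgt);
 *                return ret;
 *        }
 *
 *        for_each_sgtable_dma_sg(&sgt, sg, i)
 *                queue_segment(sg_dma_address(sg), sg_dma_len(sg));
 *
 *        dma_unmap_sgtable(dev, &sgt, DMA_TO_DEVICE, 0);
 *        sg_free_table(&sgt);
 */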
184 :
185 : static inline void __sg_chain(struct scatterlist *chain_sg,
186 : struct scatterlist *sgl)
187 : {
188 : /*
189 : * offset and length are unused for chain entry. Clear them.
190 : */
191 0 : chain_sg->offset = 0;
192 0 : chain_sg->length = 0;
193 :
194 : /*
195 : * Set lowest bit to indicate a link pointer, and make sure to clear
196 : * the termination bit if it happens to be set.
197 : */
198 0 : chain_sg->page_link = ((unsigned long) sgl | SG_CHAIN) & ~SG_END;
199 : }
200 :
201 : /**
202 : * sg_chain - Chain two sglists together
203 : * @prv: First scatterlist
204 : * @prv_nents: Number of entries in prv
205 : * @sgl: Second scatterlist
206 : *
207 : * Description:
208 : * Links @prv@ and @sgl@ together, to form a longer scatterlist.
209 : *
210 : **/
211 : static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
212 : struct scatterlist *sgl)
213 : {
214 0 : __sg_chain(&prv[prv_nents - 1], sgl);
215 : }
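
/*
 * Illustrative sketch (not part of the upstream header): chaining two
 * fixed-size arrays. The chain entry consumes the last slot of the first
 * array, so only SG_PER_CHUNK - 1 of its entries carry data (SG_PER_CHUNK
 * is a hypothetical name).
 *
 *        struct scatterlist first[SG_PER_CHUNK], second[SG_PER_CHUNK];
 *
 *        sg_init_table(first, SG_PER_CHUNK);
 *        sg_init_table(second, SG_PER_CHUNK);
 *        ... fill first[0..SG_PER_CHUNK - 2] and second[] ...
 *        sg_chain(first, SG_PER_CHUNK, second);
 */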
216 :
217 : /**
218 : * sg_mark_end - Mark the end of the scatterlist
219 : * @sg: SG entry
220 : *
221 : * Description:
222 : * Marks the passed in sg entry as the termination point for the sg
223 : * table. A call to sg_next() on this entry will return NULL.
224 : *
225 : **/
226 : static inline void sg_mark_end(struct scatterlist *sg)
227 : {
228 : /*
229 : * Set termination bit, clear potential chain bit
230 : */
231 0 : sg->page_link |= SG_END;
232 0 : sg->page_link &= ~SG_CHAIN;
233 : }
234 :
235 : /**
236 : * sg_unmark_end - Undo setting the end of the scatterlist
237 : * @sg: SG entry
238 : *
239 : * Description:
240 : * Removes the termination marker from the given entry of the scatterlist.
241 : *
242 : **/
243 : static inline void sg_unmark_end(struct scatterlist *sg)
244 : {
245 0 : sg->page_link &= ~SG_END;
246 : }
247 :
248 : /**
249 : * sg_phys - Return physical address of an sg entry
250 : * @sg: SG entry
251 : *
252 : * Description:
253 : * This calls page_to_phys() on the page in this sg entry, and adds the
254 : * sg offset. The caller must know that it is legal to call page_to_phys()
255 : * on the sg page.
256 : *
257 : **/
258 : static inline dma_addr_t sg_phys(struct scatterlist *sg)
259 : {
260 0 : return page_to_phys(sg_page(sg)) + sg->offset;
261 : }
262 :
263 : /**
264 : * sg_virt - Return virtual address of an sg entry
265 : * @sg: SG entry
266 : *
267 : * Description:
268 : * This calls page_address() on the page in this sg entry, and adds the
269 : * sg offset. The caller must know that the sg page has a valid virtual
270 : * mapping.
271 : *
272 : **/
273 : static inline void *sg_virt(struct scatterlist *sg)
274 : {
275 : return page_address(sg_page(sg)) + sg->offset;
276 : }
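
/*
 * Illustrative sketch (not part of the upstream header): copying out of a
 * single entry whose page is known to have a kernel mapping (e.g. a lowmem
 * buffer set with sg_set_buf()). "dst" is hypothetical. Lists that may
 * contain highmem pages should use the sg_copy_*() helpers or the
 * sg_mapping_iter declared at the end of this header instead.
 *
 *        memcpy(dst, sg_virt(sg), sg->length);
 */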
277 :
278 : /**
279 : * sg_init_marker - Initialize markers in sg table
280 : * @sgl: The SG table
281 : * @nents: Number of entries in table
282 : *
283 : **/
284 : static inline void sg_init_marker(struct scatterlist *sgl,
285 : unsigned int nents)
286 : {
287 0 : sg_mark_end(&sgl[nents - 1]);
288 : }
289 :
290 : int sg_nents(struct scatterlist *sg);
291 : int sg_nents_for_len(struct scatterlist *sg, u64 len);
292 : struct scatterlist *sg_next(struct scatterlist *);
293 : struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
294 : void sg_init_table(struct scatterlist *, unsigned int);
295 : void sg_init_one(struct scatterlist *, const void *, unsigned int);
296 : int sg_split(struct scatterlist *in, const int in_mapped_nents,
297 : const off_t skip, const int nb_splits,
298 : const size_t *split_sizes,
299 : struct scatterlist **out, int *out_mapped_nents,
300 : gfp_t gfp_mask);
301 :
302 : typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
303 : typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
304 :
305 : void __sg_free_table(struct sg_table *, unsigned int, unsigned int,
306 : sg_free_fn *, unsigned int);
307 : void sg_free_table(struct sg_table *);
308 : void sg_free_append_table(struct sg_append_table *sgt);
309 : int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
310 : struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *);
311 : int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
312 : int sg_alloc_append_table_from_pages(struct sg_append_table *sgt,
313 : struct page **pages, unsigned int n_pages,
314 : unsigned int offset, unsigned long size,
315 : unsigned int max_segment,
316 : unsigned int left_pages, gfp_t gfp_mask);
317 : int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
318 : unsigned int n_pages, unsigned int offset,
319 : unsigned long size,
320 : unsigned int max_segment, gfp_t gfp_mask);
321 :
322 : /**
323 : * sg_alloc_table_from_pages - Allocate and initialize an sg table from
324 : * an array of pages
325 : * @sgt: The sg table header to use
326 : * @pages: Pointer to an array of page pointers
327 : * @n_pages: Number of pages in the pages array
328 : * @offset: Offset from start of the first page to the start of a buffer
329 : * @size: Number of valid bytes in the buffer (after offset)
330 : * @gfp_mask: GFP allocation mask
331 : *
332 : * Description:
333 : * Allocate and initialize an sg table from a list of pages. Contiguous
334 : * ranges of the pages are squashed into a single scatterlist node. A user
335 : * may provide an offset at the start and the size of the valid data in a buffer
336 : * specified by the page array. The returned sg table is released by
337 : * sg_free_table.
338 : *
339 : * Returns:
340 : * 0 on success, negative error on failure
341 : */
342 : static inline int sg_alloc_table_from_pages(struct sg_table *sgt,
343 : struct page **pages,
344 : unsigned int n_pages,
345 : unsigned int offset,
346 : unsigned long size, gfp_t gfp_mask)
347 : {
348 0 : return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset,
349 : size, UINT_MAX, gfp_mask);
350 : }
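
/*
 * Illustrative sketch (not part of the upstream header): wrapping a pinned
 * page array in an sg_table. "pages", "n_pages", "offset" and "size" are
 * hypothetical and would typically come from pin_user_pages().
 *
 *        struct sg_table sgt;
 *        int ret;
 *
 *        ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, offset, size,
 *                                        GFP_KERNEL);
 *        if (ret)
 *                return ret;
 *        ...
 *        sg_free_table(&sgt);
 */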
351 :
352 : #ifdef CONFIG_SGL_ALLOC
353 : struct scatterlist *sgl_alloc_order(unsigned long long length,
354 : unsigned int order, bool chainable,
355 : gfp_t gfp, unsigned int *nent_p);
356 : struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
357 : unsigned int *nent_p);
358 : void sgl_free_n_order(struct scatterlist *sgl, int nents, int order);
359 : void sgl_free_order(struct scatterlist *sgl, int order);
360 : void sgl_free(struct scatterlist *sgl);
361 : #endif /* CONFIG_SGL_ALLOC */
362 :
363 : size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
364 : size_t buflen, off_t skip, bool to_buffer);
365 :
366 : size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
367 : const void *buf, size_t buflen);
368 : size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
369 : void *buf, size_t buflen);
370 :
371 : size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
372 : const void *buf, size_t buflen, off_t skip);
373 : size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
374 : void *buf, size_t buflen, off_t skip);
375 : size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
376 : size_t buflen, off_t skip);
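
/*
 * Illustrative sketch (not part of the upstream header): the copy helpers
 * return the number of bytes actually copied, which may be less than
 * "buflen" when the scatterlist is shorter. "sgt", "buf", "buflen" and
 * "resp_offset" are hypothetical.
 *
 *        size_t copied;
 *
 *        copied = sg_copy_to_buffer(sgt.sgl, sgt.orig_nents, buf, buflen);
 *        if (copied < buflen)
 *                return -ENODATA;
 *
 *        sg_pcopy_from_buffer(sgt.sgl, sgt.orig_nents, buf, buflen, resp_offset);
 */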
377 :
378 : /*
379 : * Maximum number of entries that will be allocated in one piece, if
380 : * a list larger than this is required then chaining will be utilized.
381 : */
382 : #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
383 :
384 : /*
385 : * The maximum number of SG segments that we will put inside a
386 : * scatterlist (unless chaining is used). Should ideally fit inside a
387 : * single page, to avoid a higher order allocation. We could define this
388 : * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The
389 : * minimum value is 32.
390 : */
391 : #define SG_CHUNK_SIZE 128
392 :
393 : /*
394 : * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
395 : * is totally arbitrary; a setting of 2048 will get you at least 8 MB I/Os.
396 : */
397 : #ifdef CONFIG_ARCH_NO_SG_CHAIN
398 : #define SG_MAX_SEGMENTS SG_CHUNK_SIZE
399 : #else
400 : #define SG_MAX_SEGMENTS 2048
401 : #endif
402 :
403 : #ifdef CONFIG_SG_POOL
404 : void sg_free_table_chained(struct sg_table *table,
405 : unsigned nents_first_chunk);
406 : int sg_alloc_table_chained(struct sg_table *table, int nents,
407 : struct scatterlist *first_chunk,
408 : unsigned nents_first_chunk);
409 : #endif
410 :
411 : /*
412 : * sg page iterator
413 : *
414 : * Iterates over sg entries page-by-page. On each successful iteration, you
415 : * can call sg_page_iter_page(@piter) to get the current page.
416 : * @piter->sg will point to the sg holding this page and @piter->sg_pgoffset to
417 : * the page's page offset within the sg. The iteration stops either when the
418 : * maximum number of sg entries has been reached or when a terminating sg
419 : * (sg_is_last(sg) == true) is reached.
420 : */
421 : struct sg_page_iter {
422 : struct scatterlist *sg; /* sg holding the page */
423 : unsigned int sg_pgoffset; /* page offset within the sg */
424 :
425 : /* these are internal states, keep away */
426 : unsigned int __nents; /* remaining sg entries */
427 : int __pg_advance; /* nr pages to advance at the
428 : * next step */
429 : };
430 :
431 : /*
432 : * sg page iterator for DMA addresses
433 : *
434 : * This is the same as sg_page_iter however you can call
435 : * sg_page_iter_dma_address(@dma_iter) to get the page's DMA
436 : * address. sg_page_iter_page() cannot be called on this iterator.
437 : */
438 : struct sg_dma_page_iter {
439 : struct sg_page_iter base;
440 : };
441 :
442 : bool __sg_page_iter_next(struct sg_page_iter *piter);
443 : bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter);
444 : void __sg_page_iter_start(struct sg_page_iter *piter,
445 : struct scatterlist *sglist, unsigned int nents,
446 : unsigned long pgoffset);
447 : /**
448 : * sg_page_iter_page - get the current page held by the page iterator
449 : * @piter: page iterator holding the page
450 : */
451 : static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
452 : {
453 0 : return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
454 : }
455 :
456 : /**
457 : * sg_page_iter_dma_address - get the dma address of the current page held by
458 : * the page iterator.
459 : * @dma_iter: page iterator holding the page
460 : */
461 : static inline dma_addr_t
462 : sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
463 : {
464 0 : return sg_dma_address(dma_iter->base.sg) +
465 0 : (dma_iter->base.sg_pgoffset << PAGE_SHIFT);
466 : }
467 :
468 : /**
469 : * for_each_sg_page - iterate over the pages of the given sg list
470 : * @sglist: sglist to iterate over
471 : * @piter: page iterator to hold current page, sg, sg_pgoffset
472 : * @nents: maximum number of sg entries to iterate over
473 : * @pgoffset: starting page offset (in pages)
474 : *
475 : * Callers may use sg_page_iter_page() to get each page pointer.
476 : * Each iteration operates on a PAGE_SIZE unit.
477 : */
478 : #define for_each_sg_page(sglist, piter, nents, pgoffset) \
479 : for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
480 : __sg_page_iter_next(piter);)
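
/*
 * Illustrative sketch (not part of the upstream header): collecting every
 * CPU page of an already-populated table "sgt" into a hypothetical "pages"
 * array.
 *
 *        struct sg_page_iter piter;
 *        unsigned int n = 0;
 *
 *        for_each_sg_page(sgt.sgl, &piter, sgt.orig_nents, 0)
 *                pages[n++] = sg_page_iter_page(&piter);
 */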
481 :
482 : /**
483 : * for_each_sg_dma_page - iterate over the pages of the given sg list
484 : * @sglist: sglist to iterate over
485 : * @dma_iter: DMA page iterator to hold current page
486 : * @dma_nents: maximum number of sg entries to iterate over, this is the value
487 : * returned from dma_map_sg
488 : * @pgoffset: starting page offset (in pages)
489 : *
490 : * Callers may use sg_page_iter_dma_address() to get each page's DMA address.
491 : * Each iteration operates on a PAGE_SIZE unit.
492 : */
493 : #define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \
494 : for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \
495 : pgoffset); \
496 : __sg_page_iter_dma_next(dma_iter);)
497 :
498 : /**
499 : * for_each_sgtable_page - iterate over all pages in the sg_table object
500 : * @sgt: sg_table object to iterate over
501 : * @piter: page iterator to hold current page
502 : * @pgoffset: starting page offset (in pages)
503 : *
504 : * Iterates over all the memory pages in the buffer described by
505 : * a scatterlist stored in the given sg_table object.
506 : * See also for_each_sg_page(). Each iteration operates on a PAGE_SIZE unit.
507 : */
508 : #define for_each_sgtable_page(sgt, piter, pgoffset) \
509 : for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset)
510 :
511 : /**
512 : * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
513 : * @sgt: sg_table object to iterate over
514 : * @dma_iter: DMA page iterator to hold current page
515 : * @pgoffset: starting page offset (in pages)
516 : *
517 : * Iterates over all the DMA mapped pages in the buffer described by
518 : * a scatterlist stored in the given sg_table object.
519 : * See also for_each_sg_dma_page(). Each iteration operates on a PAGE_SIZE
520 : * unit.
521 : */
522 : #define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \
523 : for_each_sg_dma_page((sgt)->sgl, dma_iter, (sgt)->nents, pgoffset)
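
/*
 * Illustrative sketch (not part of the upstream header): programming a flat
 * page table of PAGE_SIZE DMA addresses from a DMA-mapped table "sgt".
 * write_pte() is hypothetical.
 *
 *        struct sg_dma_page_iter dma_iter;
 *        unsigned int n = 0;
 *
 *        for_each_sgtable_dma_page(&sgt, &dma_iter, 0)
 *                write_pte(n++, sg_page_iter_dma_address(&dma_iter));
 */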
524 :
525 :
526 : /*
527 : * Mapping sg iterator
528 : *
529 : * Iterates over sg entries mapping page-by-page. On each successful
530 : * iteration, @miter->page points to the mapped page and
531 : * @miter->length bytes of data can be accessed at @miter->addr. As
532 : * long as an iteration is enclosed between start and stop, the user
533 : * is free to choose the control structure and when to stop.
534 : *
535 : * @miter->consumed is set to @miter->length on each iteration. It
536 : * can be adjusted if the user can't consume all the bytes in one go.
537 : * Also, a stopped iteration can be resumed by calling next on it.
538 : * This is useful when iteration needs to release all resources and
539 : * continue later (e.g. at the next interrupt).
540 : */
541 :
542 : #define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
543 : #define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */
544 : #define SG_MITER_FROM_SG (1 << 2) /* nop */
545 :
546 : struct sg_mapping_iter {
547 : /* the following three fields can be accessed directly */
548 : struct page *page; /* currently mapped page */
549 : void *addr; /* pointer to the mapped area */
550 : size_t length; /* length of the mapped area */
551 : size_t consumed; /* number of consumed bytes */
552 : struct sg_page_iter piter; /* page iterator */
553 :
554 : /* these are internal states, keep away */
555 : unsigned int __offset; /* offset within page */
556 : unsigned int __remaining; /* remaining bytes on page */
557 : unsigned int __flags;
558 : };
559 :
560 : void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
561 : unsigned int nents, unsigned int flags);
562 : bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset);
563 : bool sg_miter_next(struct sg_mapping_iter *miter);
564 : void sg_miter_stop(struct sg_mapping_iter *miter);
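
/*
 * Illustrative sketch (not part of the upstream header): reading a list
 * piecewise with the mapping iterator. SG_MITER_FROM_SG declares that data
 * flows from the scatterlist to the caller; "sgt" and consume() are
 * hypothetical.
 *
 *        struct sg_mapping_iter miter;
 *
 *        sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
 *        while (sg_miter_next(&miter))
 *                consume(miter.addr, miter.length);
 *        sg_miter_stop(&miter);
 */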
565 :
566 : #endif /* _LINUX_SCATTERLIST_H */