/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * Copyright (c) 2016 Matthew Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_LINUXKPI_LINUX_SCATTERLIST_H_
#define	_LINUXKPI_LINUX_SCATTERLIST_H_

#include <sys/types.h>
#include <sys/sf_buf.h>

#include <linux/page.h>
#include <linux/slab.h>
#include <linux/mm.h>

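/*
 * A scatterlist entry describes one run of physically contiguous
 * memory.  The low two bits of page_link encode list metadata (chain
 * and end markers) while the remaining bits hold the page pointer,
 * which is why the structure size must be a multiple of four (see the
 * CTASSERT below).
 */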
struct bus_dmamap;
struct scatterlist {
	unsigned long page_link;
#define	SG_PAGE_LINK_CHAIN	0x1UL
#define	SG_PAGE_LINK_LAST	0x2UL
#define	SG_PAGE_LINK_MASK	0x3UL
	unsigned int offset;
	unsigned int length;
	dma_addr_t dma_address;
	struct bus_dmamap *dma_map;	/* FreeBSD specific */
};

CTASSERT((sizeof(struct scatterlist) & SG_PAGE_LINK_MASK) == 0);

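/*
 * An sg_table wraps a (possibly chained) scatterlist array.
 * orig_nents is the number of entries allocated, while nents is the
 * number of entries actually in use after DMA mapping.
 */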
struct sg_table {
	struct scatterlist *sgl;
	unsigned int nents;
	unsigned int orig_nents;
};

struct sg_page_iter {
	struct scatterlist *sg;
	unsigned int sg_pgoffset;
	unsigned int maxents;
	struct {
		unsigned int nents;
		int pg_advance;
	} internal;
};

struct sg_dma_page_iter {
	struct sg_page_iter base;
};

#define	SCATTERLIST_MAX_SEGMENT	(-1U & ~(PAGE_SIZE - 1))

#define	SG_MAX_SINGLE_ALLOC	(PAGE_SIZE / sizeof(struct scatterlist))

#define	SG_MAGIC		0x87654321UL
#define	SG_CHAIN		SG_PAGE_LINK_CHAIN
#define	SG_END			SG_PAGE_LINK_LAST

#define	sg_is_chain(sg)		((sg)->page_link & SG_PAGE_LINK_CHAIN)
#define	sg_is_last(sg)		((sg)->page_link & SG_PAGE_LINK_LAST)
#define	sg_chain_ptr(sg)	\
	((struct scatterlist *) ((sg)->page_link & ~SG_PAGE_LINK_MASK))

#define	sg_dma_address(sg)	(sg)->dma_address
#define	sg_dma_len(sg)		(sg)->length

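/*
 * Iteration helpers.  The page-based iterators visit every page backed
 * by the list, while for_each_sg() walks entry by entry, following
 * chain pointers via sg_next().
 */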
#define	for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))
#define	for_each_sg_dma_page(sgl, iter, nents, pgoffset)		\
	for_each_sg_page(sgl, &(iter)->base, nents, pgoffset)

#define	for_each_sg(sglist, sg, sgmax, iter)				\
	for (iter = 0, sg = (sglist); iter < (sgmax); iter++, sg = sg_next(sg))

#define	for_each_sgtable_sg(sgt, sg, i) \
	for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)

#define	for_each_sgtable_page(sgt, iter, pgoffset) \
	for_each_sg_page((sgt)->sgl, iter, (sgt)->orig_nents, pgoffset)

#define	for_each_sgtable_dma_sg(sgt, sg, iter) \
	for_each_sg((sgt)->sgl, sg, (sgt)->nents, iter)

#define	for_each_sgtable_dma_page(sgt, iter, pgoffset) \
	for_each_sg_dma_page((sgt)->sgl, iter, (sgt)->nents, pgoffset)

typedef struct scatterlist *(sg_alloc_fn) (unsigned int, gfp_t);
typedef void (sg_free_fn) (struct scatterlist *, unsigned int);

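/*
 * sg_assign_page() stores a page pointer in an entry while preserving
 * any chain/end marker bits already present in page_link.
 */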
static inline void
sg_assign_page(struct scatterlist *sg, struct page *page)
{
	unsigned long page_link = sg->page_link & SG_PAGE_LINK_MASK;

	sg->page_link = page_link | (unsigned long)page;
}

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	sg_assign_page(sg, page);
	sg->offset = offset;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return ((struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK));
}

static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & (PAGE_SIZE - 1));
}

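/*
 * sg_next() returns the next entry, transparently stepping through a
 * chain entry into the next scatterlist chunk, or NULL at the end of
 * the list.
 */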
static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return (NULL);
	sg++;
	if (sg_is_chain(sg))
		sg = sg_chain_ptr(sg);
	return (sg);
}

static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return (VM_PAGE_TO_PHYS(sg_page(sg)) + sg->offset);
}

static inline void *
sg_virt(struct scatterlist *sg)
{

	return ((void *)((unsigned long)page_address(sg_page(sg)) + sg->offset));
}

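/*
 * sg_chain() turns the last entry of the "prv" chunk into a chain
 * entry pointing at "sgl", so that several separately allocated chunks
 * can be traversed as one logical list.
 */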
static inline void
sg_chain(struct scatterlist *prv, unsigned int prv_nents,
    struct scatterlist *sgl)
{
	struct scatterlist *sg = &prv[prv_nents - 1];

	sg->offset = 0;
	sg->length = 0;
	sg->page_link = ((unsigned long)sgl |
	    SG_PAGE_LINK_CHAIN) & ~SG_PAGE_LINK_LAST;
}

static inline void
sg_mark_end(struct scatterlist *sg)
{
	sg->page_link |= SG_PAGE_LINK_LAST;
	sg->page_link &= ~SG_PAGE_LINK_CHAIN;
}

static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
	bzero(sg, sizeof(*sg) * nents);
	sg_mark_end(&sg[nents - 1]);
}

static inline void
sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}

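/*
 * Chunk allocator callbacks.  A full chunk of SG_MAX_SINGLE_ALLOC
 * entries fills exactly one page, so that case is served straight from
 * the page allocator; smaller chunks come from kmalloc().
 */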
static inline struct scatterlist *
sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		return ((void *)__get_free_page(gfp_mask));
	} else
		return (kmalloc(nents * sizeof(struct scatterlist), gfp_mask));
}

static inline void
sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		free_page((unsigned long)sg);
	} else
		kfree(sg);
}

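/*
 * __sg_free_table() walks the chunk chain and releases each chunk
 * through free_fn.  The chain pointer must be fetched before the chunk
 * is freed, and orig_nents counts down by the number of usable entries
 * per chunk (the last entry of a full chunk is the chain link itself).
 */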
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents,
    bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}

static inline void
sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}

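/*
 * __sg_alloc_table() builds a table of "nents" entries from chunks of
 * at most max_ents entries each.  When more than one chunk is needed,
 * the last entry of each full chunk is sacrificed to chain to the next
 * chunk, which is why sg_size is alloc_size - 1 in that case.
 */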
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
    unsigned int max_ents, struct scatterlist *first_chunk,
    gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return (-EINVAL);
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size;
		unsigned int alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			if (prv)
				table->nents = ++table->orig_nents;

			return (-ENOMEM);
		}
		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return (0);
}

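/*
 * Typical usage (a sketch; "npages" and the "pages" array are
 * caller-supplied and hypothetical, error handling is elided):
 *
 *	struct sg_table sgt;
 *	struct scatterlist *sg;
 *	int i, error;
 *
 *	error = sg_alloc_table(&sgt, npages, GFP_KERNEL);
 *	if (error == 0) {
 *		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
 *			sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *		...
 *		sg_free_table(&sgt);
 *	}
 */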
static inline int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
	    NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return (ret);
}

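/*
 * __sg_alloc_table_from_pages() coalesces physically contiguous pages
 * into single scatterlist entries.  The first pass counts the segments
 * needed (a new segment starts whenever the next page is not
 * contiguous with the previous one, or the segment would exceed
 * max_segment); the second pass fills in the entries.
 */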
#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
static inline struct scatterlist *
__sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    unsigned int max_segment,
    struct scatterlist *prv, unsigned int left_pages,
    gfp_t gfp_mask)
#else
static inline int
__sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    unsigned int max_segment, gfp_t gfp_mask)
#endif
{
	unsigned int i, segs, cur, len;
	int rc;
	struct scatterlist *s, *sg_iter;

#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
	if (prv != NULL) {
		panic(
		    "Support for prv != NULL not implemented in "
		    "__sg_alloc_table_from_pages()");
	}
#endif

	if (__predict_false(!max_segment || offset_in_page(max_segment)))
#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
		return (ERR_PTR(-EINVAL));
#else
		return (-EINVAL);
#endif

	len = 0;
	for (segs = i = 1; i < count; ++i) {
		len += PAGE_SIZE;
		if (len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			++segs;
			len = 0;
		}
	}
	if (__predict_false((rc = sg_alloc_table(sgt, segs, gfp_mask))))
#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
		return (ERR_PTR(rc));
#else
		return (rc);
#endif

	cur = 0;
	s = NULL;
	for_each_sg(sgt->sgl, sg_iter, sgt->orig_nents, i) {
		unsigned long seg_size;
		unsigned int j;

		/*
		 * Keep "s" pointing at the last valid entry so that
		 * sg_mark_end() can be applied after the loop; sg_iter
		 * itself is NULL once the list has been exhausted.
		 */
		s = sg_iter;

		len = 0;
		for (j = cur + 1; j < count; ++j) {
			len += PAGE_SIZE;
			if (len >= max_segment || page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		seg_size = ((j - cur) << PAGE_SHIFT) - off;
		sg_set_page(s, pages[cur], MIN(size, seg_size), off);
		size -= seg_size;
		off = 0;
		cur = j;
	}
	KASSERT(s != NULL, ("s is NULL after loop in __sg_alloc_table_from_pages()"));

#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
	if (left_pages == 0)
		sg_mark_end(s);

	return (s);
#else
	return (0);
#endif
}

static inline int
sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    gfp_t gfp_mask)
{

#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
	return (PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, count,
	    off, size, SCATTERLIST_MAX_SEGMENT, NULL, 0, gfp_mask)));
#else
	return (__sg_alloc_table_from_pages(sgt, pages, count, off, size,
	    SCATTERLIST_MAX_SEGMENT, gfp_mask));
#endif
}

static inline int
sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return (nents);
}

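/*
 * Two page-iterator implementations coexist here: the internal
 * _sg_iter_*() helpers behind for_each_sg_page(), and the Linux-style
 * __sg_page_iter_*() interface.  Both track a page offset within the
 * current entry and hop to the next entry once its pages are
 * exhausted.
 */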
static inline void
__sg_page_iter_start(struct sg_page_iter *piter,
    struct scatterlist *sglist, unsigned int nents,
    unsigned long pgoffset)
{
	piter->internal.pg_advance = 0;
	piter->internal.nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}

static inline void
_sg_iter_next(struct sg_page_iter *iter)
{
	struct scatterlist *sg;
	unsigned int pgcount;

	sg = iter->sg;
	pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;

	++iter->sg_pgoffset;
	while (iter->sg_pgoffset >= pgcount) {
		iter->sg_pgoffset -= pgcount;
		sg = sg_next(sg);
		--iter->maxents;
		if (sg == NULL || iter->maxents == 0)
			break;
		pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}
	iter->sg = sg;
}

static inline int
sg_page_count(struct scatterlist *sg)
{
	return (PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT);
}

#define	sg_dma_page_count(sg) \
	sg_page_count(sg)

static inline bool
__sg_page_iter_next(struct sg_page_iter *piter)
{
	unsigned int pgcount;

	if (piter->internal.nents == 0)
		return (false);
	if (piter->sg == NULL)
		return (false);

	piter->sg_pgoffset += piter->internal.pg_advance;
	piter->internal.pg_advance = 1;

	while (1) {
		pgcount = sg_page_count(piter->sg);
		if (likely(piter->sg_pgoffset < pgcount))
			break;
		piter->sg_pgoffset -= pgcount;
		piter->sg = sg_next(piter->sg);
		if (--piter->internal.nents == 0)
			return (false);
		if (piter->sg == NULL)
			return (false);
	}
	return (true);
}

#define	__sg_page_iter_dma_next(itr) \
	__sg_page_iter_next(&(itr)->base)

static inline void
_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
    unsigned int nents, unsigned long pgoffset)
{
	if (nents) {
		iter->sg = sgl;
		iter->sg_pgoffset = pgoffset - 1;
		iter->maxents = nents;
		_sg_iter_next(iter);
	} else {
		iter->sg = NULL;
		iter->sg_pgoffset = 0;
		iter->maxents = 0;
	}
}

/*
 * sg_page_iter_dma_address() is implemented as a macro because it
 * needs to accept two different structure types with identical
 * layouts.  This allows both old and new code to coexist.  The
 * compile-time assert adds some safety by checking that the structure
 * sizes match.
 */
#define	sg_page_iter_dma_address(spi) ({				\
	struct sg_page_iter *__spi = (void *)(spi);			\
	dma_addr_t __dma_address;					\
	CTASSERT(sizeof(*(spi)) == sizeof(*__spi));			\
	__dma_address = __spi->sg->dma_address +			\
	    (__spi->sg_pgoffset << PAGE_SHIFT);				\
	__dma_address;							\
})

static inline struct page *
sg_page_iter_page(struct sg_page_iter *piter)
{
	return (nth_page(sg_page(piter->sg), piter->sg_pgoffset));
}

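/*
 * sg_pcopy_from_buffer() copies buflen bytes from buf into the
 * scatterlist, starting "skip" bytes into the list.  Pages are mapped
 * one at a time through sf_buf(9), so the thread is pinned to keep the
 * CPU-private mappings valid.  Returns the number of bytes copied.
 */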
static inline size_t
sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen, off_t skip)
{
	struct sg_page_iter piter;
	struct page *page;
	struct sf_buf *sf;
	size_t len, copied;
	char *p, *b;

	if (buflen == 0)
		return (0);

	b = __DECONST(char *, buf);
	copied = 0;
	sched_pin();
	for_each_sg_page(sgl, &piter, nents, 0) {
		/* Skip to the start. */
		if (piter.sg->length <= skip) {
			skip -= piter.sg->length;
			continue;
		}

		/* See how much to copy. */
		KASSERT(((piter.sg->length - skip) != 0 && (buflen != 0)),
		    ("%s: sg len %u - skip %ju || buflen %zu is 0\n",
		    __func__, piter.sg->length, (uintmax_t)skip, buflen));
		len = min(piter.sg->length - skip, buflen);

		page = sg_page_iter_page(&piter);
		sf = sf_buf_alloc(page, SFB_CPUPRIVATE | SFB_NOWAIT);
		if (sf == NULL)
			break;
		p = (char *)sf_buf_kva(sf) + piter.sg_pgoffset + skip;
		memcpy(p, b, len);
		sf_buf_free(sf);

		/* We copied so nothing more to skip. */
		skip = 0;
		copied += len;
		/* Either we exactly filled the page, or we are done. */
		buflen -= len;
		if (buflen == 0)
			break;
		b += len;
	}
	sched_unpin();

	return (copied);
}

static inline size_t
sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen)
{
	return (sg_pcopy_from_buffer(sgl, nents, buf, buflen, 0));
}

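/*
 * sg_pcopy_to_buffer() copies up to buflen bytes out of the
 * scatterlist into buf, skipping "offset" bytes first.  On machines
 * with a direct map (PMAP_HAS_DMAP) pages are read through the direct
 * map; otherwise temporary sf_buf(9) mappings are used, with the
 * thread pinned while they are live.
 */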
static inline size_t
sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    void *buf, size_t buflen, off_t offset)
{
	struct sg_page_iter iter;
	struct scatterlist *sg;
	struct page *page;
	struct sf_buf *sf;
	char *vaddr;
	size_t total = 0;
	size_t len;

	if (!PMAP_HAS_DMAP)
		sched_pin();
	for_each_sg_page(sgl, &iter, nents, 0) {
		sg = iter.sg;

		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		}
		len = ulmin(buflen, sg->length - offset);
		if (len == 0)
			break;

		page = sg_page_iter_page(&iter);
		if (!PMAP_HAS_DMAP) {
			sf = sf_buf_alloc(page, SFB_CPUPRIVATE | SFB_NOWAIT);
			if (sf == NULL)
				break;
			vaddr = (char *)sf_buf_kva(sf);
		} else
			vaddr = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page));
		memcpy(buf, vaddr + sg->offset + offset, len);
		if (!PMAP_HAS_DMAP)
			sf_buf_free(sf);

		/* start at beginning of next page */
		offset = 0;

		/* advance buffer */
		buf = (char *)buf + len;
		buflen -= len;
		total += len;
	}
	if (!PMAP_HAS_DMAP)
		sched_unpin();
	return (total);
}

#endif	/* _LINUXKPI_LINUX_SCATTERLIST_H_ */