FreeBSD/Linux Kernel Cross Reference
sys/sys/mbuf.h
1 /* $NetBSD: mbuf.h,v 1.144 2008/10/24 22:31:40 dyoung Exp $ */
2
3 /*-
4 * Copyright (c) 1996, 1997, 1999, 2001, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center and Matt Thomas of 3am Software Foundry.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Copyright (c) 1982, 1986, 1988, 1993
35 * The Regents of the University of California. All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)mbuf.h 8.5 (Berkeley) 2/19/95
62 */
63
64 #ifndef _SYS_MBUF_H_
65 #define _SYS_MBUF_H_
66
67 #ifdef _KERNEL_OPT
68 #include "opt_mbuftrace.h"
69 #endif
70
71 #ifndef M_WAITOK
72 #include <sys/malloc.h>
73 #endif
74 #include <sys/pool.h>
75 #include <sys/queue.h>
76 #if defined(_KERNEL)
77 #include <sys/percpu_types.h>
78 #endif /* defined(_KERNEL) */
79
80 /* For offsetof() */
81 #if defined(_KERNEL) || defined(_STANDALONE)
82 #include <sys/systm.h>
83 #else
84 #include <stddef.h>
85 #endif
86
87 #include <uvm/uvm_param.h> /* for MIN_PAGE_SIZE */
88
89 /*
90 * Mbufs are of a single size, MSIZE (machine/param.h), which
91 * includes overhead. An mbuf may add a single "mbuf cluster" of size
92 * MCLBYTES (also in machine/param.h), which has no additional overhead
93 * and is used instead of the internal data area; this is done when
94 * at least MINCLSIZE of data must be stored.
95 */
96
/*
 * Packet tags structure: metadata attached to an M_PKTHDR mbuf via the
 * pkthdr tag list (see struct pkthdr and the m_tag_* routines below).
 * NOTE(review): the tag payload of m_tag_len bytes presumably follows
 * this header in the same allocation (m_tag_get) -- confirm in the
 * m_tag implementation.
 */
struct m_tag {
	SLIST_ENTRY(m_tag)	m_tag_link;	/* List of packet tags */
	uint16_t		m_tag_id;	/* Tag ID (PACKET_TAG_* below) */
	uint16_t		m_tag_len;	/* Length of data */
};
103
/*
 * mbuf ownership structure (MBUFTRACE): identifies the subsystem or
 * interface that currently owns an mbuf, for allocation tracing.
 */
struct mowner {
	char mo_name[16];		/* owner name (fxp0) */
	char mo_descr[16];		/* owner description (input) */
	LIST_ENTRY(mowner) mo_link;	/* link on the global owner list */
	struct percpu *mo_counters;	/* per-CPU counters (see mowner_counter) */
};
111
/* Static initializer for a struct mowner with the given name/description. */
#define MOWNER_INIT(x, y)	{ .mo_name = x, .mo_descr = y }

/*
 * Indices into a per-owner counter array; claims/releases are kept
 * separately for plain, cluster (M_CLUSTER) and external (M_EXT) mbufs.
 */
enum mowner_counter_index {
	MOWNER_COUNTER_CLAIMS,		/* # of small mbuf claimed */
	MOWNER_COUNTER_RELEASES,	/* # of small mbuf released */
	MOWNER_COUNTER_CLUSTER_CLAIMS,	/* # of M_CLUSTER mbuf claimed */
	MOWNER_COUNTER_CLUSTER_RELEASES,/* # of M_CLUSTER mbuf released */
	MOWNER_COUNTER_EXT_CLAIMS,	/* # of M_EXT mbuf claimed */
	MOWNER_COUNTER_EXT_RELEASES,	/* # of M_EXT mbuf released */

	MOWNER_COUNTER_NCOUNTERS,	/* must be last: counter array size */
};
124
#if defined(_KERNEL)
/* One per-CPU counter block, referenced from struct mowner's mo_counters. */
struct mowner_counter {
	u_long mc_counter[MOWNER_COUNTER_NCOUNTERS];
};
#endif /* defined(_KERNEL) */
130
/*
 * userland-exported version of struct mowner.  The leading members match
 * struct mowner; the per-CPU counter pointer is replaced by the summed
 * counter values.
 */
struct mowner_user {
	char mo_name[16];		/* owner name (fxp0) */
	char mo_descr[16];		/* owner description (input) */
	LIST_ENTRY(mowner) mo_link;	/* unused padding; for compatibility */
	u_long mo_counter[MOWNER_COUNTER_NCOUNTERS]; /* counters */
};
138
139 /*
140 * Macros for type conversion
141 * mtod(m,t) - convert mbuf pointer to data pointer of correct type
142 */
143 #define mtod(m, t) ((t)((m)->m_data))
144
/*
 * header at beginning of each mbuf; accessed through the m_* shorthand
 * macros defined after MBUF_DEFINE below.
 */
struct m_hdr {
	struct	mbuf *mh_next;		/* next buffer in chain */
	struct	mbuf *mh_nextpkt;	/* next chain in queue/record */
	char	*mh_data;		/* location of data */
	struct	mowner *mh_owner;	/* mbuf owner (MBUFTRACE) */
	int	mh_len;			/* amount of data in this mbuf */
	int	mh_flags;		/* flags; see below */
	paddr_t	mh_paddr;		/* physical address of mbuf,
					   or M_PADDR_INVALID */
	short	mh_type;		/* type of data in this mbuf (MT_*) */
};
156
157 /*
158 * record/packet header in first mbuf of chain; valid if M_PKTHDR set
159 *
160 * A note about csum_data: For the out-bound direction, the low 16 bits
161 * indicates the offset after the L4 header where the final L4 checksum value
162 * is to be stored and the high 16 bits is the length of the L3 header (the
163 * start of the data to be checksumed). For the in-bound direction, it is only
164 * valid if the M_CSUM_DATA flag is set. In this case, an L4 checksum has been
165 * calculated by hardware, but it is up to software to perform final
166 * verification.
167 *
168 * Note for in-bound TCP/UDP checksums, we expect the csum_data to NOT
169 * be bit-wise inverted (the final step in the calculation of an IP
170 * checksum) -- this is so we can accumulate the checksum for fragmented
171 * packets during reassembly.
172 */
struct pkthdr {
	struct	ifnet *rcvif;		/* rcv interface; may instead hold
					   private context (M_GETCTX/M_SETCTX) */
	SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */
	int	len;			/* total packet length */
	int	csum_flags;		/* checksum flags (M_CSUM_*) */
	uint32_t csum_data;		/* checksum data; see comment above */
	u_int	segsz;			/* segment size; presumably used with
					   M_CSUM_TSOv* -- confirm in callers */
};
181
182 /*
183 * Note: These bits are carefully arrange so that the compiler can have
184 * a prayer of generating a jump table.
185 */
186 #define M_CSUM_TCPv4 0x00000001 /* TCP header/payload */
187 #define M_CSUM_UDPv4 0x00000002 /* UDP header/payload */
188 #define M_CSUM_TCP_UDP_BAD 0x00000004 /* TCP/UDP checksum bad */
189 #define M_CSUM_DATA 0x00000008 /* consult csum_data */
190 #define M_CSUM_TCPv6 0x00000010 /* IPv6 TCP header/payload */
191 #define M_CSUM_UDPv6 0x00000020 /* IPv6 UDP header/payload */
192 #define M_CSUM_IPv4 0x00000040 /* IPv4 header */
193 #define M_CSUM_IPv4_BAD 0x00000080 /* IPv4 header checksum bad */
194 #define M_CSUM_TSOv4 0x00000100 /* TCPv4 segmentation offload */
195 #define M_CSUM_TSOv6 0x00000200 /* TCPv6 segmentation offload */
196
197 /* Checksum-assist quirks: keep separate from jump-table bits. */
198 #define M_CSUM_NO_PSEUDOHDR 0x80000000 /* Rx csum_data does not include
199 * the UDP/TCP pseudo-hdr, and
200 * is not yet 1s-complemented.
201 */
202
203 #define M_CSUM_BITS \
204 "\2\1TCPv4\2UDPv4\3TCP_UDP_BAD\4DATA\5TCPv6\6UDPv6\7IPv4\10IPv4_BAD" \
205 "\11TSOv4\12TSOv6\40NO_PSEUDOHDR"
206
207 /*
208 * Macros for manipulating csum_data on outgoing packets. These are
209 * used to pass information down from the L4/L3 to the L2.
210 */
211 #define M_CSUM_DATA_IPv4_IPHL(x) ((x) >> 16)
212 #define M_CSUM_DATA_IPv4_OFFSET(x) ((x) & 0xffff)
213
214 /*
215 * Macros for M_CSUM_TCPv6 and M_CSUM_UDPv6
216 *
217 * M_CSUM_DATA_IPv6_HL: length of ip6_hdr + ext header.
218 * ie. offset of UDP/TCP header in the packet.
219 *
220 * M_CSUM_DATA_IPv6_OFFSET: offset of the checksum field in UDP/TCP header.
221 */
222
223 #define M_CSUM_DATA_IPv6_HL(x) ((x) >> 16)
224 #define M_CSUM_DATA_IPv6_HL_SET(x, v) (x) = ((x) & 0xffff) | ((v) << 16)
225 #define M_CSUM_DATA_IPv6_OFFSET(x) ((x) & 0xffff)
226
227 /*
228 * Max # of pages we can attach to m_ext. This is carefully chosen
229 * to be able to handle SOSEND_LOAN_CHUNK with our minimum sized page.
230 */
231 #ifdef MIN_PAGE_SIZE
232 #define M_EXT_MAXPAGES ((65536 / MIN_PAGE_SIZE) + 1)
233 #endif
234
/* description of external storage mapped into mbuf, valid if M_EXT set */
struct _m_ext_storage {
	unsigned int ext_refcnt;	/* # of mbufs sharing this storage;
					   set to 1 by MCLINITREFERENCE */
	int ext_flags;
	char *ext_buf;			/* start of buffer */
	void (*ext_free)		/* free routine if not the usual */
		(struct mbuf *, void *, size_t, void *);
	void *ext_arg;			/* argument for ext_free */
	size_t ext_size;		/* size of buffer, for ext_free */
	struct malloc_type *ext_type;	/* malloc type */
	union {
		paddr_t extun_paddr;	/* physical address (M_EXT_CLUSTER) */
					/* pages (M_EXT_PAGES) */
		/*
		 * XXX This is gross, but it doesn't really matter; this is
		 * XXX overlaid on top of the mbuf data area.
		 */
#ifdef M_EXT_MAXPAGES
		struct vm_page *extun_pgs[M_EXT_MAXPAGES];
#endif
	} ext_un;
#define	ext_paddr	ext_un.extun_paddr
#define	ext_pgs		ext_un.extun_pgs
#ifdef DEBUG
	/* Debug provenance, maintained via MCLREFDEBUGO ("o": origin)
	 * and MCLREFDEBUGN ("n": newest) -- see MCLINITREFERENCE. */
	const char *ext_ofile;
	const char *ext_nfile;
	int ext_oline;
	int ext_nline;
#endif
};
265
/*
 * External-storage reference.  ext_ref points at the mbuf owning the
 * storage (the mbuf itself for the first mapping, see MCLINITREFERENCE),
 * so the m_ext macro resolves to the owner's _m_ext_storage.
 */
struct _m_ext {
	struct mbuf *ext_ref;		/* owning mbuf */
	struct _m_ext_storage ext_storage;
};
270
271 #define M_PADDR_INVALID POOL_PADDR_INVALID
272
273 /*
274 * Definition of "struct mbuf".
275 * Don't change this without understanding how MHLEN/MLEN are defined.
276 */
/*
 * MBUF_DEFINE(name, mhlen, mlen): lay out an mbuf structure with the
 * given internal data-area sizes.  The pkthdr and external-storage
 * descriptors overlay the data area through the M_dat union; which
 * member is valid depends on M_PKTHDR/M_EXT in mh_flags.
 */
#define	MBUF_DEFINE(name, mhlen, mlen)					\
	struct name {							\
		struct	m_hdr m_hdr;					\
		union {							\
			struct {					\
				struct	pkthdr MH_pkthdr;		\
				union {					\
					struct	_m_ext MH_ext;		\
					char MH_databuf[(mhlen)];	\
				} MH_dat;				\
			} MH;						\
			char	M_databuf[(mlen)];			\
		} M_dat;						\
	}
/* Shorthand accessors through the header and the M_dat union. */
#define	m_next		m_hdr.mh_next
#define	m_len		m_hdr.mh_len
#define	m_data		m_hdr.mh_data
#define	m_owner		m_hdr.mh_owner
#define	m_type		m_hdr.mh_type
#define	m_flags		m_hdr.mh_flags
#define	m_nextpkt	m_hdr.mh_nextpkt
#define	m_paddr		m_hdr.mh_paddr
#define	m_pkthdr	M_dat.MH.MH_pkthdr
#define	m_ext_storage	M_dat.MH.MH_dat.MH_ext.ext_storage
#define	m_ext_ref	M_dat.MH.MH_dat.MH_ext.ext_ref
/* m_ext follows the reference to the owning mbuf's storage. */
#define	m_ext		m_ext_ref->m_ext_storage
#define	m_pktdat	M_dat.MH.MH_dat.MH_databuf
#define	m_dat		M_dat.M_databuf
305
306 /*
307 * Dummy mbuf structure to calculate the right values for MLEN/MHLEN, taking
308 * into account inter-structure padding.
309 */
310 MBUF_DEFINE(_mbuf_dummy, 1, 1);
311
312 /* normal data len */
313 #define MLEN (MSIZE - offsetof(struct _mbuf_dummy, m_dat))
314 /* data len w/pkthdr */
315 #define MHLEN (MSIZE - offsetof(struct _mbuf_dummy, m_pktdat))
316
317 #define MINCLSIZE (MHLEN+MLEN+1) /* smallest amount to put in cluster */
318 #define M_MAXCOMPRESS (MHLEN / 2) /* max amount to copy for compression */
319
320 /*
321 * The *real* struct mbuf
322 */
323 MBUF_DEFINE(mbuf, MHLEN, MLEN);
324
325 /* mbuf flags */
326 #define M_EXT 0x00001 /* has associated external storage */
327 #define M_PKTHDR 0x00002 /* start of record */
328 #define M_EOR 0x00004 /* end of record */
329 #define M_PROTO1 0x00008 /* protocol-specific */
330
331 /* mbuf pkthdr flags, also in m_flags */
332 #define M_AUTHIPHDR 0x00010 /* data origin authentication for IP header */
333 #define M_DECRYPTED 0x00020 /* confidentiality */
334 #define M_LOOP 0x00040 /* for Mbuf statistics */
335 #define M_AUTHIPDGM 0x00080 /* data origin authentication */
336 #define M_BCAST 0x00100 /* send/received as link-level broadcast */
337 #define M_MCAST 0x00200 /* send/received as link-level multicast */
338 #define M_CANFASTFWD 0x00400 /* used by filters to indicate packet can
339 be fast-forwarded */
340 #define M_ANYCAST6 0x00800 /* received as IPv6 anycast */
341 #define M_LINK0 0x01000 /* link layer specific flag */
342 #define M_LINK1 0x02000 /* link layer specific flag */
343 #define M_LINK2 0x04000 /* link layer specific flag */
344 #define M_LINK3 0x08000 /* link layer specific flag */
345 #define M_LINK4 0x10000 /* link layer specific flag */
346 #define M_LINK5 0x20000 /* link layer specific flag */
347 #define M_LINK6 0x40000 /* link layer specific flag */
348 #define M_LINK7 0x80000 /* link layer specific flag */
349
350 /* additional flags for M_EXT mbufs */
351 #define M_EXT_FLAGS 0xff000000
352 #define M_EXT_CLUSTER 0x01000000 /* ext is a cluster */
353 #define M_EXT_PAGES 0x02000000 /* ext_pgs is valid */
354 #define M_EXT_ROMAP 0x04000000 /* ext mapping is r-o at MMU */
355 #define M_EXT_RW 0x08000000 /* ext storage is writable */
356
357 /* for source-level compatibility */
358 #define M_CLUSTER M_EXT_CLUSTER
359
360 #define M_FLAGS_BITS \
361 "\2\1EXT\2PKTHDR\3EOR\4PROTO1\5AUTHIPHDR\6DECRYPTED\7LOOP\10AUTHIPDGM" \
362 "\11BCAST\12MCAST\13CANFASTFWD\14ANYCAST6\15LINK0\16LINK1\17LINK2\20LINK3" \
363 "\21LINK4\22LINK5\23LINK6\24LINK7" \
364 "\31EXT_CLUSTER\32EXT_PAGES\33EXT_ROMAP\34EXT_RW"
365
366 /* flags copied when copying m_pkthdr */
367 #define M_COPYFLAGS (M_PKTHDR|M_EOR|M_BCAST|M_MCAST|M_CANFASTFWD|M_ANYCAST6|M_LINK0|M_LINK1|M_LINK2|M_AUTHIPHDR|M_DECRYPTED|M_LOOP|M_AUTHIPDGM)
368
369 /* flag copied when shallow-copying external storage */
370 #define M_EXTCOPYFLAGS (M_EXT|M_EXT_FLAGS)
371
372 /* mbuf types */
373 #define MT_FREE 0 /* should be on free list */
374 #define MT_DATA 1 /* dynamic (data) allocation */
375 #define MT_HEADER 2 /* packet header */
376 #define MT_SONAME 3 /* socket name */
377 #define MT_SOOPTS 4 /* socket options */
378 #define MT_FTABLE 5 /* fragment reassembly header */
379 #define MT_CONTROL 6 /* extra-data protocol message */
380 #define MT_OOBDATA 7 /* expedited data */
381
382 /* flags to m_get/MGET */
383 #define M_DONTWAIT M_NOWAIT
384 #define M_WAIT M_WAITOK
385
386 #ifdef MBUFTRACE
387 /*
388 * mbuf allocation tracing
389 */
390 void mowner_init(struct mbuf *, int);
391 void mowner_ref(struct mbuf *, int);
392 void m_claim(struct mbuf *, struct mowner *);
393 void mowner_revoke(struct mbuf *, bool, int);
394 void mowner_attach(struct mowner *);
395 void mowner_detach(struct mowner *);
396 void m_claimm(struct mbuf *, struct mowner *);
397 #else
398 #define mowner_init(m, type) do { } while (/* CONSTCOND */ 0)
399 #define mowner_ref(m, flags) do { } while (/* CONSTCOND */ 0)
400 #define mowner_revoke(m, all, flags) do { } while (/* CONSTCOND */ 0)
401 #define m_claim(m, mowner) do { } while (/* CONSTCOND */ 0)
402 #define mowner_attach(mo) do { } while (/* CONSTCOND */ 0)
403 #define mowner_detach(mo) do { } while (/* CONSTCOND */ 0)
404 #define m_claimm(m, mo) do { } while (/* CONSTCOND */ 0)
405 #endif
406
407 #define MCLAIM(m, mo) m_claim((m), (mo))
408 #define MOWNER_ATTACH(mo) mowner_attach(mo)
409 #define MOWNER_DETACH(mo) mowner_detach(mo)
410
411 /*
412 * mbuf allocation/deallocation macros:
413 *
414 * MGET(struct mbuf *m, int how, int type)
415 * allocates an mbuf and initializes it to contain internal data.
416 *
417 * MGETHDR(struct mbuf *m, int how, int type)
418 * allocates an mbuf and initializes it to contain a packet header
419 * and internal data.
420 *
421 * If 'how' is M_WAIT, these macros (and the corresponding functions)
422 * are guaranteed to return successfully.
423 */
424 #define MGET(m, how, type) m = m_get((how), (type))
425 #define MGETHDR(m, how, type) m = m_gethdr((how), (type))
426
427 #if defined(_KERNEL)
428 #define _M_
429 /*
430 * Macros for tracking external storage associated with an mbuf.
431 */
432 #ifdef DEBUG
433 #define MCLREFDEBUGN(m, file, line) \
434 do { \
435 (m)->m_ext.ext_nfile = (file); \
436 (m)->m_ext.ext_nline = (line); \
437 } while (/* CONSTCOND */ 0)
438
439 #define MCLREFDEBUGO(m, file, line) \
440 do { \
441 (m)->m_ext.ext_ofile = (file); \
442 (m)->m_ext.ext_oline = (line); \
443 } while (/* CONSTCOND */ 0)
444 #else
445 #define MCLREFDEBUGN(m, file, line)
446 #define MCLREFDEBUGO(m, file, line)
447 #endif
448
/*
 * MCLINITREFERENCE(m): make m the first (owning) reference to its own
 * external storage: point m_ext_ref back at m and start the reference
 * count at 1.  The mbuf must not already have M_EXT set.
 */
#define	MCLINITREFERENCE(m)						\
do {									\
	KDASSERT(((m)->m_flags & M_EXT) == 0);				\
	(m)->m_ext_ref = (m);						\
	(m)->m_ext.ext_refcnt = 1;					\
	MCLREFDEBUGO((m), __FILE__, __LINE__);				\
	MCLREFDEBUGN((m), NULL, 0);					\
} while (/* CONSTCOND */ 0)
457
458 /*
459 * Macros for mbuf external storage.
460 *
461 * MCLGET allocates and adds an mbuf cluster to a normal mbuf;
462 * the flag M_EXT is set upon success.
463 *
464 * MEXTMALLOC allocates external storage and adds it to
465 * a normal mbuf; the flag M_EXT is set upon success.
466 *
467 * MEXTADD adds pre-allocated external storage to
468 * a normal mbuf; the flag M_EXT is set upon success.
469 */
470
/*
 * _MCLGET(m, pool_cache, size, how): attach a cluster from the given
 * pool cache to mbuf m.  On success M_EXT|M_CLUSTER|M_EXT_RW are set
 * and m_data points at the cluster; on allocation failure the mbuf is
 * left unchanged (callers must check M_EXT / ext_buf).
 */
#define	_MCLGET(m, pool_cache, size, how)				\
do {									\
	(m)->m_ext_storage.ext_buf =					\
	    pool_cache_get_paddr((pool_cache),				\
		(how) == M_WAIT ? (PR_WAITOK|PR_LIMITFAIL) : 0,		\
		&(m)->m_ext_storage.ext_paddr);				\
	if ((m)->m_ext_storage.ext_buf != NULL) {			\
		MCLINITREFERENCE(m);					\
		(m)->m_data = (m)->m_ext.ext_buf;			\
		(m)->m_flags = ((m)->m_flags & ~M_EXTCOPYFLAGS) |	\
				M_EXT|M_CLUSTER|M_EXT_RW;		\
		(m)->m_ext.ext_flags = 0;				\
		(m)->m_ext.ext_size = (size);				\
		(m)->m_ext.ext_free = NULL;				\
		(m)->m_ext.ext_arg = (pool_cache);			\
		/* ext_paddr initialized above */			\
		mowner_ref((m), M_EXT|M_CLUSTER);			\
	}								\
} while (/* CONSTCOND */ 0)

/*
 * The standard mbuf cluster pool.
 */
#define	MCLGET(m, how)	_MCLGET((m), mcl_cache, MCLBYTES, (how))
495
/*
 * MEXTMALLOC(m, size, how): malloc() external storage (typed by the
 * mbuf's current m_type) and attach it to m.  On success M_EXT|M_EXT_RW
 * are set; on failure the mbuf is left unchanged.
 */
#define	MEXTMALLOC(m, size, how)					\
do {									\
	(m)->m_ext_storage.ext_buf =					\
	    (void *)malloc((size), mbtypes[(m)->m_type], (how));	\
	if ((m)->m_ext_storage.ext_buf != NULL) {			\
		MCLINITREFERENCE(m);					\
		(m)->m_data = (m)->m_ext.ext_buf;			\
		(m)->m_flags = ((m)->m_flags & ~M_EXTCOPYFLAGS) |	\
				M_EXT|M_EXT_RW;				\
		(m)->m_ext.ext_flags = 0;				\
		(m)->m_ext.ext_size = (size);				\
		(m)->m_ext.ext_free = NULL;				\
		(m)->m_ext.ext_arg = NULL;				\
		(m)->m_ext.ext_type = mbtypes[(m)->m_type];		\
		mowner_ref((m), M_EXT);					\
	}								\
} while (/* CONSTCOND */ 0)
513
/*
 * MEXTADD(m, buf, size, type, free, arg): attach caller-provided
 * external storage to m; 'free' (with 'arg') is invoked to release it.
 * Unlike MCLGET/MEXTMALLOC this cannot fail and does not validate buf;
 * note M_EXT_RW is deliberately not set here.
 */
#define	MEXTADD(m, buf, size, type, free, arg)				\
do {									\
	MCLINITREFERENCE(m);						\
	(m)->m_data = (m)->m_ext.ext_buf = (void *)(buf);		\
	(m)->m_flags = ((m)->m_flags & ~M_EXTCOPYFLAGS) | M_EXT;	\
	(m)->m_ext.ext_flags = 0;					\
	(m)->m_ext.ext_size = (size);					\
	(m)->m_ext.ext_free = (free);					\
	(m)->m_ext.ext_arg = (arg);					\
	(m)->m_ext.ext_type = (type);					\
	mowner_ref((m), M_EXT);						\
} while (/* CONSTCOND */ 0)
526
/*
 * Reset the data pointer on an mbuf to the start of whichever buffer
 * is currently valid: external storage, the pkthdr data area, or the
 * plain internal data area.
 */
#define	MRESETDATA(m)							\
do {									\
	if ((m)->m_flags & M_EXT)					\
		(m)->m_data = (m)->m_ext.ext_buf;			\
	else if ((m)->m_flags & M_PKTHDR)				\
		(m)->m_data = (m)->m_pktdat;				\
	else								\
		(m)->m_data = (m)->m_dat;				\
} while (/* CONSTCOND */ 0)
539
/*
 * MFREE(struct mbuf *m, struct mbuf *n)
 *	Free a single mbuf and associated external storage.
 *	Place the successor, if any, in n.
 *
 * Wrapped in do/while(0) so the expansion is a single statement: the
 * previous multi-statement expansion misbehaved inside an unbraced
 * "if"/"else" (only the first statement was guarded), and its final
 * line ended in a stray '\' that continued the macro onto the next
 * source line.
 */
#define	MFREE(m, n)							\
do {									\
	mowner_revoke((m), 1, (m)->m_flags);				\
	mbstat_type_add((m)->m_type, -1);				\
	if ((m)->m_flags & M_PKTHDR)					\
		m_tag_delete_chain((m), NULL);				\
	(n) = (m)->m_next;						\
	if ((m)->m_flags & M_EXT) {					\
		m_ext_free(m);						\
	} else {							\
		pool_cache_put(mb_cache, (m));				\
	}								\
} while (/* CONSTCOND */ 0)
556
/*
 * Copy mbuf pkthdr from `from' to `to'.
 * `from' must have M_PKTHDR set, and `to' must be empty.
 * The tag list is re-initialized after the struct copy so the two
 * headers do not share list linkage, then the tags are duplicated.
 * NOTE(review): the int return of m_tag_copy_chain() is discarded, so
 * a tag-allocation failure is silent here -- confirm callers tolerate
 * missing tags.
 */
#define	M_COPY_PKTHDR(to, from)						\
do {									\
	(to)->m_pkthdr = (from)->m_pkthdr;				\
	(to)->m_flags = (from)->m_flags & M_COPYFLAGS;			\
	SLIST_INIT(&(to)->m_pkthdr.tags);				\
	m_tag_copy_chain((to), (from));					\
	(to)->m_data = (to)->m_pktdat;					\
} while (/* CONSTCOND */ 0)
569
570 /*
571 * Move mbuf pkthdr from `from' to `to'.
572 * `from' must have M_PKTHDR set, and `to' must be empty.
573 */
574 #define M_MOVE_PKTHDR(to, from) m_move_pkthdr(to, from)
575
/*
 * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place
 * an object of the specified size at the end of the mbuf, longword aligned.
 * (Rounds the offset down to a sizeof(long) boundary; only valid on an
 * empty mbuf whose m_data is at the start of m_dat.)
 */
#define	M_ALIGN(m, len)							\
do {									\
	(m)->m_data += (MLEN - (len)) &~ (sizeof(long) - 1);		\
} while (/* CONSTCOND */ 0)
584
/*
 * As above, for mbufs allocated with m_gethdr/MGETHDR
 * or initialized by M_COPY_PKTHDR (uses MHLEN, the smaller
 * pkthdr data area).
 */
#define	MH_ALIGN(m, len)						\
do {									\
	(m)->m_data += (MHLEN - (len)) &~ (sizeof(long) - 1);		\
} while (/* CONSTCOND */ 0)
593
/*
 * Determine if an mbuf's data area is read-only. This is true
 * if external storage is read-only mapped, or not marked as R/W,
 * or referenced by more than one mbuf.  (An mbuf without M_EXT is
 * never read-only: its data lives in the mbuf itself.)
 */
#define	M_READONLY(m)							\
	(((m)->m_flags & M_EXT) != 0 &&					\
	  (((m)->m_flags & (M_EXT_ROMAP|M_EXT_RW)) != M_EXT_RW ||	\
	  (m)->m_ext.ext_refcnt > 1))

/* Unwritable if read-only, or if too short to hold __len bytes. */
#define	M_UNWRITABLE(__m, __len)					\
	((__m)->m_len < (__len) || M_READONLY((__m)))
/*
 * Determine if an mbuf's data area is read-only at the MMU.
 */
#define	M_ROMAP(m)							\
	(((m)->m_flags & (M_EXT|M_EXT_ROMAP)) == (M_EXT|M_EXT_ROMAP))
611
/*
 * Compute the amount of space available
 * before the current start of data in an mbuf.
 * _M_LEADINGSPACE measures from the start of whichever buffer holds
 * the data; M_LEADINGSPACE additionally reports 0 when the data area
 * may not be written (shared/read-only storage).
 */
#define	_M_LEADINGSPACE(m)						\
	((m)->m_flags & M_EXT ? (m)->m_data - (m)->m_ext.ext_buf :	\
	 (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat :	\
	 (m)->m_data - (m)->m_dat)

#define	M_LEADINGSPACE(m)						\
	(M_READONLY((m)) ? 0 : _M_LEADINGSPACE((m)))
623
/*
 * Compute the amount of space available
 * after the end of data in an mbuf (to the end of external storage,
 * or of the internal data area).  M_TRAILINGSPACE reports 0 when the
 * data area may not be written.
 */
#define	_M_TRAILINGSPACE(m)						\
	((m)->m_flags & M_EXT ? (m)->m_ext.ext_buf + (m)->m_ext.ext_size - \
	 ((m)->m_data + (m)->m_len) :					\
	 &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))

#define	M_TRAILINGSPACE(m)						\
	(M_READONLY((m)) ? 0 : _M_TRAILINGSPACE((m)))
635
/*
 * Compute the address of an mbuf's internal data area (pkthdr-relative
 * or plain); does not consider external storage.
 */
#define	M_BUFADDR(m)							\
	(((m)->m_flags & M_PKTHDR) ? (m)->m_pktdat : (m)->m_dat)

/*
 * Compute the offset of the beginning of the data buffer of a non-ext
 * mbuf.
 */
#define	M_BUFOFFSET(m)							\
	(((m)->m_flags & M_PKTHDR) ?					\
	 offsetof(struct mbuf, m_pktdat) : offsetof(struct mbuf, m_dat))
649
/*
 * Arrange to prepend space of size plen to mbuf m.
 * If a new mbuf must be allocated, how specifies whether to wait.
 * If how is M_DONTWAIT and allocation fails, the original mbuf chain
 * is freed and m is set to NULL.
 * Note: evaluates m and plen more than once; do not pass expressions
 * with side effects.
 */
#define	M_PREPEND(m, plen, how)						\
do {									\
	if (M_LEADINGSPACE(m) >= (plen)) {				\
		(m)->m_data -= (plen);					\
		(m)->m_len += (plen);					\
	} else								\
		(m) = m_prepend((m), (plen), (how));			\
	if ((m) && (m)->m_flags & M_PKTHDR)				\
		(m)->m_pkthdr.len += (plen);				\
} while (/* CONSTCOND */ 0)
666
/* change mbuf to new type, keeping the per-type statistics balanced */
#define	MCHTYPE(m, t)							\
do {									\
	mbstat_type_add((m)->m_type, -1);				\
	mbstat_type_add(t, 1);						\
	(m)->m_type = t;						\
} while (/* CONSTCOND */ 0)
674
/* length to m_copy to copy all: a sentinel larger than any packet */
#define	M_COPYALL	1000000000

/* compatibility with 4.3 */
#define	m_copy(m, o, l)	m_copym((m), (o), (l), M_DONTWAIT)

/*
 * Allow drivers and/or protocols to use the rcvif member of
 * PKTHDR mbufs to store private context information.
 */
#define	M_GETCTX(m, t)		((t)(m)->m_pkthdr.rcvif)
#define	M_SETCTX(m, c)		((void)((m)->m_pkthdr.rcvif = (void *)(c)))
687
688 #endif /* defined(_KERNEL) */
689
690 /*
691 * Simple mbuf queueing system
692 *
693 * this is basically a SIMPLEQ adapted to mbuf use (ie using
694 * m_nextpkt instead of field.sqe_next).
695 *
696 * m_next is ignored, so queueing chains of mbufs is possible
697 */
/*
 * MBUFQ_HEAD: declare a queue head.  mq_last points at the tail
 * mbuf's m_nextpkt pointer, or at mq_first when the queue is empty.
 */
#define	MBUFQ_HEAD(name)						\
struct name {								\
	struct mbuf *mq_first;						\
	struct mbuf **mq_last;						\
}

/* Make the queue empty. */
#define	MBUFQ_INIT(q)		do {					\
	(q)->mq_first = NULL;						\
	(q)->mq_last = &(q)->mq_first;					\
} while (/*CONSTCOND*/0)

/* Append m at the tail of the queue. */
#define	MBUFQ_ENQUEUE(q, m)	do {					\
	(m)->m_nextpkt = NULL;						\
	*(q)->mq_last = (m);						\
	(q)->mq_last = &(m)->m_nextpkt;					\
} while (/*CONSTCOND*/0)

/* Insert m at the head of the queue. */
#define	MBUFQ_PREPEND(q, m)	do {					\
	if (((m)->m_nextpkt = (q)->mq_first) == NULL)			\
		(q)->mq_last = &(m)->m_nextpkt;				\
	(q)->mq_first = (m);						\
} while (/*CONSTCOND*/0)

/* Remove the head into m (NULL when empty), detaching its m_nextpkt. */
#define	MBUFQ_DEQUEUE(q, m)	do {					\
	if (((m) = (q)->mq_first) != NULL) {				\
		if (((q)->mq_first = (m)->m_nextpkt) == NULL)		\
			(q)->mq_last = &(q)->mq_first;			\
		else							\
			(m)->m_nextpkt = NULL;				\
	}								\
} while (/*CONSTCOND*/0)

/* m_freem() every chain on the queue and reset it to empty. */
#define	MBUFQ_DRAIN(q)		do {					\
	struct mbuf *__m0;						\
	while ((__m0 = (q)->mq_first) != NULL) {			\
		(q)->mq_first = __m0->m_nextpkt;			\
		m_freem(__m0);						\
	}								\
	(q)->mq_last = &(q)->mq_first;					\
} while (/*CONSTCOND*/0)

/* Non-destructive accessors. */
#define	MBUFQ_FIRST(q)		((q)->mq_first)
#define	MBUFQ_NEXT(m)		((m)->m_nextpkt)
#define	MBUFQ_LAST(q)		(*(q)->mq_last)
742
743 /*
744 * Mbuf statistics.
745 * For statistics related to mbuf and cluster allocations, see also the
746 * pool headers (mb_cache and mcl_cache).
747 */
/* Global mbuf statistics; the _m_spare* slots preserve the historical
 * binary layout of retired counters. */
struct mbstat {
	u_long	_m_spare;	/* formerly m_mbufs */
	u_long	_m_spare1;	/* formerly m_clusters */
	u_long	_m_spare2;	/* spare field */
	u_long	_m_spare3;	/* formerly m_clfree - free clusters */
	u_long	m_drops;	/* times failed to find space */
	u_long	m_wait;		/* times waited for space */
	u_long	m_drain;	/* times drained protocols for space */
	u_short	m_mtypes[256];	/* type specific mbuf allocations */
};

/* Per-CPU view of the per-type allocation counters. */
struct mbstat_cpu {
	u_int	m_mtypes[256];	/* type specific mbuf allocations */
};
762
763 /*
764 * Mbuf sysctl variables.
765 */
766 #define MBUF_MSIZE 1 /* int: mbuf base size */
767 #define MBUF_MCLBYTES 2 /* int: mbuf cluster size */
768 #define MBUF_NMBCLUSTERS 3 /* int: limit on the # of clusters */
769 #define MBUF_MBLOWAT 4 /* int: mbuf low water mark */
770 #define MBUF_MCLLOWAT 5 /* int: mbuf cluster low water mark */
771 #define MBUF_STATS 6 /* struct: mbstat */
772 #define MBUF_MOWNERS 7 /* struct: m_owner[] */
773 #define MBUF_MAXID 8 /* number of valid MBUF ids */
774
775 #define CTL_MBUF_NAMES { \
776 { 0, 0 }, \
777 { "msize", CTLTYPE_INT }, \
778 { "mclbytes", CTLTYPE_INT }, \
779 { "nmbclusters", CTLTYPE_INT }, \
780 { "mblowat", CTLTYPE_INT }, \
781 { "mcllowat", CTLTYPE_INT }, \
782 { 0 /* "stats" */, CTLTYPE_STRUCT }, \
783 { 0 /* "mowners" */, CTLTYPE_STRUCT }, \
784 }
785
786 #ifdef _KERNEL
787 extern struct mbstat mbstat;
788 extern int nmbclusters; /* limit on the # of clusters */
789 extern int mblowat; /* mbuf low water mark */
790 extern int mcllowat; /* mbuf cluster low water mark */
791 extern int max_linkhdr; /* largest link-level header */
792 extern int max_protohdr; /* largest protocol header */
793 extern int max_hdr; /* largest link+protocol header */
794 extern int max_datalen; /* MHLEN - max_hdr */
795 extern const int msize; /* mbuf base size */
796 extern const int mclbytes; /* mbuf cluster size */
797 extern pool_cache_t mb_cache;
798 extern pool_cache_t mcl_cache;
799 #ifdef MBUFTRACE
800 LIST_HEAD(mownerhead, mowner);
801 extern struct mownerhead mowners;
802 extern struct mowner unknown_mowners[];
803 extern struct mowner revoked_mowner;
804 #endif
805
806 MALLOC_DECLARE(M_MBUF);
807 MALLOC_DECLARE(M_SONAME);
808 MALLOC_DECLARE(M_SOOPTS);
809
810 struct mbuf *m_copym(struct mbuf *, int, int, int);
811 struct mbuf *m_copypacket(struct mbuf *, int);
812 struct mbuf *m_devget(char *, int, int, struct ifnet *,
813 void (*copy)(const void *, void *, size_t));
814 struct mbuf *m_dup(struct mbuf *, int, int, int);
815 struct mbuf *m_free(struct mbuf *);
816 struct mbuf *m_get(int, int);
817 struct mbuf *m_getclr(int, int);
818 struct mbuf *m_gethdr(int, int);
819 struct mbuf *m_prepend(struct mbuf *,int, int);
820 struct mbuf *m_pulldown(struct mbuf *, int, int, int *);
821 struct mbuf *m_pullup(struct mbuf *, int);
822 struct mbuf *m_copyup(struct mbuf *, int, int);
823 struct mbuf *m_split(struct mbuf *,int, int);
824 struct mbuf *m_getptr(struct mbuf *, int, int *);
825 void m_adj(struct mbuf *, int);
826 int m_apply(struct mbuf *, int, int,
827 int (*)(void *, void *, unsigned int), void *);
828 void m_cat(struct mbuf *,struct mbuf *);
829 void m_clget(struct mbuf *, int);
830 int m_mballoc(int, int);
831 void m_copyback(struct mbuf *, int, int, const void *);
832 struct mbuf *m_copyback_cow(struct mbuf *, int, int, const void *, int);
833 int m_makewritable(struct mbuf **, int, int, int);
834 struct mbuf *m_getcl(int, int, int);
835 void m_copydata(struct mbuf *, int, int, void *);
836 void m_freem(struct mbuf *);
837 void m_reclaim(void *, int);
838 void mbinit(void);
839 void m_ext_free(struct mbuf *);
840 char * m_mapin(struct mbuf *);
841 void m_move_pkthdr(struct mbuf *to, struct mbuf *from);
842
843 /* Inline routines. */
844 static __inline u_int m_length(const struct mbuf *) __unused;
845
846 /* Statistics */
847 void mbstat_type_add(int, int);
848
849 /* Packet tag routines */
850 struct m_tag *m_tag_get(int, int, int);
851 void m_tag_free(struct m_tag *);
852 void m_tag_prepend(struct mbuf *, struct m_tag *);
853 void m_tag_unlink(struct mbuf *, struct m_tag *);
854 void m_tag_delete(struct mbuf *, struct m_tag *);
855 void m_tag_delete_chain(struct mbuf *, struct m_tag *);
856 void m_tag_delete_nonpersistent(struct mbuf *);
857 struct m_tag *m_tag_find(struct mbuf *, int, struct m_tag *);
858 struct m_tag *m_tag_copy(struct m_tag *);
859 int m_tag_copy_chain(struct mbuf *, struct mbuf *);
860 void m_tag_init(struct mbuf *);
861 struct m_tag *m_tag_first(struct mbuf *);
862 struct m_tag *m_tag_next(struct mbuf *, struct m_tag *);
863
864 /* Packet tag types */
865 #define PACKET_TAG_NONE 0 /* Nothing */
866 #define PACKET_TAG_VLAN 1 /* VLAN ID */
867 #define PACKET_TAG_ENCAP 2 /* encapsulation data */
868 #define PACKET_TAG_ESP 3 /* ESP information */
869 #define PACKET_TAG_PF 11 /* packet filter */
870 #define PACKET_TAG_ALTQ_QID 12 /* ALTQ queue id */
871
872 #define PACKET_TAG_IPSEC_IN_CRYPTO_DONE 16
873 #define PACKET_TAG_IPSEC_IN_DONE 17
874 #define PACKET_TAG_IPSEC_OUT_DONE 18
875 #define PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED 19 /* NIC IPsec crypto req'ed */
876 #define PACKET_TAG_IPSEC_IN_COULD_DO_CRYPTO 20 /* NIC notifies IPsec */
877 #define PACKET_TAG_IPSEC_PENDING_TDB 21 /* Reminder to do IPsec */
878
879 #define PACKET_TAG_IPSEC_SOCKET 22 /* IPSEC socket ref */
880 #define PACKET_TAG_IPSEC_HISTORY 23 /* IPSEC history */
881
882 #define PACKET_TAG_IPSEC_NAT_T_PORTS 25 /* two uint16_t */
883
884 #define PACKET_TAG_INET6 26 /* IPv6 info */
885
886 #define PACKET_TAG_ECO_RETRYPARMS 27 /* Econet retry parameters */
887
888 #define PACKET_TAG_TUNNEL_INFO 28 /* tunnel identification and
889 * protocol callback, for
890 * loop detection/recovery
891 */
892
893 /*
894 * Return the number of bytes in the mbuf chain, m.
895 */
896 static __inline u_int
897 m_length(const struct mbuf *m)
898 {
899 const struct mbuf *m0;
900 u_int pktlen;
901
902 if ((m->m_flags & M_PKTHDR) != 0)
903 return m->m_pkthdr.len;
904
905 pktlen = 0;
906 for (m0 = m; m0 != NULL; m0 = m0->m_next)
907 pktlen += m0->m_len;
908 return pktlen;
909 }
910
911 void m_print(const struct mbuf *, const char *, void (*)(const char *, ...));
912
913 #endif /* _KERNEL */
914 #endif /* !_SYS_MBUF_H_ */
915
916 #ifdef _KERNEL
#ifdef MBTYPES
/*
 * Map MT_* mbuf types to malloc types, indexed by MT_* value.  Defined
 * exactly once, by the translation unit that defines MBTYPES before
 * including this header; everyone else gets the extern declaration.
 * (M_FREE, M_MBUF, etc. here are malloc types from sys/malloc.h --
 * only textually similar to the mbuf M_* flag names above.)
 */
struct malloc_type *mbtypes[] = {			/* XXX */
	M_FREE,		/* MT_FREE	0	should be on free list */
	M_MBUF,		/* MT_DATA	1	dynamic (data) allocation */
	M_MBUF,		/* MT_HEADER	2	packet header */
	M_SONAME,	/* MT_SONAME	3	socket name */
	M_SOOPTS,	/* MT_SOOPTS	4	socket options */
	M_FTABLE,	/* MT_FTABLE	5	fragment reassembly header */
	M_MBUF,		/* MT_CONTROL	6	extra-data protocol message */
	M_MBUF,		/* MT_OOBDATA	7	expedited data */
};
#undef MBTYPES
#else
extern struct malloc_type *mbtypes[];
#endif /* MBTYPES */
932 #endif /* _KERNEL */
Cache object: 1d1d98fd9dc4708c60a3528259611874
|