FreeBSD/Linux Kernel Cross Reference
sys/sys/mbuf.h
1 /* $NetBSD: mbuf.h,v 1.133 2006/11/23 19:41:58 yamt Exp $ */
2
3 /*-
4 * Copyright (c) 1996, 1997, 1999, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center and Matt Thomas of 3am Software Foundry.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Copyright (c) 1982, 1986, 1988, 1993
42 * The Regents of the University of California. All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 * 3. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 *
68 * @(#)mbuf.h 8.5 (Berkeley) 2/19/95
69 */
70
71 #ifndef _SYS_MBUF_H_
72 #define _SYS_MBUF_H_
73
74 #ifdef _KERNEL_OPT
75 #include "opt_mbuftrace.h"
76 #endif
77
78 #ifndef M_WAITOK
79 #include <sys/malloc.h>
80 #endif
81 #include <sys/pool.h>
82 #include <sys/queue.h>
83
84 /* For offsetof() */
85 #if defined(_KERNEL) || defined(_STANDALONE)
86 #include <sys/systm.h>
87 #else
88 #include <stddef.h>
89 #endif
90
91 #include <uvm/uvm_param.h> /* for MIN_PAGE_SIZE */
92
93 /*
94 * Mbufs are of a single size, MSIZE (machine/param.h), which
95 * includes overhead. An mbuf may add a single "mbuf cluster" of size
96 * MCLBYTES (also in machine/param.h), which has no additional overhead
97 * and is used instead of the internal data area; this is done when
98 * at least MINCLSIZE of data must be stored.
99 */
100
101 /* Packet tags structure */
102 struct m_tag {
103 SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */
104 uint16_t m_tag_id; /* Tag ID (PACKET_TAG_* value) */
105 uint16_t m_tag_len; /* Length of the tag data that follows this header */
106 };
107
108 /* mbuf ownership structure */
109 struct mowner {
110 char mo_name[16]; /* owner name (fxp0) */
111 char mo_descr[16]; /* owner description (input) */
112 LIST_ENTRY(mowner) mo_link; /* entry on the global mowners list */
113 u_long mo_claims; /* # of small mbuf claimed */
114 u_long mo_releases; /* # of small mbuf released */
115 u_long mo_cluster_claims; /* # of M_CLUSTER mbuf claimed */
116 u_long mo_cluster_releases; /* # of M_CLUSTER mbuf released */
117 u_long mo_ext_claims; /* # of M_EXT mbuf claimed */
118 u_long mo_ext_releases; /* # of M_EXT mbuf released */
119 };
120
121 #define MOWNER_INIT(x, y) { x, y, { NULL, NULL }, 0, 0, 0, 0, 0, 0 }
122
123 /*
124 * Macros for type conversion
125 * mtod(m,t) - convert mbuf pointer to data pointer of correct type
126 */
127 #define mtod(m,t) ((t)((m)->m_data))
128
129 /* header at beginning of each mbuf: */
130 struct m_hdr {
131 struct mbuf *mh_next; /* next buffer in chain */
132 struct mbuf *mh_nextpkt; /* next chain in queue/record */
133 caddr_t mh_data; /* location of data */
134 struct mowner *mh_owner; /* mbuf owner (MBUFTRACE accounting) */
135 int mh_len; /* amount of data in this mbuf */
136 int mh_flags; /* flags; see M_* definitions below */
137 paddr_t mh_paddr; /* physical address of mbuf */
138 short mh_type; /* type of data in this mbuf (MT_*) */
139 };
140
141 /*
142 * record/packet header in first mbuf of chain; valid if M_PKTHDR set
143 *
144 * A note about csum_data: For the out-bound direction, the low 16 bits
145 * indicates the offset after the L4 header where the final L4 checksum value
146 * is to be stored and the high 16 bits is the length of the L3 header (the
147 * start of the data to be checksummed). For the in-bound direction, it is only
148 * valid if the M_CSUM_DATA flag is set. In this case, an L4 checksum has been
149 * calculated by hardware, but it is up to software to perform final
150 * verification.
151 *
152 * Note for in-bound TCP/UDP checksums, we expect the csum_data to NOT
153 * be bit-wise inverted (the final step in the calculation of an IP
154 * checksum) -- this is so we can accumulate the checksum for fragmented
155 * packets during reassembly.
156 */
157 struct pkthdr {
158 struct ifnet *rcvif; /* rcv interface */
159 SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */
160 int len; /* total packet length */
161 int csum_flags; /* checksum flags (M_CSUM_*) */
162 uint32_t csum_data; /* checksum data; interpretation above */
163 u_int segsz; /* segment size */
164 };
165
166 /*
167 * Note: These bits are carefully arranged so that the compiler can have
168 * a prayer of generating a jump table.
169 */
170 #define M_CSUM_TCPv4 0x00000001 /* TCP header/payload */
171 #define M_CSUM_UDPv4 0x00000002 /* UDP header/payload */
172 #define M_CSUM_TCP_UDP_BAD 0x00000004 /* TCP/UDP checksum bad */
173 #define M_CSUM_DATA 0x00000008 /* consult csum_data */
174 #define M_CSUM_TCPv6 0x00000010 /* IPv6 TCP header/payload */
175 #define M_CSUM_UDPv6 0x00000020 /* IPv6 UDP header/payload */
176 #define M_CSUM_IPv4 0x00000040 /* IPv4 header */
177 #define M_CSUM_IPv4_BAD 0x00000080 /* IPv4 header checksum bad */
178 #define M_CSUM_TSOv4 0x00000100 /* TCPv4 segmentation offload */
179 #define M_CSUM_TSOv6 0x00000200 /* TCPv6 segmentation offload */
180
181 /* Checksum-assist quirks: keep separate from jump-table bits. */
182 #define M_CSUM_NO_PSEUDOHDR 0x80000000 /* Rx csum_data does not include
183 * the UDP/TCP pseudo-hdr, and
184 * is not yet 1s-complemented.
185 */
186
187 #define M_CSUM_BITS \
188 "\2\1TCPv4\2UDPv4\3TCP_UDP_BAD\4DATA\5TCPv6\6UDPv6\7IPv4\10IPv4_BAD" \
189 "\11TSOv4\12TSOv6\40NO_PSEUDOHDR"
190
191 /*
192 * Macros for manipulating csum_data on outgoing packets. These are
193 * used to pass information down from the L4/L3 to the L2.
194 */
195 #define M_CSUM_DATA_IPv4_IPHL(x) ((x) >> 16)
196 #define M_CSUM_DATA_IPv4_OFFSET(x) ((x) & 0xffff)
197
198 /*
199 * Macros for M_CSUM_TCPv6 and M_CSUM_UDPv6
200 *
201 * M_CSUM_DATA_IPv6_HL: length of ip6_hdr + ext header.
202 * ie. offset of UDP/TCP header in the packet.
203 *
204 * M_CSUM_DATA_IPv6_OFFSET: offset of the checksum field in UDP/TCP header.
205 */
206
207 #define M_CSUM_DATA_IPv6_HL(x) ((x) >> 16)
208 #define M_CSUM_DATA_IPv6_HL_SET(x, v) (x) = ((x) & 0xffff) | ((v) << 16)
209 #define M_CSUM_DATA_IPv6_OFFSET(x) ((x) & 0xffff)
210
211 /*
212 * Max # of pages we can attach to m_ext. This is carefully chosen
213 * to be able to handle SOSEND_LOAN_CHUNK with our minimum sized page.
214 */
215 #ifdef MIN_PAGE_SIZE
216 #define M_EXT_MAXPAGES ((65536 / MIN_PAGE_SIZE) + 1)
217 #endif
218
219 /* description of external storage mapped into mbuf, valid if M_EXT set */
220 struct _m_ext {
221 caddr_t ext_buf; /* start of buffer */
222 void (*ext_free) /* free routine if not the usual */
223 (struct mbuf *, caddr_t, size_t, void *);
224 void *ext_arg; /* argument for ext_free */
225 size_t ext_size; /* size of buffer, for ext_free */
226 struct malloc_type *ext_type; /* malloc type */
227 struct mbuf *ext_nextref; /* circular list of mbufs sharing this storage */
228 struct mbuf *ext_prevref; /* (self-referential when unshared; see MCLINITREFERENCE) */
229 union {
230 paddr_t extun_paddr; /* physical address (M_EXT_CLUSTER) */
231 /* pages (M_EXT_PAGES) */
232 /*
233 * XXX This is gross, but it doesn't really matter; this is
234 * XXX overlaid on top of the mbuf data area.
235 */
236 #ifdef M_EXT_MAXPAGES
237 struct vm_page *extun_pgs[M_EXT_MAXPAGES];
238 #endif
239 } ext_un;
240 #define ext_paddr ext_un.extun_paddr
241 #define ext_pgs ext_un.extun_pgs
242 #ifdef DEBUG
243 const char *ext_ofile; /* file where the ref list was initialized (MCLREFDEBUGO) */
244 const char *ext_nfile; /* file of the most recently added reference (MCLREFDEBUGN) */
245 int ext_oline; /* line for ext_ofile */
246 int ext_nline; /* line for ext_nfile */
247 #endif
248 };
249
250 #define M_PADDR_INVALID POOL_PADDR_INVALID
251
252 /*
253 * Definition of "struct mbuf".
254 * Don't change this without understanding how MHLEN/MLEN are defined.
255 */
256 #define MBUF_DEFINE(name, mhlen, mlen) \
257 struct name { \
258 struct m_hdr m_hdr; \
259 union { \
260 struct { \
261 struct pkthdr MH_pkthdr; /* valid if M_PKTHDR set */ \
262 union { \
263 struct _m_ext MH_ext; /* valid if M_EXT set */ \
264 char MH_databuf[(mhlen)]; \
265 } MH_dat; \
266 } MH; \
267 char M_databuf[(mlen)]; \
268 } M_dat; \
269 }
270 #define m_next m_hdr.mh_next
271 #define m_len m_hdr.mh_len
272 #define m_data m_hdr.mh_data
273 #define m_owner m_hdr.mh_owner
274 #define m_type m_hdr.mh_type
275 #define m_flags m_hdr.mh_flags
276 #define m_nextpkt m_hdr.mh_nextpkt
277 #define m_paddr m_hdr.mh_paddr
278 #define m_pkthdr M_dat.MH.MH_pkthdr
279 #define m_ext M_dat.MH.MH_dat.MH_ext
280 #define m_pktdat M_dat.MH.MH_dat.MH_databuf
281 #define m_dat M_dat.M_databuf
282
283 /*
284 * Dummy mbuf structure to calculate the right values for MLEN/MHLEN, taking
285 * into account inter-structure padding.
286 */
287 MBUF_DEFINE(_mbuf_dummy, 1, 1);
288
289 /* normal data len */
290 #define MLEN (MSIZE - offsetof(struct _mbuf_dummy, m_dat))
291 /* data len w/pkthdr */
292 #define MHLEN (MSIZE - offsetof(struct _mbuf_dummy, m_pktdat))
293
294 #define MINCLSIZE (MHLEN+MLEN+1) /* smallest amount to put in cluster */
295 #define M_MAXCOMPRESS (MHLEN / 2) /* max amount to copy for compression */
296
297 /*
298 * The *real* struct mbuf
299 */
300 MBUF_DEFINE(mbuf, MHLEN, MLEN);
301
302 /* mbuf flags */
303 #define M_EXT 0x0001 /* has associated external storage */
304 #define M_PKTHDR 0x0002 /* start of record */
305 #define M_EOR 0x0004 /* end of record */
306 #define M_PROTO1 0x0008 /* protocol-specific */
307
308 /* mbuf pkthdr flags, also in m_flags */
309 #define M_AUTHIPHDR 0x0010 /* data origin authentication for IP header */
310 #define M_DECRYPTED 0x0020 /* confidentiality */
311 #define M_LOOP 0x0040 /* for Mbuf statistics */
312 #define M_AUTHIPDGM 0x0080 /* data origin authentication */
313 #define M_BCAST 0x0100 /* send/received as link-level broadcast */
314 #define M_MCAST 0x0200 /* send/received as link-level multicast */
315 #define M_CANFASTFWD 0x0400 /* used by filters to indicate packet can
316 be fast-forwarded */
317 #define M_ANYCAST6 0x00800 /* received as IPv6 anycast */
318 #define M_LINK0 0x01000 /* link layer specific flag */
319 #define M_LINK1 0x02000 /* link layer specific flag */
320 #define M_LINK2 0x04000 /* link layer specific flag */
321 #define M_LINK3 0x08000 /* link layer specific flag */
322 #define M_LINK4 0x10000 /* link layer specific flag */
323 #define M_LINK5 0x20000 /* link layer specific flag */
324 #define M_LINK6 0x40000 /* link layer specific flag */
325
326 /* additional flags for M_EXT mbufs */
327 #define M_EXT_FLAGS 0xff000000
328 #define M_EXT_CLUSTER 0x01000000 /* ext is a cluster */
329 #define M_EXT_PAGES 0x02000000 /* ext_pgs is valid */
330 #define M_EXT_ROMAP 0x04000000 /* ext mapping is r-o at MMU */
331 #define M_EXT_RW 0x08000000 /* ext storage is writable */
332
333 /* for source-level compatibility */
334 #define M_CLUSTER M_EXT_CLUSTER
335
336 #define M_FLAGS_BITS \
337 "\2\1EXT\2PKTHDR\3EOR\4PROTO1\5AUTHIPHDR\6DECRYPTED\7LOOP\10AUTHIPDGM" \
338 "\11BCAST\12MCAST\13CANFASTFWD\14ANYCAST6\15LINK0\16LINK1\17LINK2\20LINK3" \
339 "\31EXT_CLUSTER\32EXT_PAGES\33EXT_ROMAP\34EXT_RW"
340
341 /* flags copied when copying m_pkthdr */
342 #define M_COPYFLAGS (M_PKTHDR|M_EOR|M_BCAST|M_MCAST|M_CANFASTFWD|M_ANYCAST6|M_LINK0|M_LINK1|M_LINK2|M_AUTHIPHDR|M_DECRYPTED|M_LOOP|M_AUTHIPDGM)
343
344 /* flag copied when shallow-copying external storage */
345 #define M_EXTCOPYFLAGS (M_EXT|M_EXT_FLAGS)
346
347 /* mbuf types */
348 #define MT_FREE 0 /* should be on free list */
349 #define MT_DATA 1 /* dynamic (data) allocation */
350 #define MT_HEADER 2 /* packet header */
351 #define MT_SONAME 3 /* socket name */
352 #define MT_SOOPTS 4 /* socket options */
353 #define MT_FTABLE 5 /* fragment reassembly header */
354 #define MT_CONTROL 6 /* extra-data protocol message */
355 #define MT_OOBDATA 7 /* expedited data */
356
357 /* flags to m_get/MGET */
358 #define M_DONTWAIT M_NOWAIT
359 #define M_WAIT M_WAITOK
360
361 /*
362 * mbuf utility macros:
363 *
364 * MBUFLOCK(code)
365 * prevents a section of code from being interrupted by network
366 * drivers.
367 */
368 #define MBUFLOCK(code) \
369 do { \
370 int _ms = splvm(); \
371 { code } \
372 splx(_ms); \
373 } while (/* CONSTCOND */ 0)
374
375 #ifdef MBUFTRACE
376 /*
377 * mbuf allocation tracing macros
378 *
379 */
380 #define _MOWNERINIT(m, type) \
381 ((m)->m_owner = &unknown_mowners[(type)], (m)->m_owner->mo_claims++)
382
383 #define _MOWNERREF(m, flags) do { \
384 if ((flags) & M_EXT) \
385 (m)->m_owner->mo_ext_claims++; \
386 if ((flags) & M_CLUSTER) \
387 (m)->m_owner->mo_cluster_claims++; \
388 } while (/* CONSTCOND */ 0)
389
390 #define MOWNERREF(m, flags) MBUFLOCK( _MOWNERREF((m), (flags)); );
391
392 #define _MOWNERREVOKE(m, all, flags) do { \
393 if ((flags) & M_EXT) \
394 (m)->m_owner->mo_ext_releases++; \
395 if ((flags) & M_CLUSTER) \
396 (m)->m_owner->mo_cluster_releases++; \
397 if (all) { \
398 (m)->m_owner->mo_releases++; \
399 (m)->m_owner = &revoked_mowner; \
400 } \
401 } while (/* CONSTCOND */ 0)
402
403 #define _MOWNERCLAIM(m, mowner) do { \
404 (m)->m_owner = (mowner); \
405 (mowner)->mo_claims++; \
406 if ((m)->m_flags & M_EXT) \
407 (mowner)->mo_ext_claims++; \
408 if ((m)->m_flags & M_CLUSTER) \
409 (mowner)->mo_cluster_claims++; \
410 } while (/* CONSTCOND */ 0)
411
412 #define MCLAIM(m, mowner) \
413 MBUFLOCK( \
414 if ((m)->m_owner != (mowner) && (mowner) != NULL) { \
415 _MOWNERREVOKE((m), 1, (m)->m_flags); \
416 _MOWNERCLAIM((m), (mowner)); \
417 } \
418 )
419
420 #define MOWNER_ATTACH(mo) LIST_INSERT_HEAD(&mowners, (mo), mo_link)
421 #define MOWNER_DETACH(mo) LIST_REMOVE((mo), mo_link)
422 #define MBUFTRACE_ASSERT(cond) KASSERT(cond)
423 #else
424 #define _MOWNERINIT(m, type) do { } while (/* CONSTCOND */ 0)
425 #define _MOWNERREF(m, flags) do { } while (/* CONSTCOND */ 0)
426 #define MOWNERREF(m, flags) do { } while (/* CONSTCOND */ 0)
427 #define _MOWNERREVOKE(m, all, flags) do { } while (/* CONSTCOND */ 0)
428 #define _MOWNERCLAIM(m, mowner) do { } while (/* CONSTCOND */ 0)
429 #define MCLAIM(m, mowner) do { } while (/* CONSTCOND */ 0)
430 #define MOWNER_ATTACH(mo) do { } while (/* CONSTCOND */ 0)
431 #define MOWNER_DETACH(mo) do { } while (/* CONSTCOND */ 0)
432 #define m_claimm(m, mo) do { } while (/* CONSTCOND */ 0)
433 #define MBUFTRACE_ASSERT(cond) do { } while (/* CONSTCOND */ 0)
434 #endif
435
436
437 /*
438 * mbuf allocation/deallocation macros:
439 *
440 * MGET(struct mbuf *m, int how, int type)
441 * allocates an mbuf and initializes it to contain internal data.
442 *
443 * MGETHDR(struct mbuf *m, int how, int type)
444 * allocates an mbuf and initializes it to contain a packet header
445 * and internal data.
446 *
447 * If 'how' is M_WAIT, these macros (and the corresponding functions)
448 * are guaranteed to return successfully.
449 */
450 #define MGET(m, how, type) \
451 MBUFLOCK( \
452 (m) = pool_cache_get(&mbpool_cache, \
453 (how) == M_WAIT ? PR_WAITOK|PR_LIMITFAIL : 0); \
454 if (m) { \
455 mbstat.m_mtypes[type]++; \
456 _MOWNERINIT((m), (type)); \
457 (m)->m_type = (type); \
458 (m)->m_next = (struct mbuf *)NULL; \
459 (m)->m_nextpkt = (struct mbuf *)NULL; \
460 (m)->m_data = (m)->m_dat; \
461 (m)->m_flags = 0; \
462 } \
463 )
464
465 #define MGETHDR(m, how, type) \
466 MBUFLOCK( \
467 (m) = pool_cache_get(&mbpool_cache, \
468 (how) == M_WAIT ? PR_WAITOK|PR_LIMITFAIL : 0); \
469 if (m) { \
470 mbstat.m_mtypes[type]++; \
471 _MOWNERINIT((m), (type)); \
472 (m)->m_type = (type); \
473 (m)->m_next = (struct mbuf *)NULL; \
474 (m)->m_nextpkt = (struct mbuf *)NULL; \
475 (m)->m_data = (m)->m_pktdat; \
476 (m)->m_flags = M_PKTHDR; \
477 (m)->m_pkthdr.rcvif = NULL; \
478 (m)->m_pkthdr.csum_flags = 0; \
479 (m)->m_pkthdr.csum_data = 0; \
480 SLIST_INIT(&(m)->m_pkthdr.tags); \
481 } \
482 )
483
484 #if defined(_KERNEL)
485 #define _M_
486 /*
487 * Macros for tracking external storage associated with an mbuf.
488 *
489 * Note: add and delete reference must be called at splvm().
490 */
491 #ifdef DEBUG
492 #define MCLREFDEBUGN(m, file, line) \
493 do { \
494 (m)->m_ext.ext_nfile = (file); \
495 (m)->m_ext.ext_nline = (line); \
496 } while (/* CONSTCOND */ 0)
497
498 #define MCLREFDEBUGO(m, file, line) \
499 do { \
500 (m)->m_ext.ext_ofile = (file); \
501 (m)->m_ext.ext_oline = (line); \
502 } while (/* CONSTCOND */ 0)
503 #else
504 #define MCLREFDEBUGN(m, file, line)
505 #define MCLREFDEBUGO(m, file, line)
506 #endif
507
508 #define MCLBUFREF(p)
509 #define MCLISREFERENCED(m) ((m)->m_ext.ext_nextref != (m))
510 #define _MCLDEREFERENCE(m) \
511 do { \
512 (m)->m_ext.ext_nextref->m_ext.ext_prevref = \
513 (m)->m_ext.ext_prevref; \
514 (m)->m_ext.ext_prevref->m_ext.ext_nextref = \
515 (m)->m_ext.ext_nextref; \
516 } while (/* CONSTCOND */ 0)
517
518 #define _MCLADDREFERENCE(o, n) \
519 do { \
520 (n)->m_flags |= ((o)->m_flags & M_EXTCOPYFLAGS); \
521 (n)->m_ext.ext_nextref = (o)->m_ext.ext_nextref; \
522 (n)->m_ext.ext_prevref = (o); \
523 (o)->m_ext.ext_nextref = (n); \
524 (n)->m_ext.ext_nextref->m_ext.ext_prevref = (n); \
525 _MOWNERREF((n), (n)->m_flags); \
526 MCLREFDEBUGN((n), __FILE__, __LINE__); \
527 } while (/* CONSTCOND */ 0)
528
529 #define MCLINITREFERENCE(m) \
530 do { \
531 (m)->m_ext.ext_prevref = (m); \
532 (m)->m_ext.ext_nextref = (m); \
533 MCLREFDEBUGO((m), __FILE__, __LINE__); \
534 MCLREFDEBUGN((m), NULL, 0); \
535 } while (/* CONSTCOND */ 0)
536
537 #define MCLADDREFERENCE(o, n) MBUFLOCK(_MCLADDREFERENCE((o), (n));)
538
539 /*
540 * Macros for mbuf external storage.
541 *
542 * MCLGET allocates and adds an mbuf cluster to a normal mbuf;
543 * the flag M_EXT is set upon success.
544 *
545 * MEXTMALLOC allocates external storage and adds it to
546 * a normal mbuf; the flag M_EXT is set upon success.
547 *
548 * MEXTADD adds pre-allocated external storage to
549 * a normal mbuf; the flag M_EXT is set upon success.
550 */
551 #define _MCLGET(m, pool_cache, size, how) \
552 do { \
553 MBUFLOCK( \
554 (m)->m_ext.ext_buf = \
555 pool_cache_get_paddr((pool_cache), \
556 (how) == M_WAIT ? (PR_WAITOK|PR_LIMITFAIL) : 0, \
557 &(m)->m_ext.ext_paddr); \
558 if ((m)->m_ext.ext_buf != NULL) \
559 _MOWNERREF((m), M_EXT|M_CLUSTER); \
560 ); \
561 if ((m)->m_ext.ext_buf != NULL) { \
562 (m)->m_data = (m)->m_ext.ext_buf; \
563 (m)->m_flags = ((m)->m_flags & ~M_EXTCOPYFLAGS) | \
564 M_EXT|M_CLUSTER|M_EXT_RW; \
565 (m)->m_ext.ext_size = (size); \
566 (m)->m_ext.ext_free = NULL; \
567 (m)->m_ext.ext_arg = (pool_cache); \
568 /* ext_paddr initialized above */ \
569 MCLINITREFERENCE(m); \
570 } \
571 } while (/* CONSTCOND */ 0)
572
573 /*
574 * The standard mbuf cluster pool.
575 */
576 #define MCLGET(m, how) _MCLGET((m), &mclpool_cache, MCLBYTES, (how))
577
578 #define MEXTMALLOC(m, size, how) \
579 do { \
580 (m)->m_ext.ext_buf = \
581 (caddr_t)malloc((size), mbtypes[(m)->m_type], (how)); \
582 if ((m)->m_ext.ext_buf != NULL) { \
583 (m)->m_data = (m)->m_ext.ext_buf; \
584 (m)->m_flags = ((m)->m_flags & ~M_EXTCOPYFLAGS) | \
585 M_EXT|M_EXT_RW; \
586 (m)->m_ext.ext_size = (size); \
587 (m)->m_ext.ext_free = NULL; \
588 (m)->m_ext.ext_arg = NULL; \
589 (m)->m_ext.ext_type = mbtypes[(m)->m_type]; \
590 MCLINITREFERENCE(m); \
591 MOWNERREF((m), M_EXT); \
592 } \
593 } while (/* CONSTCOND */ 0)
594
595 #define MEXTADD(m, buf, size, type, free, arg) \
596 do { \
597 (m)->m_data = (m)->m_ext.ext_buf = (caddr_t)(buf); \
598 (m)->m_flags = ((m)->m_flags & ~M_EXTCOPYFLAGS) | M_EXT; \
599 (m)->m_ext.ext_size = (size); \
600 (m)->m_ext.ext_free = (free); \
601 (m)->m_ext.ext_arg = (arg); \
602 (m)->m_ext.ext_type = (type); \
603 MCLINITREFERENCE(m); \
604 MOWNERREF((m), M_EXT); \
605 } while (/* CONSTCOND */ 0)
606
607 #define MEXTREMOVE(m) \
608 do { \
609 int _ms_ = splvm(); /* MBUFLOCK */ \
610 _MOWNERREVOKE((m), 0, (m)->m_flags); \
611 m_ext_free(m, FALSE); \
612 splx(_ms_); \
613 (m)->m_flags &= ~M_EXTCOPYFLAGS; \
614 (m)->m_ext.ext_size = 0; /* why ??? */ \
615 } while (/* CONSTCOND */ 0)
616
617 /*
618 * Reset the data pointer on an mbuf.
619 */
620 #define MRESETDATA(m) \
621 do { \
622 if ((m)->m_flags & M_EXT) \
623 (m)->m_data = (m)->m_ext.ext_buf; \
624 else if ((m)->m_flags & M_PKTHDR) \
625 (m)->m_data = (m)->m_pktdat; \
626 else \
627 (m)->m_data = (m)->m_dat; \
628 } while (/* CONSTCOND */ 0)
629
630 /*
631 * MFREE(struct mbuf *m, struct mbuf *n)
632 * Free a single mbuf and associated external storage.
633 * Place the successor, if any, in n.
634 */
635 #define MFREE(m, n) \
636 MBUFLOCK( \
637 mbstat.m_mtypes[(m)->m_type]--; \
638 if ((m)->m_flags & M_PKTHDR) \
639 m_tag_delete_chain((m), NULL); \
640 (n) = (m)->m_next; \
641 _MOWNERREVOKE((m), 1, m->m_flags); \
642 if ((m)->m_flags & M_EXT) { \
643 m_ext_free(m, TRUE); \
644 } else { \
645 pool_cache_put(&mbpool_cache, (m)); \
646 } \
647 )
648
649 /*
650 * Copy mbuf pkthdr from `from' to `to'.
651 * `from' must have M_PKTHDR set, and `to' must be empty.
652 */
653 #define M_COPY_PKTHDR(to, from) \
654 do { \
655 (to)->m_pkthdr = (from)->m_pkthdr; \
656 (to)->m_flags = (from)->m_flags & M_COPYFLAGS; \
657 SLIST_INIT(&(to)->m_pkthdr.tags); \
658 m_tag_copy_chain((to), (from)); \
659 (to)->m_data = (to)->m_pktdat; \
660 } while (/* CONSTCOND */ 0)
661
662 /*
663 * Move mbuf pkthdr from `from' to `to'.
664 * `from' must have M_PKTHDR set, and `to' must be empty.
665 */
666 #define M_MOVE_PKTHDR(to, from) m_move_pkthdr(to, from)
667
668 /*
669 * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place
670 * an object of the specified size at the end of the mbuf, longword aligned.
671 */
672 #define M_ALIGN(m, len) \
673 do { \
674 (m)->m_data += (MLEN - (len)) &~ (sizeof(long) - 1); \
675 } while (/* CONSTCOND */ 0)
676
677 /*
678 * As above, for mbufs allocated with m_gethdr/MGETHDR
679 * or initialized by M_COPY_PKTHDR.
680 */
681 #define MH_ALIGN(m, len) \
682 do { \
683 (m)->m_data += (MHLEN - (len)) &~ (sizeof(long) - 1); \
684 } while (/* CONSTCOND */ 0)
685
686 /*
687 * Determine if an mbuf's data area is read-only. This is true
688 * if external storage is read-only mapped, or not marked as R/W,
689 * or referenced by more than one mbuf.
690 */
691 #define M_READONLY(m) \
692 (((m)->m_flags & M_EXT) != 0 && \
693 (((m)->m_flags & (M_EXT_ROMAP|M_EXT_RW)) != M_EXT_RW || \
694 MCLISREFERENCED(m)))
695
696 #define M_UNWRITABLE(__m, __len) \
697 ((__m)->m_len < (__len) || M_READONLY((__m)))
698 /*
699 * Determine if an mbuf's data area is read-only at the MMU.
700 */
701 #define M_ROMAP(m) \
702 (((m)->m_flags & (M_EXT|M_EXT_ROMAP)) == (M_EXT|M_EXT_ROMAP))
703
704 /*
705 * Compute the amount of space available
706 * before the current start of data in an mbuf.
707 */
708 #define _M_LEADINGSPACE(m) \
709 ((m)->m_flags & M_EXT ? (m)->m_data - (m)->m_ext.ext_buf : \
710 (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat : \
711 (m)->m_data - (m)->m_dat)
712
713 #define M_LEADINGSPACE(m) \
714 (M_READONLY((m)) ? 0 : _M_LEADINGSPACE((m)))
715
716 /*
717 * Compute the amount of space available
718 * after the end of data in an mbuf.
719 */
720 #define _M_TRAILINGSPACE(m) \
721 ((m)->m_flags & M_EXT ? (m)->m_ext.ext_buf + (m)->m_ext.ext_size - \
722 ((m)->m_data + (m)->m_len) : \
723 &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))
724
725 #define M_TRAILINGSPACE(m) \
726 (M_READONLY((m)) ? 0 : _M_TRAILINGSPACE((m)))
727
728 /*
729 * Compute the address of an mbuf's data area.
730 */
731 #define M_BUFADDR(m) \
732 (((m)->m_flags & M_PKTHDR) ? (m)->m_pktdat : (m)->m_dat)
733
734 /*
735 * Compute the offset of the beginning of the data buffer of a non-ext
736 * mbuf.
737 */
738 #define M_BUFOFFSET(m) \
739 (((m)->m_flags & M_PKTHDR) ? \
740 offsetof(struct mbuf, m_pktdat) : offsetof(struct mbuf, m_dat))
741
742 /*
743 * Arrange to prepend space of size plen to mbuf m.
744 * If a new mbuf must be allocated, how specifies whether to wait.
745 * If how is M_DONTWAIT and allocation fails, the original mbuf chain
746 * is freed and m is set to NULL.
747 */
748 #define M_PREPEND(m, plen, how) \
749 do { \
750 if (M_LEADINGSPACE(m) >= (plen)) { \
751 (m)->m_data -= (plen); \
752 (m)->m_len += (plen); \
753 } else \
754 (m) = m_prepend((m), (plen), (how)); \
755 if ((m) && (m)->m_flags & M_PKTHDR) \
756 (m)->m_pkthdr.len += (plen); \
757 } while (/* CONSTCOND */ 0)
758
759 /* change mbuf to new type */
760 #define MCHTYPE(m, t) \
761 do { \
762 MBUFLOCK(mbstat.m_mtypes[(m)->m_type]--; mbstat.m_mtypes[t]++;); \
763 (m)->m_type = t; \
764 } while (/* CONSTCOND */ 0)
765
766 /* length to m_copy to copy all */
767 #define M_COPYALL 1000000000
768
769 /* compatibility with 4.3 */
770 #define m_copy(m, o, l) m_copym((m), (o), (l), M_DONTWAIT)
771
772 /*
773 * Allow drivers and/or protocols to use the rcvif member of
774 * PKTHDR mbufs to store private context information.
775 */
776 #define M_GETCTX(m, t) ((t)(m)->m_pkthdr.rcvif)
777 #define M_SETCTX(m, c) ((void)((m)->m_pkthdr.rcvif = (void *)(c)))
778
779 #endif /* defined(_KERNEL) */
780
781 /*
782 * Simple mbuf queueing system
783 *
784 * this is basically a SIMPLEQ adapted to mbuf use (ie using
785 * m_nextpkt instead of field.sqe_next).
786 *
787 * m_next is ignored, so queueing chains of mbufs is possible
788 */
789 #define MBUFQ_HEAD(name) \
790 struct name { \
791 struct mbuf *mq_first; /* head of queue, NULL when empty */ \
792 struct mbuf **mq_last; /* &last->m_nextpkt, or &mq_first when empty */ \
793 }
794
795 #define MBUFQ_INIT(q) do { /* reset (q) to the empty state */ \
796 (q)->mq_first = NULL; \
797 (q)->mq_last = &(q)->mq_first; \
798 } while (/*CONSTCOND*/0)
799
800 #define MBUFQ_ENQUEUE(q, m) do { /* append chain (m) at the tail */ \
801 (m)->m_nextpkt = NULL; \
802 *(q)->mq_last = (m); \
803 (q)->mq_last = &(m)->m_nextpkt; \
804 } while (/*CONSTCOND*/0)
805
806 #define MBUFQ_PREPEND(q, m) do { /* insert chain (m) at the head */ \
807 if (((m)->m_nextpkt = (q)->mq_first) == NULL) \
808 (q)->mq_last = &(m)->m_nextpkt; \
809 (q)->mq_first = (m); \
810 } while (/*CONSTCOND*/0)
811
812 #define MBUFQ_DEQUEUE(q, m) do { /* remove head into (m); (m) is NULL if empty */ \
813 if (((m) = (q)->mq_first) != NULL) { \
814 if (((q)->mq_first = (m)->m_nextpkt) == NULL) \
815 (q)->mq_last = &(q)->mq_first; \
816 else \
817 (m)->m_nextpkt = NULL; \
818 } \
819 } while (/*CONSTCOND*/0)
820
821 #define MBUFQ_DRAIN(q) do { /* m_freem() every queued chain, leaving (q) empty */ \
822 struct mbuf *__m0; \
823 while ((__m0 = (q)->mq_first) != NULL) { \
824 (q)->mq_first = __m0->m_nextpkt; \
825 m_freem(__m0); \
826 } \
827 (q)->mq_last = &(q)->mq_first; \
828 } while (/*CONSTCOND*/0)
829
830 #define MBUFQ_FIRST(q) ((q)->mq_first) /* head of queue (NULL if empty) */
831 #define MBUFQ_NEXT(m) ((m)->m_nextpkt) /* successor of (m) in the queue */
832 #define MBUFQ_LAST(q) (*(q)->mq_last) /* tail of queue (NULL if empty) */
833
834 /*
835 * Mbuf statistics.
836 * For statistics related to mbuf and cluster allocations, see also the
837 * pool headers (mbpool and mclpool).
838 */
839 struct mbstat {
840 u_long _m_spare; /* formerly m_mbufs */
841 u_long _m_spare1; /* formerly m_clusters */
842 u_long _m_spare2; /* spare field */
843 u_long _m_spare3; /* formerly m_clfree - free clusters */
844 u_long m_drops; /* times failed to find space */
845 u_long m_wait; /* times waited for space */
846 u_long m_drain; /* times drained protocols for space */
847 u_short m_mtypes[256]; /* type specific mbuf allocations */
848 };
849
850 /*
851 * Mbuf sysctl variables.
852 */
853 #define MBUF_MSIZE 1 /* int: mbuf base size */
854 #define MBUF_MCLBYTES 2 /* int: mbuf cluster size */
855 #define MBUF_NMBCLUSTERS 3 /* int: limit on the # of clusters */
856 #define MBUF_MBLOWAT 4 /* int: mbuf low water mark */
857 #define MBUF_MCLLOWAT 5 /* int: mbuf cluster low water mark */
858 #define MBUF_STATS 6 /* struct: mbstat */
859 #define MBUF_MOWNERS 7 /* struct: m_owner[] */
860 #define MBUF_MAXID 8 /* number of valid MBUF ids */
861
862 #define CTL_MBUF_NAMES { \
863 { 0, 0 }, \
864 { "msize", CTLTYPE_INT }, \
865 { "mclbytes", CTLTYPE_INT }, \
866 { "nmbclusters", CTLTYPE_INT }, \
867 { "mblowat", CTLTYPE_INT }, \
868 { "mcllowat", CTLTYPE_INT }, \
869 { 0 /* "stats" */, CTLTYPE_STRUCT }, \
870 { 0 /* "mowners" */, CTLTYPE_STRUCT }, \
871 }
872
873 #ifdef _KERNEL
874 extern struct mbstat mbstat;
875 extern int nmbclusters; /* limit on the # of clusters */
876 extern int mblowat; /* mbuf low water mark */
877 extern int mcllowat; /* mbuf cluster low water mark */
878 extern int max_linkhdr; /* largest link-level header */
879 extern int max_protohdr; /* largest protocol header */
880 extern int max_hdr; /* largest link+protocol header */
881 extern int max_datalen; /* MHLEN - max_hdr */
882 extern const int msize; /* mbuf base size */
883 extern const int mclbytes; /* mbuf cluster size */
884 extern struct pool mbpool;
885 extern struct pool mclpool;
886 extern struct pool_cache mbpool_cache;
887 extern struct pool_cache mclpool_cache;
888 #ifdef MBUFTRACE
889 LIST_HEAD(mownerhead, mowner);
890 extern struct mownerhead mowners;
891 extern struct mowner unknown_mowners[];
892 extern struct mowner revoked_mowner;
893 #endif
894
895 MALLOC_DECLARE(M_MBUF);
896 MALLOC_DECLARE(M_SONAME);
897 MALLOC_DECLARE(M_SOOPTS);
898
/*
 * mbuf chain manipulation routines; see mbuf(9) for the full contracts.
 * Allocation / copy / split routines return NULL on failure.
 */
struct mbuf *m_copym(struct mbuf *, int, int, int);
struct mbuf *m_copypacket(struct mbuf *, int);
struct mbuf *m_devget(char *, int, int, struct ifnet *,
    void (*copy)(const void *, void *, size_t));
struct mbuf *m_dup(struct mbuf *, int, int, int);
struct mbuf *m_free(struct mbuf *);
struct mbuf *m_get(int, int);
struct mbuf *m_getclr(int, int);
struct mbuf *m_gethdr(int, int);
struct mbuf *m_prepend(struct mbuf *,int, int);
struct mbuf *m_pulldown(struct mbuf *, int, int, int *);
struct mbuf *m_pullup(struct mbuf *, int);
struct mbuf *m_copyup(struct mbuf *, int, int);
struct mbuf *m_split(struct mbuf *,int, int);
struct mbuf *m_getptr(struct mbuf *, int, int *);
/* In-place modification, traversal and release. */
void	m_adj(struct mbuf *, int);
int	m_apply(struct mbuf *, int, int,
		int (*)(void *, caddr_t, unsigned int), void *);
void	m_cat(struct mbuf *,struct mbuf *);
#ifdef MBUFTRACE
void	m_claimm(struct mbuf *, struct mowner *);
#endif
void	m_clget(struct mbuf *, int);
int	m_mballoc(int, int);
void	m_copyback(struct mbuf *, int, int, const void *);
struct mbuf *m_copyback_cow(struct mbuf *, int, int, const void *, int);
int	m_makewritable(struct mbuf **, int, int, int);
struct mbuf *m_getcl(int, int, int);
void	m_copydata(struct mbuf *, int, int, void *);
void	m_freem(struct mbuf *);
void	m_reclaim(void *, int);
void	mbinit(void);
void	m_move_pkthdr(struct mbuf *to, struct mbuf *from);
932
/* Inline routines (defined below; __unused silences per-TU warnings). */
static __inline u_int m_length(struct mbuf *) __unused;
static __inline void m_ext_free(struct mbuf *, boolean_t) __unused;

/*
 * Packet tag routines.  Tags are small metadata records attached to a
 * packet-header mbuf; see the PACKET_TAG_* type codes below.
 */
struct m_tag *m_tag_get(int, int, int);
void	m_tag_free(struct m_tag *);
void	m_tag_prepend(struct mbuf *, struct m_tag *);
void	m_tag_unlink(struct mbuf *, struct m_tag *);
void	m_tag_delete(struct mbuf *, struct m_tag *);
void	m_tag_delete_chain(struct mbuf *, struct m_tag *);
void	m_tag_delete_nonpersistent(struct mbuf *);
struct m_tag *m_tag_find(struct mbuf *, int, struct m_tag *);
struct m_tag *m_tag_copy(struct m_tag *);
int	m_tag_copy_chain(struct mbuf *, struct mbuf *);
void	m_tag_init(struct mbuf *);
struct m_tag *m_tag_first(struct mbuf *);
struct m_tag *m_tag_next(struct mbuf *, struct m_tag *);
/*
 * Packet tag types.  NOTE(review): values are sparse and grouped by
 * subsystem (PF, IPsec, ...); keep existing numbers stable when adding
 * new tags, as they identify tag records across subsystems.
 */
#define PACKET_TAG_NONE				0  /* Nothing */
#define PACKET_TAG_VLAN				1  /* VLAN ID */
#define PACKET_TAG_ENCAP			2  /* encapsulation data */
#define PACKET_TAG_ESP				3  /* ESP information */
#define PACKET_TAG_PF_GENERATED			11 /* PF generated, pass always */
#define PACKET_TAG_PF_ROUTED			12 /* PF routed, no route loops */
#define PACKET_TAG_PF_FRAGCACHE			13 /* PF fragment cached */
#define PACKET_TAG_PF_QID			14 /* PF queue id */
#define PACKET_TAG_PF_TAG			15 /* PF tags */

#define PACKET_TAG_IPSEC_IN_CRYPTO_DONE		16
#define PACKET_TAG_IPSEC_IN_DONE		17
#define PACKET_TAG_IPSEC_OUT_DONE		18
#define PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED	19 /* NIC IPsec crypto req'ed */
#define PACKET_TAG_IPSEC_IN_COULD_DO_CRYPTO	20 /* NIC notifies IPsec */
#define PACKET_TAG_IPSEC_PENDING_TDB		21 /* Reminder to do IPsec */

#define PACKET_TAG_IPSEC_SOCKET			22 /* IPSEC socket ref */
#define PACKET_TAG_IPSEC_HISTORY		23 /* IPSEC history */

#define PACKET_TAG_PF_TRANSLATE_LOCALHOST	24 /* translated to localhost */
#define PACKET_TAG_IPSEC_NAT_T_PORTS		25 /* two uint16_t */

#define PACKET_TAG_INET6			26 /* IPv6 info */

#define PACKET_TAG_ECO_RETRYPARMS		27 /* Econet retry parameters */
980 /*
981 * Return the number of bytes in the mbuf chain, m.
982 */
983 static __inline u_int
984 m_length(struct mbuf *m)
985 {
986 struct mbuf *m0;
987 u_int pktlen;
988
989 if ((m->m_flags & M_PKTHDR) != 0)
990 return m->m_pkthdr.len;
991
992 pktlen = 0;
993 for (m0 = m; m0 != NULL; m0 = m0->m_next)
994 pktlen += m0->m_len;
995 return pktlen;
996 }
997
/*
 * m_ext_free: release a reference to the mbuf external storage.
 *
 * => if 'dofree', free the mbuf m itself as well.
 * => called at splvm.
 */
static __inline void
m_ext_free(struct mbuf *m, boolean_t dofree)
{

	if (MCLISREFERENCED(m)) {
		/* Other mbufs still reference the storage: only drop ours. */
		_MCLDEREFERENCE(m);
	} else if (m->m_flags & M_CLUSTER) {
		/*
		 * Last reference to a pool-backed cluster: return it to
		 * its pool cache (ext_arg holds the cache pointer).
		 */
		pool_cache_put_paddr((struct pool_cache *)m->m_ext.ext_arg,
		    m->m_ext.ext_buf, m->m_ext.ext_paddr);
	} else if (m->m_ext.ext_free) {
		/*
		 * Custom free routine.  Passing m (rather than NULL) when
		 * 'dofree' delegates freeing the mbuf itself to the
		 * callback, so clear dofree to avoid a double free below.
		 */
		(*m->m_ext.ext_free)(dofree ? m : NULL, m->m_ext.ext_buf,
		    m->m_ext.ext_size, m->m_ext.ext_arg);
		dofree = FALSE;
	} else {
		/* Plain malloc(9)ed storage. */
		free(m->m_ext.ext_buf, m->m_ext.ext_type);
	}
	if (dofree)
		pool_cache_put(&mbpool_cache, m);
}
1023
1024 void m_print(const struct mbuf *, const char *, void (*)(const char *, ...));
1025
1026 #endif /* _KERNEL */
1027 #endif /* !_SYS_MBUF_H_ */
1028
#ifdef _KERNEL
#ifdef MBTYPES
/*
 * Map MT_* mbuf types to the malloc type charged for their storage.
 * Indexed directly by MT_* value — order must match the MT_* numbering.
 * Defined once by the file that #defines MBTYPES; extern elsewhere.
 */
struct malloc_type *mbtypes[] = {		/* XXX */
	M_FREE,		/* MT_FREE	0	should be on free list */
	M_MBUF,		/* MT_DATA	1	dynamic (data) allocation */
	M_MBUF,		/* MT_HEADER	2	packet header */
	M_SONAME,	/* MT_SONAME	3	socket name */
	M_SOOPTS,	/* MT_SOOPTS	4	socket options */
	M_FTABLE,	/* MT_FTABLE	5	fragment reassembly header */
	M_MBUF,		/* MT_CONTROL	6	extra-data protocol message */
	M_MBUF,		/* MT_OOBDATA	7	expedited data */
};
#undef MBTYPES
#else
extern struct malloc_type *mbtypes[];
#endif /* MBTYPES */
#endif /* _KERNEL */
Cache object: 05e777146128794f0a0583f81d3985bc
|