1 /* $FreeBSD$ */
2 /* $KAME: esp_input.c,v 1.62 2002/01/07 11:39:57 kjc Exp $ */
3
4 /*
5 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 /*
34 * RFC1827/2406 Encapsulated Security Payload.
35 */
36
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/mbuf.h>
43 #include <sys/domain.h>
44 #include <sys/protosw.h>
45 #include <sys/socket.h>
46 #include <sys/errno.h>
47 #include <sys/time.h>
48 #include <sys/syslog.h>
49
50 #include <net/if.h>
51 #include <net/route.h>
52 #include <net/netisr.h>
53 #include <machine/cpu.h>
54
55 #include <netinet/in.h>
56 #include <netinet/in_systm.h>
57 #include <netinet/ip.h>
58 #include <netinet/ip_var.h>
59 #include <netinet/in_var.h>
60 #include <netinet/ip_ecn.h>
61 #ifdef INET6
62 #include <netinet6/ip6_ecn.h>
63 #endif
64
65 #ifdef INET6
66 #include <netinet/ip6.h>
67 #include <netinet6/in6_pcb.h>
68 #include <netinet6/ip6_var.h>
69 #include <netinet/icmp6.h>
70 #include <netinet6/ip6protosw.h>
71 #endif
72
73 #include <netinet6/ipsec.h>
74 #ifdef INET6
75 #include <netinet6/ipsec6.h>
76 #endif
77 #include <netinet6/ah.h>
78 #ifdef INET6
79 #include <netinet6/ah6.h>
80 #endif
81 #include <netinet6/esp.h>
82 #ifdef INET6
83 #include <netinet6/esp6.h>
84 #endif
85 #include <netkey/key.h>
86 #include <netkey/keydb.h>
87 #include <netkey/key_debug.h>
88
89 #include <machine/stdarg.h>
90
91 #include <net/net_osdep.h>
92
93 #define IPLEN_FLIPPED
94
95 #define ESPMAXLEN \
96 (sizeof(struct esp) < sizeof(struct newesp) \
97 ? sizeof(struct newesp) : sizeof(struct esp))
98
99 #ifdef INET
100 #include <netinet/ipprotosw.h>
101 extern struct ipprotosw inetsw[];
102
/*
 * esp4_input - IPsec ESP input processing for IPv4 (RFC 1827 / RFC 2406).
 *
 * Called from the IPv4 input path through the protocol switch.  The
 * variable arguments carry the byte offset of the ESP header within
 * the packet and the protocol number (the protocol argument is read
 * but not otherwise used).
 *
 * Processing order: look up the security association by SPI, verify
 * the sequence number and the authentication data (ICV) when the SA
 * is configured for them, decrypt the payload, strip the ESP trailer,
 * then either re-enqueue the inner packet on ipintrq (tunnel mode) or
 * hand the payload to the next protocol's input routine (transport
 * mode).
 *
 * The mbuf is always consumed: on error it is freed here; on success
 * it is queued or passed downstream.  The SA reference obtained from
 * key_allocsa() is released on every exit path.
 */
void
#if __STDC__
esp4_input(struct mbuf *m, ...)
#else
esp4_input(m, va_alist)
	struct mbuf *m;
	va_dcl
#endif
{
	struct ip *ip;
	struct esp *esp;
	struct esptail esptail;		/* local copy of the trailing ESP trailer */
	u_int32_t spi;			/* SPI, kept in network byte order */
	struct secasvar *sav = NULL;	/* SA reference; freed before return */
	size_t taillen;			/* pad + trailer bytes to strip at the end */
	u_int16_t nxt;			/* inner protocol from the ESP trailer */
	const struct esp_algorithm *algo;
	int ivlen;			/* IV length configured on the SA */
	size_t hlen;			/* outer IP header length in bytes */
	size_t esplen;			/* fixed ESP header size (old vs. new format) */
	int s;				/* saved spl for the ipintrq section */
	va_list ap;
	int off, proto;

	/* Pick up (offset, protocol) passed by the protocol switch. */
	va_start(ap, m);
	off = va_arg(ap, int);
	proto = va_arg(ap, int);
	va_end(ap);

	/* sanity check for alignment. */
	if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
		ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
			"(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
		ipsecstat.in_inval++;
		goto bad;
	}

	/*
	 * Make the fixed part of the ESP header contiguous.  ESPMAXLEN is
	 * the larger of the old (RFC 1827) and new (RFC 2406) header
	 * sizes, so the SPI (and sequence number, if present) can be read
	 * directly below.
	 */
	if (m->m_len < off + ESPMAXLEN) {
		m = m_pullup(m, off + ESPMAXLEN);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			ipsecstat.in_inval++;
			goto bad;
		}
	}

	ip = mtod(m, struct ip *);
	esp = (struct esp *)(((u_int8_t *)ip) + off);
#ifdef _IP_VHL
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	/* find the sassoc. */
	spi = esp->esp_spi;

	if ((sav = key_allocsa(AF_INET,
	    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
	    IPPROTO_ESP, spi)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: no key association found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsecstat.in_nosa++;
		goto bad;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP esp4_input called to allocate SA:%p\n", sav));
	/* Only MATURE or DYING SAs may be used for inbound traffic. */
	if (sav->state != SADB_SASTATE_MATURE
	 && sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsecstat.in_badspi++;
		goto bad;
	}
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv4 ESP input: "
		    "unsupported encryption algorithm for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsecstat.in_badspi++;
		goto bad;
	}

	/* check if we have proper ivlen information */
	ivlen = sav->ivlen;
	if (ivlen < 0) {
		ipseclog((LOG_ERR, "inproper ivlen in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_inval++;
		goto bad;
	}

	/*
	 * Replay and ICV checking apply only to new-format (RFC 2406)
	 * SAs that have a replay window plus an auth algorithm and key
	 * configured; otherwise skip straight to header processing.
	 */
	if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
	 && (sav->alg_auth && sav->key_auth)))
		goto noreplaycheck;

	if (sav->alg_auth == SADB_X_AALG_NULL ||
	    sav->alg_auth == SADB_AALG_NONE)
		goto noreplaycheck;

	/*
	 * check for sequence number.
	 */
	if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav))
		; /* okey */
	else {
		ipsecstat.in_espreplay++;
		ipseclog((LOG_WARNING,
		    "replay packet in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		goto bad;
	}

	/* check ICV */
    {
	u_char sum0[AH_MAXSUMSIZE];	/* ICV as received in the packet */
	u_char sum[AH_MAXSUMSIZE];	/* ICV recomputed over the packet */
	const struct ah_algorithm *sumalgo;
	size_t siz;

	sumalgo = ah_algorithm_lookup(sav->alg_auth);
	if (!sumalgo)
		goto noreplaycheck;
	/* ICV length, rounded up to a 4-byte boundary. */
	siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
	if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
		ipsecstat.in_inval++;
		goto bad;
	}
	if (AH_MAXSUMSIZE < siz) {
		ipseclog((LOG_DEBUG,
		    "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
		    (u_long)siz));
		ipsecstat.in_inval++;
		goto bad;
	}

	/* The received ICV occupies the last siz bytes of the packet. */
	m_copydata(m, m->m_pkthdr.len - siz, siz, &sum0[0]);

	/* Recompute the ICV over the ESP header and encrypted payload. */
	if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
		ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_espauthfail++;
		goto bad;
	}

	if (bcmp(sum0, sum, siz) != 0) {
		ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_espauthfail++;
		goto bad;
	}

	/* strip off the authentication data */
	m_adj(m, -siz);
	ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - siz;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - siz);
#endif
	/* Mark the datagram as authenticated for downstream consumers. */
	m->m_flags |= M_AUTHIPDGM;
	ipsecstat.in_espauthsucc++;
    }

	/*
	 * update sequence number.
	 */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
		if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) {
			ipsecstat.in_espreplay++;
			goto bad;
		}
	}

noreplaycheck:

	/* process main esp header. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV)
			esplen = sizeof(struct esp);
		else
			esplen = sizeof(struct newesp);
	}

	/* Packet must hold at least header + IV + trailer. */
	if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: packet too short\n"));
		ipsecstat.in_inval++;
		goto bad;
	}

	/* Make header + IV contiguous for the decrypt routine. */
	if (m->m_len < off + esplen + ivlen) {
		m = m_pullup(m, off + esplen + ivlen);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			ipsecstat.in_inval++;
			goto bad;
		}
	}

	/*
	 * pre-compute and cache intermediate key
	 */
	if (esp_schedule(algo, sav) != 0) {
		ipsecstat.in_inval++;
		goto bad;
	}

	/*
	 * decrypt the packet.
	 */
	if (!algo->decrypt)
		panic("internal error: no decrypt function");
	if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
		/* m is already freed */
		m = NULL;
		ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
		    ipsec_logsastr(sav)));
		ipsecstat.in_inval++;
		goto bad;
	}
	ipsecstat.in_esphist[sav->alg_enc]++;

	m->m_flags |= M_DECRYPTED;

	/*
	 * find the trailer of the ESP.
	 */
	m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
	    (caddr_t)&esptail);
	nxt = esptail.esp_nxt;
	taillen = esptail.esp_padlen + sizeof(esptail);

	/*
	 * The pad length comes from decrypted (attacker-influenced)
	 * data, so validate it against the actual packet size.
	 */
	if (m->m_pkthdr.len < taillen
	 || m->m_pkthdr.len - taillen < hlen) {	/* ? */
		ipseclog((LOG_WARNING,
		    "bad pad length in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_inval++;
		goto bad;
	}

	/* strip off the trailing pad area. */
	m_adj(m, -taillen);

#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - taillen;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
#endif

	/* was it transmitted over the IPsec tunnel SA? */
	if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav)) {
		/*
		 * strip off all the headers that precedes ESP header.
		 *	IP4 xx ESP IP4' payload -> IP4' payload
		 *
		 * XXX more sanity checks
		 * XXX relationship with gif?
		 */
		u_int8_t tos;

		/* Save the outer TOS for ECN propagation to the inner header. */
		tos = ip->ip_tos;
		m_adj(m, off + esplen + ivlen);
		if (m->m_len < sizeof(*ip)) {
			m = m_pullup(m, sizeof(*ip));
			if (!m) {
				ipsecstat.in_inval++;
				goto bad;
			}
		}
		ip = mtod(m, struct ip *);
		/* ECN consideration. */
		ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos);
		if (!key_checktunnelsanity(sav, AF_INET,
		    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
			ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
			    "in IPv4 ESP input: %s %s\n",
			    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
			ipsecstat.in_inval++;
			goto bad;
		}

		/* Account traffic on the SA and record the IPsec history. */
		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
		    ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
			ipsecstat.in_nomem++;
			goto bad;
		}

		/* Re-queue the decapsulated inner packet for IP input. */
		s = splimp();
		if (IF_QFULL(&ipintrq)) {
			ipsecstat.in_inval++;
			splx(s);
			goto bad;
		}
		IF_ENQUEUE(&ipintrq, m);
		m = NULL;
		schednetisr(NETISR_IP); /* can be skipped but to make sure */
		splx(s);
		nxt = IPPROTO_DONE;
	} else {
		/*
		 * strip off ESP header and IV.
		 * even in m_pulldown case, we need to strip off ESP so that
		 * we can always compute checksum for AH correctly.
		 */
		size_t stripsiz;

		stripsiz = esplen + ivlen;

		/*
		 * Slide the outer IP header (and any options) forward over
		 * the ESP header + IV, then advance the mbuf data pointer.
		 */
		ip = mtod(m, struct ip *);
		ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
		m->m_data += stripsiz;
		m->m_len -= stripsiz;
		m->m_pkthdr.len -= stripsiz;

		ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
		ip->ip_len = ip->ip_len - stripsiz;
#else
		ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
#endif
		/* Patch the IP protocol field to the inner protocol. */
		ip->ip_p = nxt;

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
			ipsecstat.in_nomem++;
			goto bad;
		}

		if (nxt != IPPROTO_DONE) {
			/*
			 * For last-header protocols, re-check the inbound
			 * security policy before delivering the payload.
			 */
			if ((inetsw[ip_protox[nxt]].pr_flags & PR_LASTHDR) != 0 &&
			    ipsec4_in_reject(m, NULL)) {
				ipsecstat.in_polvio++;
				goto bad;
			}
			(*inetsw[ip_protox[nxt]].pr_input)(m, off, nxt);
		} else
			m_freem(m);
		m = NULL;
	}

	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:%p\n", sav));
		key_freesav(sav);
	}
	ipsecstat.in_success++;
	return;

bad:
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:%p\n", sav));
		key_freesav(sav);
	}
	if (m)
		m_freem(m);
	return;
}
472 #endif /* INET */
473
474 #ifdef INET6
/*
 * esp6_input - IPsec ESP input processing for IPv6 (RFC 1827 / RFC 2406).
 *
 * Standard IPv6 extension-header input routine: *mp is the packet,
 * *offp the offset of the ESP header.  Returns the next protocol
 * value to continue header processing with (updating *mp/*offp), or
 * IPPROTO_DONE when the packet has been consumed (tunnel-mode
 * re-queueing, or any error, in which case the mbuf is freed).
 *
 * Mirrors esp4_input: SA lookup by SPI, optional replay/ICV checks,
 * decryption, trailer stripping, then tunnel decapsulation onto
 * ip6intrq or in-place removal of the ESP header for transport mode.
 */
int
esp6_input(mp, offp, proto)
	struct mbuf **mp;
	int *offp, proto;
{
	struct mbuf *m = *mp;
	int off = *offp;
	struct ip6_hdr *ip6;
	struct esp *esp;
	struct esptail esptail;		/* local copy of the trailing ESP trailer */
	u_int32_t spi;			/* SPI, kept in network byte order */
	struct secasvar *sav = NULL;	/* SA reference; freed before return */
	size_t taillen;			/* pad + trailer bytes to strip at the end */
	u_int16_t nxt;			/* inner protocol from the ESP trailer */
	const struct esp_algorithm *algo;
	int ivlen;			/* IV length configured on the SA */
	size_t esplen;			/* fixed ESP header size (old vs. new format) */
	int s;				/* saved spl for the ip6intrq section */

	/* sanity check for alignment. */
	if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
		ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
			"(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
		ipsec6stat.in_inval++;
		goto bad;
	}

	/*
	 * Get a contiguous view of the fixed ESP header; ESPMAXLEN covers
	 * both the old and new header formats.
	 */
#ifndef PULLDOWN_TEST
	IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, IPPROTO_DONE);
	esp = (struct esp *)(mtod(m, caddr_t) + off);
#else
	IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
	if (esp == NULL) {
		ipsec6stat.in_inval++;
		return IPPROTO_DONE;
	}
#endif
	ip6 = mtod(m, struct ip6_hdr *);

	/* A zero payload length means a jumbogram, which is unsupported. */
	if (ntohs(ip6->ip6_plen) == 0) {
		ipseclog((LOG_ERR, "IPv6 ESP input: "
		    "ESP with IPv6 jumbogram is not supported.\n"));
		ipsec6stat.in_inval++;
		goto bad;
	}

	/* find the sassoc. */
	spi = esp->esp_spi;

	if ((sav = key_allocsa(AF_INET6,
	    (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
	    IPPROTO_ESP, spi)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv6 ESP input: no key association found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsec6stat.in_nosa++;
		goto bad;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP esp6_input called to allocate SA:%p\n", sav));
	/* Only MATURE or DYING SAs may be used for inbound traffic. */
	if (sav->state != SADB_SASTATE_MATURE
	 && sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsec6stat.in_badspi++;
		goto bad;
	}
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv6 ESP input: "
		    "unsupported encryption algorithm for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsec6stat.in_badspi++;
		goto bad;
	}

	/* check if we have proper ivlen information */
	ivlen = sav->ivlen;
	if (ivlen < 0) {
		ipseclog((LOG_ERR, "inproper ivlen in IPv6 ESP input: %s %s\n",
		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
		ipsec6stat.in_badspi++;
		goto bad;
	}

	/*
	 * Replay and ICV checking apply only to new-format (RFC 2406)
	 * SAs that have a replay window plus an auth algorithm and key
	 * configured; otherwise skip straight to header processing.
	 */
	if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
	 && (sav->alg_auth && sav->key_auth)))
		goto noreplaycheck;

	if (sav->alg_auth == SADB_X_AALG_NULL ||
	    sav->alg_auth == SADB_AALG_NONE)
		goto noreplaycheck;

	/*
	 * check for sequence number.
	 */
	if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav))
		; /* okey */
	else {
		ipsec6stat.in_espreplay++;
		ipseclog((LOG_WARNING,
		    "replay packet in IPv6 ESP input: %s %s\n",
		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
		goto bad;
	}

	/* check ICV */
    {
	u_char sum0[AH_MAXSUMSIZE];	/* ICV as received in the packet */
	u_char sum[AH_MAXSUMSIZE];	/* ICV recomputed over the packet */
	const struct ah_algorithm *sumalgo;
	size_t siz;

	sumalgo = ah_algorithm_lookup(sav->alg_auth);
	if (!sumalgo)
		goto noreplaycheck;
	/* ICV length, rounded up to a 4-byte boundary. */
	siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
	if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
		ipsec6stat.in_inval++;
		goto bad;
	}
	if (AH_MAXSUMSIZE < siz) {
		ipseclog((LOG_DEBUG,
		    "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
		    (u_long)siz));
		ipsec6stat.in_inval++;
		goto bad;
	}

	/* The received ICV occupies the last siz bytes of the packet. */
	m_copydata(m, m->m_pkthdr.len - siz, siz, &sum0[0]);

	/* Recompute the ICV over the ESP header and encrypted payload. */
	if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
		ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
		ipsec6stat.in_espauthfail++;
		goto bad;
	}

	if (bcmp(sum0, sum, siz) != 0) {
		ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
		ipsec6stat.in_espauthfail++;
		goto bad;
	}

	/* strip off the authentication data */
	m_adj(m, -siz);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);

	/* Mark the datagram as authenticated for downstream consumers. */
	m->m_flags |= M_AUTHIPDGM;
	ipsec6stat.in_espauthsucc++;
    }

	/*
	 * update sequence number.
	 */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
		if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) {
			ipsec6stat.in_espreplay++;
			goto bad;
		}
	}

noreplaycheck:

	/* process main esp header. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV)
			esplen = sizeof(struct esp);
		else
			esplen = sizeof(struct newesp);
	}

	/* Packet must hold at least header + IV + trailer. */
	if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "IPv6 ESP input: packet too short\n"));
		ipsec6stat.in_inval++;
		goto bad;
	}

	/* Make header + IV accessible for the decrypt routine. */
#ifndef PULLDOWN_TEST
	IP6_EXTHDR_CHECK(m, off, esplen + ivlen, IPPROTO_DONE);	/* XXX */
#else
	IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
	if (esp == NULL) {
		ipsec6stat.in_inval++;
		m = NULL;
		goto bad;
	}
#endif
	ip6 = mtod(m, struct ip6_hdr *);	/* set it again just in case */

	/*
	 * pre-compute and cache intermediate key
	 */
	if (esp_schedule(algo, sav) != 0) {
		ipsec6stat.in_inval++;
		goto bad;
	}

	/*
	 * decrypt the packet.
	 */
	if (!algo->decrypt)
		panic("internal error: no decrypt function");
	if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
		/* m is already freed */
		m = NULL;
		ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
		    ipsec_logsastr(sav)));
		ipsec6stat.in_inval++;
		goto bad;
	}
	ipsec6stat.in_esphist[sav->alg_enc]++;

	m->m_flags |= M_DECRYPTED;

	/*
	 * find the trailer of the ESP.
	 */
	m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
	    (caddr_t)&esptail);
	nxt = esptail.esp_nxt;
	taillen = esptail.esp_padlen + sizeof(esptail);

	/*
	 * The pad length comes from decrypted (attacker-influenced)
	 * data, so validate it against the actual packet size.
	 */
	if (m->m_pkthdr.len < taillen
	 || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) {	/* ? */
		ipseclog((LOG_WARNING,
		    "bad pad length in IPv6 ESP input: %s %s\n",
		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
		ipsec6stat.in_inval++;
		goto bad;
	}

	/* strip off the trailing pad area. */
	m_adj(m, -taillen);

	ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);

	/* was it transmitted over the IPsec tunnel SA? */
	if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav)) {
		/*
		 * strip off all the headers that precedes ESP header.
		 *	IP6 xx ESP IP6' payload -> IP6' payload
		 *
		 * XXX more sanity checks
		 * XXX relationship with gif?
		 */
		u_int32_t flowinfo;	/*net endian*/
		/* Save the outer flow info for ECN propagation. */
		flowinfo = ip6->ip6_flow;
		m_adj(m, off + esplen + ivlen);
		if (m->m_len < sizeof(*ip6)) {
#ifndef PULLDOWN_TEST
			/*
			 * m_pullup is prohibited in KAME IPv6 input processing
			 * but there's no other way!
			 */
#else
			/* okay to pullup in m_pulldown style */
#endif
			m = m_pullup(m, sizeof(*ip6));
			if (!m) {
				ipsec6stat.in_inval++;
				goto bad;
			}
		}
		ip6 = mtod(m, struct ip6_hdr *);
		/* ECN consideration. */
		ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow);
		if (!key_checktunnelsanity(sav, AF_INET6,
		    (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
			ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
			    "in IPv6 ESP input: %s %s\n",
			    ipsec6_logpacketstr(ip6, spi),
			    ipsec_logsastr(sav)));
			ipsec6stat.in_inval++;
			goto bad;
		}

		/* Account traffic on the SA and record the IPsec history. */
		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
		    ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
			ipsec6stat.in_nomem++;
			goto bad;
		}

		/* Re-queue the decapsulated inner packet for IPv6 input. */
		s = splimp();
		if (IF_QFULL(&ip6intrq)) {
			ipsec6stat.in_inval++;
			splx(s);
			goto bad;
		}
		IF_ENQUEUE(&ip6intrq, m);
		m = NULL;
		schednetisr(NETISR_IPV6); /* can be skipped but to make sure */
		splx(s);
		nxt = IPPROTO_DONE;
	} else {
		/*
		 * strip off ESP header and IV.
		 * even in m_pulldown case, we need to strip off ESP so that
		 * we can always compute checksum for AH correctly.
		 */
		size_t stripsiz;
		char *prvnxtp;

		/*
		 * Set the next header field of the previous header correctly.
		 */
		prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
		*prvnxtp = nxt;

		stripsiz = esplen + ivlen;

		ip6 = mtod(m, struct ip6_hdr *);
		if (m->m_len >= stripsiz + off) {
			/*
			 * Headers up to the ESP header are contiguous:
			 * slide them forward over the ESP header + IV and
			 * advance the mbuf data pointer.
			 */
			ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
			m->m_data += stripsiz;
			m->m_len -= stripsiz;
			m->m_pkthdr.len -= stripsiz;
		} else {
			/*
			 * this comes with no copy if the boundary is on
			 * cluster
			 */
			struct mbuf *n;

			/* Split at the ESP header, drop header + IV from
			 * the tail chain, and reattach the remainder. */
			n = m_split(m, off, M_DONTWAIT);
			if (n == NULL) {
				/* m is retained by m_split */
				goto bad;
			}
			m_adj(n, stripsiz);
			m_cat(m, n);
			/* m_cat does not update m_pkthdr.len */
			m->m_pkthdr.len += n->m_pkthdr.len;
		}

#ifndef PULLDOWN_TEST
		/*
		 * KAME requires that the packet to be contiguous on the
		 * mbuf.  We need to make that sure.
		 * this kind of code should be avoided.
		 * XXX other conditions to avoid running this part?
		 */
		if (m->m_len != m->m_pkthdr.len) {
			struct mbuf *n = NULL;
			int maxlen;

			/* Allocate a fresh header mbuf (and cluster if the
			 * packet is larger than MHLEN). */
			MGETHDR(n, M_DONTWAIT, MT_HEADER);
			maxlen = MHLEN;
			if (n)
				M_MOVE_PKTHDR(n, m);
			if (n && n->m_pkthdr.len > maxlen) {
				MCLGET(n, M_DONTWAIT);
				maxlen = MCLBYTES;
				if ((n->m_flags & M_EXT) == 0) {
					m_free(n);
					n = NULL;
				}
			}
			if (!n) {
				printf("esp6_input: mbuf allocation failed\n");
				goto bad;
			}

			if (n->m_pkthdr.len <= maxlen) {
				/* Whole packet fits: copy and free the old chain. */
				m_copydata(m, 0, n->m_pkthdr.len, mtod(n, caddr_t));
				n->m_len = n->m_pkthdr.len;
				n->m_next = NULL;
				m_freem(m);
			} else {
				/* Copy the first maxlen bytes and keep the
				 * rest of the old chain linked behind. */
				m_copydata(m, 0, maxlen, mtod(n, caddr_t));
				n->m_len = maxlen;
				n->m_next = m;
				m_adj(m, maxlen);
			}
			m = n;
		}
#endif

		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
			ipsec6stat.in_nomem++;
			goto bad;
		}
	}

	/* Hand the (possibly replaced) mbuf and offset back to the caller. */
	*offp = off;
	*mp = m;

	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp6_input call free SA:%p\n", sav));
		key_freesav(sav);
	}
	ipsec6stat.in_success++;
	return nxt;

bad:
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp6_input call free SA:%p\n", sav));
		key_freesav(sav);
	}
	if (m)
		m_freem(m);
	return IPPROTO_DONE;
}
893
/*
 * esp6_ctlinput - control (error) input for ESP over IPv6.
 *
 * Invoked when an ICMPv6 error refers to an ESP packet we sent.
 * 'cmd' is the PRC_* code, 'sa' the final destination address, and
 * 'd' either NULL or a struct ip6ctlparam describing the offending
 * packet (mbuf, inner IPv6 header, offset of the ESP header).
 *
 * Because the upper-layer headers are encrypted inside ESP, the error
 * cannot be delivered to a specific transport ctlinput; instead all
 * potentially-affected PCBs are notified via pfctlinput2().  For
 * PRC_MSGSIZE, the SA named by the SPI in the ICMPv6 payload is
 * additionally validated and path-MTU discovery state updated.
 */
void
esp6_ctlinput(cmd, sa, d)
	int cmd;
	struct sockaddr *sa;
	void *d;
{
	const struct newesp *espp;
	struct newesp esp;		/* local copy when the header is not contiguous */
	struct ip6ctlparam *ip6cp = NULL, ip6cp1;
	struct secasvar *sav;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	int off;
	struct sockaddr_in6 *sa6_src, *sa6_dst;

	/* Ignore anything that is not a well-formed IPv6 notification. */
	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;
	if ((unsigned)cmd >= PRC_NCMDS)
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
	} else {
		m = NULL;
		ip6 = NULL;
	}

	if (ip6) {
		/*
		 * Notify the error to all possible sockets via pfctlinput2.
		 * Since the upper layer information (such as protocol type,
		 * source and destination ports) is embedded in the encrypted
		 * data and might have been cut, we can't directly call
		 * an upper layer ctlinput function. However, the pcbnotify
		 * function will consider source and destination addresses
		 * as well as the flow info value, and may be able to find
		 * some PCB that should be notified.
		 * Although pfctlinput2 will call esp6_ctlinput(), there is
		 * no possibility of an infinite loop of function calls,
		 * because we don't pass the inner IPv6 header.
		 */
		bzero(&ip6cp1, sizeof(ip6cp1));
		ip6cp1.ip6c_src = ip6cp->ip6c_src;
		pfctlinput2(cmd, sa, (void *)&ip6cp1);

		/*
		 * Then go to special cases that need ESP header information.
		 * XXX: We assume that when ip6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(esp))
			return;

		if (m->m_len < off + sizeof(esp)) {
			/*
			 * this should be rare case,
			 * so we compromise on this copy...
			 */
			m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
			espp = &esp;
		} else
			espp = (struct newesp*)(mtod(m, caddr_t) + off);

		if (cmd == PRC_MSGSIZE) {
			int valid = 0;

			/*
			 * Check to see if we have a valid SA corresponding to
			 * the address in the ICMP message payload.
			 */
			sa6_src = ip6cp->ip6c_src;
			sa6_dst = (struct sockaddr_in6 *)sa;
			sav = key_allocsa(AF_INET6,
					  (caddr_t)&sa6_src->sin6_addr,
					  (caddr_t)&sa6_dst->sin6_addr,
					  IPPROTO_ESP, espp->esp_spi);
			if (sav) {
				/* Only a usable (MATURE/DYING) SA validates
				 * the MTU notification. */
				if (sav->state == SADB_SASTATE_MATURE ||
				    sav->state == SADB_SASTATE_DYING)
					valid++;
				key_freesav(sav);
			}

			/* XXX Further validation? */

			/*
			 * Depending on the value of "valid" and routing table
			 * size (mtudisc_{hi,lo}wat), we will:
			 * - recalcurate the new MTU and create the
			 *   corresponding routing entry, or
			 * - ignore the MTU change notification.
			 */
			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
		}
	} else {
		/* we normally notify any pcb here */
	}
}
999 #endif /* INET6 */
Cache object: abec6d7f9078642503701276453c26a5
|