1 /* $FreeBSD: releng/5.1/sys/netinet6/esp_input.c 111888 2003-03-04 23:19:55Z jlemon $ */
2 /* $KAME: esp_input.c,v 1.62 2002/01/07 11:39:57 kjc Exp $ */
3
4 /*
5 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 /*
34 * RFC1827/2406 Encapsulated Security Payload.
35 */
36
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/mbuf.h>
43 #include <sys/domain.h>
44 #include <sys/protosw.h>
45 #include <sys/socket.h>
46 #include <sys/errno.h>
47 #include <sys/time.h>
48 #include <sys/syslog.h>
49
50 #include <net/if.h>
51 #include <net/route.h>
52 #include <net/netisr.h>
53 #include <machine/cpu.h>
54
55 #include <netinet/in.h>
56 #include <netinet/in_systm.h>
57 #include <netinet/ip.h>
58 #include <netinet/ip_var.h>
59 #include <netinet/in_var.h>
60 #include <netinet/ip_ecn.h>
61 #ifdef INET6
62 #include <netinet6/ip6_ecn.h>
63 #endif
64
65 #ifdef INET6
66 #include <netinet/ip6.h>
67 #include <netinet/in_pcb.h>
68 #include <netinet6/in6_pcb.h>
69 #include <netinet6/ip6_var.h>
70 #include <netinet/icmp6.h>
71 #include <netinet6/ip6protosw.h>
72 #endif
73
74 #include <netinet6/ipsec.h>
75 #ifdef INET6
76 #include <netinet6/ipsec6.h>
77 #endif
78 #include <netinet6/ah.h>
79 #ifdef INET6
80 #include <netinet6/ah6.h>
81 #endif
82 #include <netinet6/esp.h>
83 #ifdef INET6
84 #include <netinet6/esp6.h>
85 #endif
86 #include <netkey/key.h>
87 #include <netkey/keydb.h>
88 #include <netkey/key_debug.h>
89
90 #include <machine/stdarg.h>
91
92 #include <net/net_osdep.h>
93
94 #define IPLEN_FLIPPED
95
96 #define ESPMAXLEN \
97 (sizeof(struct esp) < sizeof(struct newesp) \
98 ? sizeof(struct newesp) : sizeof(struct esp))
99
100 #ifdef INET
101 extern struct protosw inetsw[];
102
103 void
104 esp4_input(m, off)
105 struct mbuf *m;
106 int off;
107 {
108 struct ip *ip;
109 struct esp *esp;
110 struct esptail esptail;
111 u_int32_t spi;
112 struct secasvar *sav = NULL;
113 size_t taillen;
114 u_int16_t nxt;
115 const struct esp_algorithm *algo;
116 int ivlen;
117 size_t hlen;
118 size_t esplen;
119 int proto;
120
121 /* sanity check for alignment. */
122 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
123 ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
124 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
125 ipsecstat.in_inval++;
126 goto bad;
127 }
128
129 if (m->m_len < off + ESPMAXLEN) {
130 m = m_pullup(m, off + ESPMAXLEN);
131 if (!m) {
132 ipseclog((LOG_DEBUG,
133 "IPv4 ESP input: can't pullup in esp4_input\n"));
134 ipsecstat.in_inval++;
135 goto bad;
136 }
137 }
138
139 ip = mtod(m, struct ip *);
140 proto = ip->ip_p;
141 esp = (struct esp *)(((u_int8_t *)ip) + off);
142 #ifdef _IP_VHL
143 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
144 #else
145 hlen = ip->ip_hl << 2;
146 #endif
147
148 /* find the sassoc. */
149 spi = esp->esp_spi;
150
151 if ((sav = key_allocsa(AF_INET,
152 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
153 IPPROTO_ESP, spi)) == 0) {
154 ipseclog((LOG_WARNING,
155 "IPv4 ESP input: no key association found for spi %u\n",
156 (u_int32_t)ntohl(spi)));
157 ipsecstat.in_nosa++;
158 goto bad;
159 }
160 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
161 printf("DP esp4_input called to allocate SA:%p\n", sav));
162 if (sav->state != SADB_SASTATE_MATURE
163 && sav->state != SADB_SASTATE_DYING) {
164 ipseclog((LOG_DEBUG,
165 "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
166 (u_int32_t)ntohl(spi)));
167 ipsecstat.in_badspi++;
168 goto bad;
169 }
170 algo = esp_algorithm_lookup(sav->alg_enc);
171 if (!algo) {
172 ipseclog((LOG_DEBUG, "IPv4 ESP input: "
173 "unsupported encryption algorithm for spi %u\n",
174 (u_int32_t)ntohl(spi)));
175 ipsecstat.in_badspi++;
176 goto bad;
177 }
178
179 /* check if we have proper ivlen information */
180 ivlen = sav->ivlen;
181 if (ivlen < 0) {
182 ipseclog((LOG_ERR, "inproper ivlen in IPv4 ESP input: %s %s\n",
183 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
184 ipsecstat.in_inval++;
185 goto bad;
186 }
187
188 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
189 && (sav->alg_auth && sav->key_auth)))
190 goto noreplaycheck;
191
192 if (sav->alg_auth == SADB_X_AALG_NULL ||
193 sav->alg_auth == SADB_AALG_NONE)
194 goto noreplaycheck;
195
196 /*
197 * check for sequence number.
198 */
199 if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav))
200 ; /* okey */
201 else {
202 ipsecstat.in_espreplay++;
203 ipseclog((LOG_WARNING,
204 "replay packet in IPv4 ESP input: %s %s\n",
205 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
206 goto bad;
207 }
208
209 /* check ICV */
210 {
211 u_char sum0[AH_MAXSUMSIZE];
212 u_char sum[AH_MAXSUMSIZE];
213 const struct ah_algorithm *sumalgo;
214 size_t siz;
215
216 sumalgo = ah_algorithm_lookup(sav->alg_auth);
217 if (!sumalgo)
218 goto noreplaycheck;
219 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
220 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
221 ipsecstat.in_inval++;
222 goto bad;
223 }
224 if (AH_MAXSUMSIZE < siz) {
225 ipseclog((LOG_DEBUG,
226 "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
227 (u_long)siz));
228 ipsecstat.in_inval++;
229 goto bad;
230 }
231
232 m_copydata(m, m->m_pkthdr.len - siz, siz, &sum0[0]);
233
234 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
235 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
236 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
237 ipsecstat.in_espauthfail++;
238 goto bad;
239 }
240
241 if (bcmp(sum0, sum, siz) != 0) {
242 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
243 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
244 ipsecstat.in_espauthfail++;
245 goto bad;
246 }
247
248 /* strip off the authentication data */
249 m_adj(m, -siz);
250 ip = mtod(m, struct ip *);
251 #ifdef IPLEN_FLIPPED
252 ip->ip_len = ip->ip_len - siz;
253 #else
254 ip->ip_len = htons(ntohs(ip->ip_len) - siz);
255 #endif
256 m->m_flags |= M_AUTHIPDGM;
257 ipsecstat.in_espauthsucc++;
258 }
259
260 /*
261 * update sequence number.
262 */
263 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
264 if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) {
265 ipsecstat.in_espreplay++;
266 goto bad;
267 }
268 }
269
270 noreplaycheck:
271
272 /* process main esp header. */
273 if (sav->flags & SADB_X_EXT_OLD) {
274 /* RFC 1827 */
275 esplen = sizeof(struct esp);
276 } else {
277 /* RFC 2406 */
278 if (sav->flags & SADB_X_EXT_DERIV)
279 esplen = sizeof(struct esp);
280 else
281 esplen = sizeof(struct newesp);
282 }
283
284 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
285 ipseclog((LOG_WARNING,
286 "IPv4 ESP input: packet too short\n"));
287 ipsecstat.in_inval++;
288 goto bad;
289 }
290
291 if (m->m_len < off + esplen + ivlen) {
292 m = m_pullup(m, off + esplen + ivlen);
293 if (!m) {
294 ipseclog((LOG_DEBUG,
295 "IPv4 ESP input: can't pullup in esp4_input\n"));
296 ipsecstat.in_inval++;
297 goto bad;
298 }
299 }
300
301 /*
302 * pre-compute and cache intermediate key
303 */
304 if (esp_schedule(algo, sav) != 0) {
305 ipsecstat.in_inval++;
306 goto bad;
307 }
308
309 /*
310 * decrypt the packet.
311 */
312 if (!algo->decrypt)
313 panic("internal error: no decrypt function");
314 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
315 /* m is already freed */
316 m = NULL;
317 ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
318 ipsec_logsastr(sav)));
319 ipsecstat.in_inval++;
320 goto bad;
321 }
322 ipsecstat.in_esphist[sav->alg_enc]++;
323
324 m->m_flags |= M_DECRYPTED;
325
326 /*
327 * find the trailer of the ESP.
328 */
329 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
330 (caddr_t)&esptail);
331 nxt = esptail.esp_nxt;
332 taillen = esptail.esp_padlen + sizeof(esptail);
333
334 if (m->m_pkthdr.len < taillen
335 || m->m_pkthdr.len - taillen < hlen) { /* ? */
336 ipseclog((LOG_WARNING,
337 "bad pad length in IPv4 ESP input: %s %s\n",
338 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
339 ipsecstat.in_inval++;
340 goto bad;
341 }
342
343 /* strip off the trailing pad area. */
344 m_adj(m, -taillen);
345
346 #ifdef IPLEN_FLIPPED
347 ip->ip_len = ip->ip_len - taillen;
348 #else
349 ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
350 #endif
351
352 /* was it transmitted over the IPsec tunnel SA? */
353 if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav)) {
354 /*
355 * strip off all the headers that precedes ESP header.
356 * IP4 xx ESP IP4' payload -> IP4' payload
357 *
358 * XXX more sanity checks
359 * XXX relationship with gif?
360 */
361 u_int8_t tos;
362
363 tos = ip->ip_tos;
364 m_adj(m, off + esplen + ivlen);
365 if (m->m_len < sizeof(*ip)) {
366 m = m_pullup(m, sizeof(*ip));
367 if (!m) {
368 ipsecstat.in_inval++;
369 goto bad;
370 }
371 }
372 ip = mtod(m, struct ip *);
373 /* ECN consideration. */
374 ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos);
375 if (!key_checktunnelsanity(sav, AF_INET,
376 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
377 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
378 "in IPv4 ESP input: %s %s\n",
379 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
380 ipsecstat.in_inval++;
381 goto bad;
382 }
383
384 key_sa_recordxfer(sav, m);
385 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
386 ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
387 ipsecstat.in_nomem++;
388 goto bad;
389 }
390
391 if (! netisr_queue(NETISR_IP, m)) {
392 ipsecstat.in_inval++;
393 m = NULL;
394 goto bad;
395 }
396 m = NULL;
397 nxt = IPPROTO_DONE;
398 } else {
399 /*
400 * strip off ESP header and IV.
401 * even in m_pulldown case, we need to strip off ESP so that
402 * we can always compute checksum for AH correctly.
403 */
404 size_t stripsiz;
405
406 stripsiz = esplen + ivlen;
407
408 ip = mtod(m, struct ip *);
409 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
410 m->m_data += stripsiz;
411 m->m_len -= stripsiz;
412 m->m_pkthdr.len -= stripsiz;
413
414 ip = mtod(m, struct ip *);
415 #ifdef IPLEN_FLIPPED
416 ip->ip_len = ip->ip_len - stripsiz;
417 #else
418 ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
419 #endif
420 ip->ip_p = nxt;
421
422 key_sa_recordxfer(sav, m);
423 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
424 ipsecstat.in_nomem++;
425 goto bad;
426 }
427
428 if (nxt != IPPROTO_DONE) {
429 if ((inetsw[ip_protox[nxt]].pr_flags & PR_LASTHDR) != 0 &&
430 ipsec4_in_reject(m, NULL)) {
431 ipsecstat.in_polvio++;
432 goto bad;
433 }
434 (*inetsw[ip_protox[nxt]].pr_input)(m, off);
435 } else
436 m_freem(m);
437 m = NULL;
438 }
439
440 if (sav) {
441 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
442 printf("DP esp4_input call free SA:%p\n", sav));
443 key_freesav(sav);
444 }
445 ipsecstat.in_success++;
446 return;
447
448 bad:
449 if (sav) {
450 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
451 printf("DP esp4_input call free SA:%p\n", sav));
452 key_freesav(sav);
453 }
454 if (m)
455 m_freem(m);
456 return;
457 }
458 #endif /* INET */
459
460 #ifdef INET6
461 int
462 esp6_input(mp, offp, proto)
463 struct mbuf **mp;
464 int *offp, proto;
465 {
466 struct mbuf *m = *mp;
467 int off = *offp;
468 struct ip6_hdr *ip6;
469 struct esp *esp;
470 struct esptail esptail;
471 u_int32_t spi;
472 struct secasvar *sav = NULL;
473 size_t taillen;
474 u_int16_t nxt;
475 const struct esp_algorithm *algo;
476 int ivlen;
477 size_t esplen;
478
479 /* sanity check for alignment. */
480 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
481 ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
482 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
483 ipsec6stat.in_inval++;
484 goto bad;
485 }
486
487 #ifndef PULLDOWN_TEST
488 IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, IPPROTO_DONE);
489 esp = (struct esp *)(mtod(m, caddr_t) + off);
490 #else
491 IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
492 if (esp == NULL) {
493 ipsec6stat.in_inval++;
494 return IPPROTO_DONE;
495 }
496 #endif
497 ip6 = mtod(m, struct ip6_hdr *);
498
499 if (ntohs(ip6->ip6_plen) == 0) {
500 ipseclog((LOG_ERR, "IPv6 ESP input: "
501 "ESP with IPv6 jumbogram is not supported.\n"));
502 ipsec6stat.in_inval++;
503 goto bad;
504 }
505
506 /* find the sassoc. */
507 spi = esp->esp_spi;
508
509 if ((sav = key_allocsa(AF_INET6,
510 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
511 IPPROTO_ESP, spi)) == 0) {
512 ipseclog((LOG_WARNING,
513 "IPv6 ESP input: no key association found for spi %u\n",
514 (u_int32_t)ntohl(spi)));
515 ipsec6stat.in_nosa++;
516 goto bad;
517 }
518 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
519 printf("DP esp6_input called to allocate SA:%p\n", sav));
520 if (sav->state != SADB_SASTATE_MATURE
521 && sav->state != SADB_SASTATE_DYING) {
522 ipseclog((LOG_DEBUG,
523 "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
524 (u_int32_t)ntohl(spi)));
525 ipsec6stat.in_badspi++;
526 goto bad;
527 }
528 algo = esp_algorithm_lookup(sav->alg_enc);
529 if (!algo) {
530 ipseclog((LOG_DEBUG, "IPv6 ESP input: "
531 "unsupported encryption algorithm for spi %u\n",
532 (u_int32_t)ntohl(spi)));
533 ipsec6stat.in_badspi++;
534 goto bad;
535 }
536
537 /* check if we have proper ivlen information */
538 ivlen = sav->ivlen;
539 if (ivlen < 0) {
540 ipseclog((LOG_ERR, "inproper ivlen in IPv6 ESP input: %s %s\n",
541 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
542 ipsec6stat.in_badspi++;
543 goto bad;
544 }
545
546 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
547 && (sav->alg_auth && sav->key_auth)))
548 goto noreplaycheck;
549
550 if (sav->alg_auth == SADB_X_AALG_NULL ||
551 sav->alg_auth == SADB_AALG_NONE)
552 goto noreplaycheck;
553
554 /*
555 * check for sequence number.
556 */
557 if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav))
558 ; /* okey */
559 else {
560 ipsec6stat.in_espreplay++;
561 ipseclog((LOG_WARNING,
562 "replay packet in IPv6 ESP input: %s %s\n",
563 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
564 goto bad;
565 }
566
567 /* check ICV */
568 {
569 u_char sum0[AH_MAXSUMSIZE];
570 u_char sum[AH_MAXSUMSIZE];
571 const struct ah_algorithm *sumalgo;
572 size_t siz;
573
574 sumalgo = ah_algorithm_lookup(sav->alg_auth);
575 if (!sumalgo)
576 goto noreplaycheck;
577 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
578 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
579 ipsecstat.in_inval++;
580 goto bad;
581 }
582 if (AH_MAXSUMSIZE < siz) {
583 ipseclog((LOG_DEBUG,
584 "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
585 (u_long)siz));
586 ipsec6stat.in_inval++;
587 goto bad;
588 }
589
590 m_copydata(m, m->m_pkthdr.len - siz, siz, &sum0[0]);
591
592 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
593 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
594 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
595 ipsec6stat.in_espauthfail++;
596 goto bad;
597 }
598
599 if (bcmp(sum0, sum, siz) != 0) {
600 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
601 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
602 ipsec6stat.in_espauthfail++;
603 goto bad;
604 }
605
606 /* strip off the authentication data */
607 m_adj(m, -siz);
608 ip6 = mtod(m, struct ip6_hdr *);
609 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);
610
611 m->m_flags |= M_AUTHIPDGM;
612 ipsec6stat.in_espauthsucc++;
613 }
614
615 /*
616 * update sequence number.
617 */
618 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
619 if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) {
620 ipsec6stat.in_espreplay++;
621 goto bad;
622 }
623 }
624
625 noreplaycheck:
626
627 /* process main esp header. */
628 if (sav->flags & SADB_X_EXT_OLD) {
629 /* RFC 1827 */
630 esplen = sizeof(struct esp);
631 } else {
632 /* RFC 2406 */
633 if (sav->flags & SADB_X_EXT_DERIV)
634 esplen = sizeof(struct esp);
635 else
636 esplen = sizeof(struct newesp);
637 }
638
639 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
640 ipseclog((LOG_WARNING,
641 "IPv6 ESP input: packet too short\n"));
642 ipsec6stat.in_inval++;
643 goto bad;
644 }
645
646 #ifndef PULLDOWN_TEST
647 IP6_EXTHDR_CHECK(m, off, esplen + ivlen, IPPROTO_DONE); /* XXX */
648 #else
649 IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
650 if (esp == NULL) {
651 ipsec6stat.in_inval++;
652 m = NULL;
653 goto bad;
654 }
655 #endif
656 ip6 = mtod(m, struct ip6_hdr *); /* set it again just in case */
657
658 /*
659 * pre-compute and cache intermediate key
660 */
661 if (esp_schedule(algo, sav) != 0) {
662 ipsec6stat.in_inval++;
663 goto bad;
664 }
665
666 /*
667 * decrypt the packet.
668 */
669 if (!algo->decrypt)
670 panic("internal error: no decrypt function");
671 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
672 /* m is already freed */
673 m = NULL;
674 ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
675 ipsec_logsastr(sav)));
676 ipsec6stat.in_inval++;
677 goto bad;
678 }
679 ipsec6stat.in_esphist[sav->alg_enc]++;
680
681 m->m_flags |= M_DECRYPTED;
682
683 /*
684 * find the trailer of the ESP.
685 */
686 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
687 (caddr_t)&esptail);
688 nxt = esptail.esp_nxt;
689 taillen = esptail.esp_padlen + sizeof(esptail);
690
691 if (m->m_pkthdr.len < taillen
692 || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /* ? */
693 ipseclog((LOG_WARNING,
694 "bad pad length in IPv6 ESP input: %s %s\n",
695 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
696 ipsec6stat.in_inval++;
697 goto bad;
698 }
699
700 /* strip off the trailing pad area. */
701 m_adj(m, -taillen);
702
703 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
704
705 /* was it transmitted over the IPsec tunnel SA? */
706 if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav)) {
707 /*
708 * strip off all the headers that precedes ESP header.
709 * IP6 xx ESP IP6' payload -> IP6' payload
710 *
711 * XXX more sanity checks
712 * XXX relationship with gif?
713 */
714 u_int32_t flowinfo; /* net endian */
715 flowinfo = ip6->ip6_flow;
716 m_adj(m, off + esplen + ivlen);
717 if (m->m_len < sizeof(*ip6)) {
718 #ifndef PULLDOWN_TEST
719 /*
720 * m_pullup is prohibited in KAME IPv6 input processing
721 * but there's no other way!
722 */
723 #else
724 /* okay to pullup in m_pulldown style */
725 #endif
726 m = m_pullup(m, sizeof(*ip6));
727 if (!m) {
728 ipsec6stat.in_inval++;
729 goto bad;
730 }
731 }
732 ip6 = mtod(m, struct ip6_hdr *);
733 /* ECN consideration. */
734 ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow);
735 if (!key_checktunnelsanity(sav, AF_INET6,
736 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
737 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
738 "in IPv6 ESP input: %s %s\n",
739 ipsec6_logpacketstr(ip6, spi),
740 ipsec_logsastr(sav)));
741 ipsec6stat.in_inval++;
742 goto bad;
743 }
744
745 key_sa_recordxfer(sav, m);
746 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
747 ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
748 ipsec6stat.in_nomem++;
749 goto bad;
750 }
751
752 if (! netisr_queue(NETISR_IPV6, m)) {
753 ipsec6stat.in_inval++;
754 m = NULL;
755 goto bad;
756 }
757 m = NULL;
758 nxt = IPPROTO_DONE;
759 } else {
760 /*
761 * strip off ESP header and IV.
762 * even in m_pulldown case, we need to strip off ESP so that
763 * we can always compute checksum for AH correctly.
764 */
765 size_t stripsiz;
766 char *prvnxtp;
767
768 /*
769 * Set the next header field of the previous header correctly.
770 */
771 prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
772 *prvnxtp = nxt;
773
774 stripsiz = esplen + ivlen;
775
776 ip6 = mtod(m, struct ip6_hdr *);
777 if (m->m_len >= stripsiz + off) {
778 ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
779 m->m_data += stripsiz;
780 m->m_len -= stripsiz;
781 m->m_pkthdr.len -= stripsiz;
782 } else {
783 /*
784 * this comes with no copy if the boundary is on
785 * cluster
786 */
787 struct mbuf *n;
788
789 n = m_split(m, off, M_DONTWAIT);
790 if (n == NULL) {
791 /* m is retained by m_split */
792 goto bad;
793 }
794 m_adj(n, stripsiz);
795 m_cat(m, n);
796 /* m_cat does not update m_pkthdr.len */
797 m->m_pkthdr.len += n->m_pkthdr.len;
798 }
799
800 #ifndef PULLDOWN_TEST
801 /*
802 * KAME requires that the packet to be contiguous on the
803 * mbuf. We need to make that sure.
804 * this kind of code should be avoided.
805 * XXX other conditions to avoid running this part?
806 */
807 if (m->m_len != m->m_pkthdr.len) {
808 struct mbuf *n = NULL;
809 int maxlen;
810
811 MGETHDR(n, M_DONTWAIT, MT_HEADER);
812 maxlen = MHLEN;
813 if (n)
814 M_MOVE_PKTHDR(n, m);
815 if (n && n->m_pkthdr.len > maxlen) {
816 MCLGET(n, M_DONTWAIT);
817 maxlen = MCLBYTES;
818 if ((n->m_flags & M_EXT) == 0) {
819 m_free(n);
820 n = NULL;
821 }
822 }
823 if (!n) {
824 printf("esp6_input: mbuf allocation failed\n");
825 goto bad;
826 }
827
828 if (n->m_pkthdr.len <= maxlen) {
829 m_copydata(m, 0, n->m_pkthdr.len, mtod(n, caddr_t));
830 n->m_len = n->m_pkthdr.len;
831 n->m_next = NULL;
832 m_freem(m);
833 } else {
834 m_copydata(m, 0, maxlen, mtod(n, caddr_t));
835 n->m_len = maxlen;
836 n->m_next = m;
837 m_adj(m, maxlen);
838 }
839 m = n;
840 }
841 #endif
842
843 ip6 = mtod(m, struct ip6_hdr *);
844 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
845
846 key_sa_recordxfer(sav, m);
847 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
848 ipsec6stat.in_nomem++;
849 goto bad;
850 }
851 }
852
853 *offp = off;
854 *mp = m;
855
856 if (sav) {
857 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
858 printf("DP esp6_input call free SA:%p\n", sav));
859 key_freesav(sav);
860 }
861 ipsec6stat.in_success++;
862 return nxt;
863
864 bad:
865 if (sav) {
866 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
867 printf("DP esp6_input call free SA:%p\n", sav));
868 key_freesav(sav);
869 }
870 if (m)
871 m_freem(m);
872 return IPPROTO_DONE;
873 }
874
/*
 * esp6_ctlinput - control (ICMPv6 error) input for the ESP protocol.
 *
 * cmd:	PRC_* control code describing the event (e.g. PRC_MSGSIZE).
 * sa:	destination address of the original packet, as a sockaddr_in6.
 * d:	NULL, or a struct ip6ctlparam from the icmp6 input path carrying
 *	the offending packet (m), its IPv6 header (ip6), and the offset
 *	of the ESP header (off).
 *
 * Validates the address family/length and the command range, notifies
 * interested PCBs via pfctlinput2(), and on PRC_MSGSIZE consults the
 * SADB to decide whether the path-MTU update looks legitimate before
 * passing it to icmp6_mtudisc_update().
 */
void
esp6_ctlinput(cmd, sa, d)
	int cmd;
	struct sockaddr *sa;
	void *d;
{
	const struct newesp *espp;
	struct newesp esp;
	struct ip6ctlparam *ip6cp = NULL, ip6cp1;
	struct secasvar *sav;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	int off;
	struct sockaddr_in6 *sa6_src, *sa6_dst;

	/* only IPv6 destinations with a well-formed sockaddr are handled */
	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;
	/* reject out-of-range control codes */
	if ((unsigned)cmd >= PRC_NCMDS)
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;  /* calm gcc */
	}

	if (ip6) {
		/*
		 * Notify the error to all possible sockets via pfctlinput2.
		 * Since the upper layer information (such as protocol type,
		 * source and destination ports) is embedded in the encrypted
		 * data and might have been cut, we can't directly call
		 * an upper layer ctlinput function. However, the pcbnotify
		 * function will consider source and destination addresses
		 * as well as the flow info value, and may be able to find
		 * some PCB that should be notified.
		 * Although pfctlinput2 will call esp6_ctlinput(), there is
		 * no possibility of an infinite loop of function calls,
		 * because we don't pass the inner IPv6 header.
		 */
		bzero(&ip6cp1, sizeof(ip6cp1));
		ip6cp1.ip6c_src = ip6cp->ip6c_src;
		pfctlinput2(cmd, sa, (void *)&ip6cp1);

		/*
		 * Then go to special cases that need ESP header information.
		 * XXX: We assume that when ip6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(esp))
			return;

		if (m->m_len < off + sizeof(esp)) {
			/*
			 * this should be rare case,
			 * so we compromise on this copy...
			 */
			m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
			espp = &esp;
		} else
			espp = (struct newesp*)(mtod(m, caddr_t) + off);

		if (cmd == PRC_MSGSIZE) {
			int valid = 0;

			/*
			 * Check to see if we have a valid SA corresponding to
			 * the address in the ICMP message payload.
			 */
			sa6_src = ip6cp->ip6c_src;
			sa6_dst = (struct sockaddr_in6 *)sa;
			sav = key_allocsa(AF_INET6,
					  (caddr_t)&sa6_src->sin6_addr,
					  (caddr_t)&sa6_dst->sin6_addr,
					  IPPROTO_ESP, espp->esp_spi);
			if (sav) {
				/* only usable (mature/dying) SAs count */
				if (sav->state == SADB_SASTATE_MATURE ||
				    sav->state == SADB_SASTATE_DYING)
					valid++;
				key_freesav(sav);
			}

			/* XXX Further validation? */

			/*
			 * Depending on the value of "valid" and routing table
			 * size (mtudisc_{hi,lo}wat), we will:
			 * - recalcurate the new MTU and create the
			 *   corresponding routing entry, or
			 * - ignore the MTU change notification.
			 */
			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
		}
	} else {
		/* we normally notify any pcb here */
	}
}
981 #endif /* INET6 */
Cache object: fa5ac7f2f5ac2874634628dd1d69d3dc
|