/*	$NetBSD: esp_input.c,v 1.35 2004/02/11 10:47:28 itojun Exp $	*/
/*	$KAME: esp_input.c,v 1.60 2001/09/04 08:43:19 itojun Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RFC1827/2406 Encapsulated Security Payload.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: esp_input.c,v 1.35 2004/02/11 10:47:28 itojun Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/route.h>
#include <net/netisr.h>
#include <machine/cpu.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_var.h>
#include <netinet/ip_ecn.h>
#include <netinet/ip_icmp.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/ip6protosw.h>
#endif

#include <netinet6/ipsec.h>
#include <netinet6/ah.h>
#include <netinet6/esp.h>
#include <netkey/key.h>
#include <netkey/keydb.h>
#include <netkey/key_debug.h>

#include <machine/stdarg.h>

#include <net/net_osdep.h>

/*#define IPLEN_FLIPPED*/

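/*
 * ESPMAXLEN is the larger of the two fixed ESP header layouts (old-style
 * RFC 1827 "struct esp" vs. new-style RFC 2406 "struct newesp"), so a
 * single pullup of this size covers either format before the SA is known.
 */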
#define ESPMAXLEN \
	(sizeof(struct esp) < sizeof(struct newesp) \
		? sizeof(struct newesp) : sizeof(struct esp))

#ifdef INET
void
#if __STDC__
esp4_input(struct mbuf *m, ...)
#else
esp4_input(m, va_alist)
	struct mbuf *m;
	va_dcl
#endif
{
	struct ip *ip;
	struct esp *esp;
	struct esptail esptail;
	u_int32_t spi;
	struct secasvar *sav = NULL;
	size_t taillen;
	u_int16_t nxt;
	const struct esp_algorithm *algo;
	int ivlen;
	size_t hlen;
	size_t esplen;
	int s;
	va_list ap;
	int off;

	va_start(ap, m);
	off = va_arg(ap, int);
	(void)va_arg(ap, int);		/* ignore value, advance ap */
	va_end(ap);

	/* sanity check for alignment. */
	if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
		ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
			"(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
		ipsecstat.in_inval++;
		goto bad;
	}

	if (m->m_len < off + ESPMAXLEN) {
		m = m_pullup(m, off + ESPMAXLEN);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			ipsecstat.in_inval++;
			goto bad;
		}
	}

	ip = mtod(m, struct ip *);
	esp = (struct esp *)(((u_int8_t *)ip) + off);
	hlen = ip->ip_hl << 2;

	/* find the sassoc. */
	spi = esp->esp_spi;
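	/*
	 * The SPI is kept in network byte order for the SADB lookup below;
	 * it is converted with ntohl() only when it is logged.
	 */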

	if ((sav = key_allocsa(AF_INET,
	    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
	    IPPROTO_ESP, spi)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: no key association found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsecstat.in_nosa++;
		goto bad;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP esp4_input called to allocate SA:%p\n", sav));
	if (sav->state != SADB_SASTATE_MATURE &&
	    sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsecstat.in_badspi++;
		goto bad;
	}
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv4 ESP input: "
		    "unsupported encryption algorithm for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsecstat.in_badspi++;
		goto bad;
	}

	/* check if we have proper ivlen information */
	ivlen = sav->ivlen;
	if (ivlen < 0) {
		ipseclog((LOG_ERR, "improper ivlen in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_inval++;
		goto bad;
	}

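	/*
	 * The anti-replay check is meaningful only for new-format (RFC 2406)
	 * SAs that keep a replay window and carry real authentication; an
	 * unauthenticated sequence number cannot be trusted, so other SAs
	 * skip straight to decryption.
	 */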
	if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay &&
	    sav->alg_auth && sav->key_auth))
		goto noreplaycheck;

	if (sav->alg_auth == SADB_X_AALG_NULL ||
	    sav->alg_auth == SADB_AALG_NONE)
		goto noreplaycheck;

	/*
	 * check for sequence number.
	 */
	if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav))
		; /* okay */
	else {
		ipsecstat.in_espreplay++;
		ipseclog((LOG_WARNING,
		    "replay packet in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		goto bad;
	}

	/* check ICV */
    {
	u_int8_t sum0[AH_MAXSUMSIZE];
	u_int8_t sum[AH_MAXSUMSIZE];
	const struct ah_algorithm *sumalgo;
	size_t siz;

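	/*
	 * The ICV length comes from the authentication algorithm bound to
	 * this SA and is rounded up to a multiple of 4 bytes (e.g. the
	 * 12-byte ICV of HMAC-SHA1-96 stays 12); the last siz bytes of the
	 * packet are treated as the authentication data.
	 */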
	sumalgo = ah_algorithm_lookup(sav->alg_auth);
	if (!sumalgo)
		goto noreplaycheck;
	siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
	if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
		ipsecstat.in_inval++;
		goto bad;
	}
	if (AH_MAXSUMSIZE < siz) {
		ipseclog((LOG_DEBUG,
		    "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
		    (u_long)siz));
		ipsecstat.in_inval++;
		goto bad;
	}

	m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t)&sum0[0]);

	if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
		ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_espauthfail++;
		goto bad;
	}

	if (bcmp(sum0, sum, siz) != 0) {
		ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_espauthfail++;
		goto bad;
	}

	/* strip off the authentication data */
	m_adj(m, -siz);
	ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - siz;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - siz);
#endif
	m->m_flags |= M_AUTHIPDGM;
	ipsecstat.in_espauthsucc++;
    }

	/*
	 * update sequence number.
	 */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
		if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) {
			ipsecstat.in_espreplay++;
			goto bad;
		}
	}

noreplaycheck:

	/* process main esp header. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV)
			esplen = sizeof(struct esp);
		else
			esplen = sizeof(struct newesp);
	}
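	/*
	 * esplen is the fixed part of the ESP header that precedes the IV:
	 * old-style (RFC 1827) ESP and derived-IV mode count only the SPI
	 * (struct esp), while new-style (RFC 2406) ESP also carries a 32-bit
	 * sequence number (struct newesp).
	 */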

	if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: packet too short\n"));
		ipsecstat.in_inval++;
		goto bad;
	}

	if (m->m_len < off + esplen + ivlen) {
		m = m_pullup(m, off + esplen + ivlen);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			ipsecstat.in_inval++;
			goto bad;
		}
	}

	/*
	 * pre-compute and cache intermediate key
	 */
	if (esp_schedule(algo, sav) != 0) {
		ipsecstat.in_inval++;
		goto bad;
	}

	/*
	 * decrypt the packet.
	 */
	if (!algo->decrypt)
		panic("internal error: no decrypt function");
	if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
		/* m is already freed */
		m = NULL;
		ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
		    ipsec_logsastr(sav)));
		ipsecstat.in_inval++;
		goto bad;
	}
	ipsecstat.in_esphist[sav->alg_enc]++;

	m->m_flags |= M_DECRYPTED;

	/*
	 * find the trailer of the ESP.
	 */
	m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
	    (caddr_t)&esptail);
	nxt = esptail.esp_nxt;
	taillen = esptail.esp_padlen + sizeof(esptail);
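	/*
	 * esptail is the 2-byte ESP trailer (pad length + next header) now
	 * sitting at the very end of the decrypted packet; taillen counts
	 * the padding bytes plus the trailer itself so both can be trimmed
	 * with a single m_adj() below.
	 */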

	if (m->m_pkthdr.len < taillen ||
	    m->m_pkthdr.len - taillen < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "bad pad length in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		ipsecstat.in_inval++;
		goto bad;
	}

	/* strip off the trailing pad area. */
	m_adj(m, -taillen);

#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - taillen;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
#endif

	/* was it transmitted over the IPsec tunnel SA? */
	if (ipsec4_tunnel_validate(ip, nxt, sav)) {
		/*
		 * strip off all the headers that precede the ESP header.
		 * IP4 xx ESP IP4' payload -> IP4' payload
		 *
		 * XXX more sanity checks
		 * XXX relationship with gif?
		 */
		u_int8_t tos;

		tos = ip->ip_tos;
		m_adj(m, off + esplen + ivlen);
		if (m->m_len < sizeof(*ip)) {
			m = m_pullup(m, sizeof(*ip));
			if (!m) {
				ipsecstat.in_inval++;
				goto bad;
			}
		}
		ip = mtod(m, struct ip *);
		/* ECN consideration. */
		ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos);
		if (!key_checktunnelsanity(sav, AF_INET,
		    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
			ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
			    "in IPv4 ESP input: %s %s\n",
			    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
			ipsecstat.in_inval++;
			goto bad;
		}

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
		    ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
			ipsecstat.in_nomem++;
			goto bad;
		}

		s = splnet();
		if (IF_QFULL(&ipintrq)) {
			ipsecstat.in_inval++;
			splx(s);
			goto bad;
		}
		IF_ENQUEUE(&ipintrq, m);
		m = NULL;
		schednetisr(NETISR_IP);	/* can be skipped, but schedule it to be sure */
		splx(s);
		nxt = IPPROTO_DONE;
	} else {
		/*
		 * strip off ESP header and IV.
		 * even in the m_pulldown case, we need to strip off ESP so
		 * that we can always compute checksum for AH correctly.
		 */
		size_t stripsiz;

		stripsiz = esplen + ivlen;

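		/*
		 * Transport mode: slide the IP header forward over the ESP
		 * header and IV with ovbcopy(), then advance m_data, so the
		 * outer header ends up directly in front of the decrypted
		 * payload without copying the payload itself.
		 */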
		ip = mtod(m, struct ip *);
		ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
		m->m_data += stripsiz;
		m->m_len -= stripsiz;
		m->m_pkthdr.len -= stripsiz;

		ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
		ip->ip_len = ip->ip_len - stripsiz;
#else
		ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
#endif
		ip->ip_p = nxt;

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
			ipsecstat.in_nomem++;
			goto bad;
		}

		if (nxt != IPPROTO_DONE) {
			if ((inetsw[ip_protox[nxt]].pr_flags & PR_LASTHDR) != 0 &&
			    ipsec4_in_reject(m, NULL)) {
				ipsecstat.in_polvio++;
				goto bad;
			}
			(*inetsw[ip_protox[nxt]].pr_input)(m, off, nxt);
		} else
			m_freem(m);
		m = NULL;
	}

	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:%p\n", sav));
		key_freesav(sav);
	}
	ipsecstat.in_success++;
	return;

bad:
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:%p\n", sav));
		key_freesav(sav);
	}
	if (m)
		m_freem(m);
	return;
}

/* assumes that the ip header and esp header are contiguous in the mbuf */
void *
esp4_ctlinput(cmd, sa, v)
	int cmd;
	struct sockaddr *sa;
	void *v;
{
	struct ip *ip = v;
	struct esp *esp;
	struct icmp *icp;
	struct secasvar *sav;

	if (sa->sa_family != AF_INET ||
	    sa->sa_len != sizeof(struct sockaddr_in))
		return NULL;
	if ((unsigned)cmd >= PRC_NCMDS)
		return NULL;
	if (cmd == PRC_MSGSIZE && ip_mtudisc && ip && ip->ip_v == 4) {
		/*
		 * Check to see if we have a valid SA corresponding to
		 * the address in the ICMP message payload.
		 */
		esp = (struct esp *)((caddr_t)ip + (ip->ip_hl << 2));
		if ((sav = key_allocsa(AF_INET,
		    (caddr_t) &ip->ip_src,
		    (caddr_t) &ip->ip_dst,
		    IPPROTO_ESP, esp->esp_spi)) == NULL)
			return NULL;
		if (sav->state != SADB_SASTATE_MATURE &&
		    sav->state != SADB_SASTATE_DYING) {
			key_freesav(sav);
			return NULL;
		}

		/* XXX Further validation? */

		key_freesav(sav);

		/*
		 * Now that we've validated that we are actually communicating
		 * with the host indicated in the ICMP message, locate the
		 * ICMP header, recalculate the new MTU, and create the
		 * corresponding routing entry.
		 */
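		/*
		 * v points at the IP header embedded in the ICMP error, so
		 * stepping back by offsetof(struct icmp, icmp_ip) recovers
		 * the enclosing ICMP header for icmp_mtudisc().
		 */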
		icp = (struct icmp *)((caddr_t)ip -
		    offsetof(struct icmp, icmp_ip));
		icmp_mtudisc(icp, ip->ip_dst);

		return NULL;
	}

	return NULL;
}

#endif /* INET */

#ifdef INET6
int
esp6_input(mp, offp, proto)
	struct mbuf **mp;
	int *offp, proto;
{
	struct mbuf *m = *mp;
	int off = *offp;
	struct ip6_hdr *ip6;
	struct esp *esp;
	struct esptail esptail;
	u_int32_t spi;
	struct secasvar *sav = NULL;
	size_t taillen;
	u_int16_t nxt;
	const struct esp_algorithm *algo;
	int ivlen;
	size_t esplen;
	int s;

	/* sanity check for alignment. */
	if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
		ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
			"(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
		ipsec6stat.in_inval++;
		goto bad;
	}

	IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
	if (esp == NULL) {
		ipsec6stat.in_inval++;
		return IPPROTO_DONE;
	}
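	/*
	 * No m_freem() above: when IP6_EXTHDR_GET fails, the mbuf chain is
	 * expected to have been freed already inside the macro (m_pulldown
	 * failure, KAME behaviour), so only the error is counted here.
	 */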
	ip6 = mtod(m, struct ip6_hdr *);

	if (ntohs(ip6->ip6_plen) == 0) {
		ipseclog((LOG_ERR, "IPv6 ESP input: "
		    "ESP with IPv6 jumbogram is not supported.\n"));
		ipsec6stat.in_inval++;
		goto bad;
	}
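	/*
	 * ip6_plen == 0 indicates a jumbogram, whose real length lives in a
	 * jumbo payload option; the 16-bit ip6_plen adjustments made below
	 * cannot represent that, so such packets are rejected here.
	 */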

	/* find the sassoc. */
	spi = esp->esp_spi;

	if ((sav = key_allocsa(AF_INET6,
	    (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
	    IPPROTO_ESP, spi)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv6 ESP input: no key association found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsec6stat.in_nosa++;
		goto bad;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP esp6_input called to allocate SA:%p\n", sav));
	if (sav->state != SADB_SASTATE_MATURE &&
	    sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsec6stat.in_badspi++;
		goto bad;
	}
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv6 ESP input: "
		    "unsupported encryption algorithm for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		ipsec6stat.in_badspi++;
		goto bad;
	}

	/* check if we have proper ivlen information */
	ivlen = sav->ivlen;
	if (ivlen < 0) {
		ipseclog((LOG_ERR, "improper ivlen in IPv6 ESP input: %s %s\n",
		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
		ipsec6stat.in_badspi++;
		goto bad;
	}

	if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay &&
	    sav->alg_auth && sav->key_auth))
		goto noreplaycheck;

	if (sav->alg_auth == SADB_X_AALG_NULL ||
	    sav->alg_auth == SADB_AALG_NONE)
		goto noreplaycheck;

	/*
	 * check for sequence number.
	 */
	if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav))
		; /* okay */
	else {
		ipsec6stat.in_espreplay++;
		ipseclog((LOG_WARNING,
		    "replay packet in IPv6 ESP input: %s %s\n",
		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
		goto bad;
	}

	/* check ICV */
    {
	u_char sum0[AH_MAXSUMSIZE];
	u_char sum[AH_MAXSUMSIZE];
	const struct ah_algorithm *sumalgo;
	size_t siz;

	sumalgo = ah_algorithm_lookup(sav->alg_auth);
	if (!sumalgo)
		goto noreplaycheck;
	siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
	if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
		ipsec6stat.in_inval++;
		goto bad;
	}
	if (AH_MAXSUMSIZE < siz) {
		ipseclog((LOG_DEBUG,
		    "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
		    (u_long)siz));
		ipsec6stat.in_inval++;
		goto bad;
	}

	m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t)&sum0[0]);

	if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
		ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
		ipsec6stat.in_espauthfail++;
		goto bad;
	}

	if (bcmp(sum0, sum, siz) != 0) {
		ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
		ipsec6stat.in_espauthfail++;
		goto bad;
	}

	/* strip off the authentication data */
	m_adj(m, -siz);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);

	m->m_flags |= M_AUTHIPDGM;
	ipsec6stat.in_espauthsucc++;
    }

	/*
	 * update sequence number.
	 */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
		if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) {
			ipsec6stat.in_espreplay++;
			goto bad;
		}
	}

noreplaycheck:

	/* process main esp header. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV)
			esplen = sizeof(struct esp);
		else
			esplen = sizeof(struct newesp);
	}

	if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "IPv6 ESP input: packet too short\n"));
		ipsec6stat.in_inval++;
		goto bad;
	}

	IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
	if (esp == NULL) {
		ipsec6stat.in_inval++;
		m = NULL;
		goto bad;
	}
	ip6 = mtod(m, struct ip6_hdr *);	/* set it again just in case */

	/*
	 * pre-compute and cache intermediate key
	 */
	if (esp_schedule(algo, sav) != 0) {
		ipsec6stat.in_inval++;
		goto bad;
	}

	/*
	 * decrypt the packet.
	 */
	if (!algo->decrypt)
		panic("internal error: no decrypt function");
	if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
		/* m is already freed */
		m = NULL;
		ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
		    ipsec_logsastr(sav)));
		ipsec6stat.in_inval++;
		goto bad;
	}
	ipsec6stat.in_esphist[sav->alg_enc]++;

	m->m_flags |= M_DECRYPTED;

	/*
	 * find the trailer of the ESP.
	 */
	m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
	    (caddr_t)&esptail);
	nxt = esptail.esp_nxt;
	taillen = esptail.esp_padlen + sizeof(esptail);

	if (m->m_pkthdr.len < taillen
	    || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) {	/* ? */
		ipseclog((LOG_WARNING,
		    "bad pad length in IPv6 ESP input: %s %s\n",
		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
		ipsec6stat.in_inval++;
		goto bad;
	}

	/* strip off the trailing pad area. */
	m_adj(m, -taillen);

	ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);

	/* was it transmitted over the IPsec tunnel SA? */
	if (ipsec6_tunnel_validate(ip6, nxt, sav)) {
		/*
		 * strip off all the headers that precede the ESP header.
		 * IP6 xx ESP IP6' payload -> IP6' payload
		 *
		 * XXX more sanity checks
		 * XXX relationship with gif?
		 */
		u_int32_t flowinfo;	/* net endian */
		flowinfo = ip6->ip6_flow;
		m_adj(m, off + esplen + ivlen);
		if (m->m_len < sizeof(*ip6)) {
			m = m_pullup(m, sizeof(*ip6));
			if (!m) {
				ipsec6stat.in_inval++;
				goto bad;
			}
		}
		ip6 = mtod(m, struct ip6_hdr *);
		/* ECN consideration. */
		ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow);
		if (!key_checktunnelsanity(sav, AF_INET6,
		    (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
			ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
			    "in IPv6 ESP input: %s %s\n",
			    ipsec6_logpacketstr(ip6, spi),
			    ipsec_logsastr(sav)));
			ipsec6stat.in_inval++;
			goto bad;
		}

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
		    ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
			ipsec6stat.in_nomem++;
			goto bad;
		}

		s = splnet();
		if (IF_QFULL(&ip6intrq)) {
			ipsec6stat.in_inval++;
			splx(s);
			goto bad;
		}
		IF_ENQUEUE(&ip6intrq, m);
		m = NULL;
		schednetisr(NETISR_IPV6); /* can be skipped, but schedule it to be sure */
		splx(s);
		nxt = IPPROTO_DONE;
	} else {
		/*
		 * strip off ESP header and IV.
		 * even in the m_pulldown case, we need to strip off ESP so
		 * that we can always compute checksum for AH correctly.
		 */
		size_t stripsiz;
		u_int8_t *prvnxtp;

		/*
		 * Set the next header field of the previous header correctly.
		 */
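		/*
		 * Unlike IPv4, where ip_p can simply be overwritten, the
		 * next-header value must be patched in whatever header
		 * precedes ESP (the IPv6 header itself or the last
		 * extension header); ip6_get_prevhdr() locates that field.
		 */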
		prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
		*prvnxtp = nxt;

		stripsiz = esplen + ivlen;

		ip6 = mtod(m, struct ip6_hdr *);
		if (m->m_len >= stripsiz + off) {
			ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
			m->m_data += stripsiz;
			m->m_len -= stripsiz;
			m->m_pkthdr.len -= stripsiz;
		} else {
			/*
			 * this path involves no data copy if the split
			 * falls on a cluster boundary
			 */
			struct mbuf *n;

			n = m_split(m, off, M_DONTWAIT);
			if (n == NULL) {
				/* m is retained by m_split */
				goto bad;
			}
			m_adj(n, stripsiz);
			/* m_cat does not update m_pkthdr.len */
			m->m_pkthdr.len += n->m_pkthdr.len;
			m_cat(m, n);
		}
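		/*
		 * Two strategies are used above: when the ESP header and IV
		 * sit in the first mbuf, the preceding headers are slid
		 * forward over them (as in the IPv4 path); otherwise the
		 * chain is split at the ESP boundary, the ESP header and IV
		 * are trimmed from the front of the second half, and the two
		 * halves are re-joined with m_cat().
		 */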

		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
			ipsec6stat.in_nomem++;
			goto bad;
		}
	}

	*offp = off;
	*mp = m;

	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp6_input call free SA:%p\n", sav));
		key_freesav(sav);
	}
	ipsec6stat.in_success++;
	return nxt;

bad:
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp6_input call free SA:%p\n", sav));
		key_freesav(sav);
	}
	if (m)
		m_freem(m);
	return IPPROTO_DONE;
}

void
esp6_ctlinput(cmd, sa, d)
	int cmd;
	struct sockaddr *sa;
	void *d;
{
	const struct newesp *espp;
	struct newesp esp;
	struct ip6ctlparam *ip6cp = NULL, ip6cp1;
	struct secasvar *sav;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	int off;
	struct sockaddr_in6 *sa6_src, *sa6_dst;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;
	if ((unsigned)cmd >= PRC_NCMDS)
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;
	}

	if (ip6) {
		/*
		 * Notify the error to all possible sockets via pfctlinput2.
		 * Since the upper layer information (such as protocol type,
		 * source and destination ports) is embedded in the encrypted
		 * data and might have been cut, we can't directly call
		 * an upper layer ctlinput function. However, the pcbnotify
		 * function will consider source and destination addresses
		 * as well as the flow info value, and may be able to find
		 * some PCB that should be notified.
		 * Although pfctlinput2 will call esp6_ctlinput(), there is
		 * no possibility of an infinite loop of function calls,
		 * because we don't pass the inner IPv6 header.
		 */
		bzero(&ip6cp1, sizeof(ip6cp1));
		ip6cp1.ip6c_src = ip6cp->ip6c_src;
		pfctlinput2(cmd, sa, (void *)&ip6cp1);

		/*
		 * Then go to special cases that need ESP header information.
		 * XXX: we assume that when ip6 is non-NULL,
		 * m and off are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(esp))
			return;

		if (m->m_len < off + sizeof(esp)) {
			/*
			 * this should be a rare case,
			 * so we compromise on this copy...
			 */
			m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
			espp = &esp;
		} else
			espp = (struct newesp*)(mtod(m, caddr_t) + off);

		if (cmd == PRC_MSGSIZE) {
			int valid = 0;

			/*
			 * Check to see if we have a valid SA corresponding to
			 * the address in the ICMP message payload.
			 */
			sa6_src = ip6cp->ip6c_src;
			sa6_dst = (struct sockaddr_in6 *)sa;
			sav = key_allocsa(AF_INET6,
			    (caddr_t)&sa6_src->sin6_addr,
			    (caddr_t)&sa6_dst->sin6_addr,
			    IPPROTO_ESP, espp->esp_spi);
			if (sav) {
				if (sav->state == SADB_SASTATE_MATURE ||
				    sav->state == SADB_SASTATE_DYING)
					valid++;
				key_freesav(sav);
			}

			/* XXX Further validation? */

			/*
			 * Depending on the value of "valid" and routing table
			 * size (mtudisc_{hi,lo}wat), we will:
			 * - recalculate the new MTU and create the
			 *   corresponding routing entry, or
			 * - ignore the MTU change notification.
			 */
			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
		}
	} else {
		/* we normally notify any pcb here */
	}
}
#endif /* INET6 */