1 /* $NetBSD: esp_input.c,v 1.35.14.2 2005/07/18 21:06:45 riz Exp $ */
2 /* $KAME: esp_input.c,v 1.60 2001/09/04 08:43:19 itojun Exp $ */
3
4 /*
5 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 /*
34 * RFC1827/2406 Encapsulated Security Payload.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: esp_input.c,v 1.35.14.2 2005/07/18 21:06:45 riz Exp $");
39
40 #include "opt_inet.h"
41 #include "opt_ipsec.h"
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/domain.h>
48 #include <sys/protosw.h>
49 #include <sys/socket.h>
50 #include <sys/errno.h>
51 #include <sys/time.h>
52 #include <sys/kernel.h>
53 #include <sys/syslog.h>
54
55 #include <net/if.h>
56 #include <net/route.h>
57 #include <net/netisr.h>
58 #include <machine/cpu.h>
59
60 #include <netinet/in.h>
61 #include <netinet/in_systm.h>
62 #include <netinet/ip.h>
63 #include <netinet/ip_var.h>
64 #include <netinet/in_var.h>
65 #include <netinet/ip_ecn.h>
66 #include <netinet/ip_icmp.h>
67
68 #ifdef INET6
69 #include <netinet/ip6.h>
70 #include <netinet6/ip6_var.h>
71 #include <netinet/icmp6.h>
72 #include <netinet6/ip6protosw.h>
73 #endif
74
75 #include <netinet6/ipsec.h>
76 #include <netinet6/ah.h>
77 #include <netinet6/esp.h>
78 #include <netkey/key.h>
79 #include <netkey/keydb.h>
80 #include <netkey/key_debug.h>
81
82 #include <machine/stdarg.h>
83
84 #include <net/net_osdep.h>
85
86 /*#define IPLEN_FLIPPED*/
87
88 #define ESPMAXLEN \
89 (sizeof(struct esp) < sizeof(struct newesp) \
90 ? sizeof(struct newesp) : sizeof(struct esp))
91
92 #ifdef INET
/*
 * esp4_input: process an inbound IPv4 ESP packet.
 *
 * Invoked through the protocol switch; the variable arguments carry the
 * byte offset of the ESP header within the mbuf chain (plus a protocol
 * number that is read and discarded).  The packet is matched against the
 * SADB, optionally replay- and ICV-checked, decrypted in place, stripped
 * of its ESP framing, and then either re-queued on ipintrq (tunnel mode)
 * or handed to the next protocol's pr_input (transport mode).  On any
 * failure the mbuf is freed, the SA reference is released, and the
 * matching ipsecstat counter is bumped.
 */
93 void
94 #if __STDC__
95 esp4_input(struct mbuf *m, ...)
96 #else
97 esp4_input(m, va_alist)
98 struct mbuf *m;
99 va_dcl
100 #endif
101 {
102 struct ip *ip;
103 struct esp *esp;
104 struct esptail esptail;
105 u_int32_t spi;
106 struct secasvar *sav = NULL;
107 size_t taillen;
108 u_int16_t nxt;
109 const struct esp_algorithm *algo;
110 int ivlen;
111 size_t hlen;
112 size_t esplen;
113 int s;
114 va_list ap;
115 int off;
116 u_int16_t sport = 0;
117 u_int16_t dport = 0;
118 #ifdef IPSEC_NAT_T
119 struct m_tag *tag = NULL;
120 #endif
121 
122 va_start(ap, m);
123 off = va_arg(ap, int);
124 (void)va_arg(ap, int); /* ignore value, advance ap */
125 va_end(ap);
126 
127 /* sanity check for alignment. */
128 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
129 ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
130 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
131 ipsecstat.in_inval++;
132 goto bad;
133 }
134 
/* make the fixed ESP header contiguous so it can be read directly */
135 if (m->m_len < off + ESPMAXLEN) {
136 m = m_pullup(m, off + ESPMAXLEN);
137 if (!m) {
138 ipseclog((LOG_DEBUG,
139 "IPv4 ESP input: can't pullup in esp4_input\n"));
140 ipsecstat.in_inval++;
141 goto bad;
142 }
143 }
144 
145 #ifdef IPSEC_NAT_T
146 /* find the source port for NAT_T */
147 if ((tag = m_tag_find(m, PACKET_TAG_IPSEC_NAT_T_PORTS, NULL)) != NULL) {
148 sport = ((u_int16_t *)(tag + 1))[0];
149 dport = ((u_int16_t *)(tag + 1))[1];
150 }
151 #endif
152 
153 ip = mtod(m, struct ip *);
154 esp = (struct esp *)(((u_int8_t *)ip) + off);
155 hlen = ip->ip_hl << 2; /* IP header length in bytes; not used further below */
156 
157 /* find the sassoc. */
158 spi = esp->esp_spi; /* kept in network byte order; ntohl() only for logging */
159 
160 if ((sav = key_allocsa(AF_INET,
161 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
162 IPPROTO_ESP, spi, sport, dport)) == 0) {
163 ipseclog((LOG_WARNING,
164 "IPv4 ESP input: no key association found for spi %u\n",
165 (u_int32_t)ntohl(spi)));
166 ipsecstat.in_nosa++;
167 goto bad;
168 }
169 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
170 printf("DP esp4_input called to allocate SA:%p\n", sav));
171 if (sav->state != SADB_SASTATE_MATURE &&
172 sav->state != SADB_SASTATE_DYING) {
173 ipseclog((LOG_DEBUG,
174 "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
175 (u_int32_t)ntohl(spi)));
176 ipsecstat.in_badspi++;
177 goto bad;
178 }
179 algo = esp_algorithm_lookup(sav->alg_enc);
180 if (!algo) {
181 ipseclog((LOG_DEBUG, "IPv4 ESP input: "
182 "unsupported encryption algorithm for spi %u\n",
183 (u_int32_t)ntohl(spi)));
184 ipsecstat.in_badspi++;
185 goto bad;
186 }
187 
188 /* check if we have proper ivlen information */
189 ivlen = sav->ivlen;
190 if (ivlen < 0) {
191 ipseclog((LOG_ERR, "inproper ivlen in IPv4 ESP input: %s %s\n",
192 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
193 ipsecstat.in_inval++;
194 goto bad;
195 }
196 
/*
 * Replay and ICV checks are performed only for new-format (RFC 2406)
 * ESP on an SA that carries replay state and authentication keying.
 */
197 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay &&
198 sav->alg_auth && sav->key_auth))
199 goto noreplaycheck;
200 
201 if (sav->alg_auth == SADB_X_AALG_NULL ||
202 sav->alg_auth == SADB_AALG_NONE)
203 goto noreplaycheck;
204 
205 /*
206 * check for sequence number.
207 */
208 if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav))
209 ; /* okey */
210 else {
211 ipsecstat.in_espreplay++;
212 ipseclog((LOG_WARNING,
213 "replay packet in IPv4 ESP input: %s %s\n",
214 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
215 goto bad;
216 }
217 
218 /* check ICV */
219 {
220 u_int8_t sum0[AH_MAXSUMSIZE];
221 u_int8_t sum[AH_MAXSUMSIZE];
222 const struct ah_algorithm *sumalgo;
223 size_t siz;
224 
225 sumalgo = ah_algorithm_lookup(sav->alg_auth);
226 if (!sumalgo)
227 goto noreplaycheck;
228 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1)); /* authenticator size rounded up to a 4-byte multiple */
229 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
230 ipsecstat.in_inval++;
231 goto bad;
232 }
233 if (AH_MAXSUMSIZE < siz) {
234 ipseclog((LOG_DEBUG,
235 "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
236 (u_long)siz));
237 ipsecstat.in_inval++;
238 goto bad;
239 }
240 
241 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t)&sum0[0]); /* ICV as received, taken from the packet tail */
242 
/* recompute the ICV over everything between the ESP header and the trailing ICV */
243 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
244 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
245 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
246 ipsecstat.in_espauthfail++;
247 goto bad;
248 }
249 
250 if (bcmp(sum0, sum, siz) != 0) {
251 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
252 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
253 ipsecstat.in_espauthfail++;
254 goto bad;
255 }
256 
257 /* strip off the authentication data */
258 m_adj(m, -siz);
259 ip = mtod(m, struct ip *);
260 #ifdef IPLEN_FLIPPED
261 ip->ip_len = ip->ip_len - siz;
262 #else
263 ip->ip_len = htons(ntohs(ip->ip_len) - siz);
264 #endif
265 m->m_flags |= M_AUTHIPDGM;
266 ipsecstat.in_espauthsucc++;
267 }
268 
269 /*
270 * update sequence number.
271 */
272 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
273 if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) {
274 ipsecstat.in_espreplay++;
275 goto bad;
276 }
277 }
278 
279 noreplaycheck:
280 
281 /* process main esp header. */
282 if (sav->flags & SADB_X_EXT_OLD) {
283 /* RFC 1827 */
284 esplen = sizeof(struct esp);
285 } else {
286 /* RFC 2406 */
287 if (sav->flags & SADB_X_EXT_DERIV)
288 esplen = sizeof(struct esp);
289 else
290 esplen = sizeof(struct newesp);
291 }
292 
/* the packet must still hold the ESP header, the IV, and the trailer */
293 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
294 ipseclog((LOG_WARNING,
295 "IPv4 ESP input: packet too short\n"));
296 ipsecstat.in_inval++;
297 goto bad;
298 }
299 
300 if (m->m_len < off + esplen + ivlen) {
301 m = m_pullup(m, off + esplen + ivlen);
302 if (!m) {
303 ipseclog((LOG_DEBUG,
304 "IPv4 ESP input: can't pullup in esp4_input\n"));
305 ipsecstat.in_inval++;
306 goto bad;
307 }
308 }
309 
310 /*
311 * pre-compute and cache intermediate key
312 */
313 if (esp_schedule(algo, sav) != 0) {
314 ipsecstat.in_inval++;
315 goto bad;
316 }
317 
318 /*
319 * decrypt the packet.
320 */
321 if (!algo->decrypt)
322 panic("internal error: no decrypt function");
323 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
324 /* m is already freed */
325 m = NULL;
326 ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
327 ipsec_logsastr(sav)));
328 ipsecstat.in_inval++;
329 goto bad;
330 }
331 ipsecstat.in_esphist[sav->alg_enc]++;
332 
333 m->m_flags |= M_DECRYPTED;
334 
335 /*
336 * find the trailer of the ESP.
337 */
338 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
339 (caddr_t)&esptail);
340 nxt = esptail.esp_nxt;
341 taillen = esptail.esp_padlen + sizeof(esptail); /* padding plus the ESP trailer itself */
342 
343 if (m->m_pkthdr.len < taillen ||
344 m->m_pkthdr.len - taillen < off + esplen + ivlen + sizeof(esptail)) {
345 ipseclog((LOG_WARNING,
346 "bad pad length in IPv4 ESP input: %s %s\n",
347 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
348 ipsecstat.in_inval++;
349 goto bad;
350 }
351 
352 /* strip off the trailing pad area. */
353 m_adj(m, -taillen);
354 
355 #ifdef IPLEN_FLIPPED
356 ip->ip_len = ip->ip_len - taillen;
357 #else
358 ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
359 #endif
360 
361 /* was it transmitted over the IPsec tunnel SA? */
362 if (ipsec4_tunnel_validate(ip, nxt, sav)) {
363 /*
364 * strip off all the headers that precedes ESP header.
365 * IP4 xx ESP IP4' payload -> IP4' payload
366 *
367 * XXX more sanity checks
368 * XXX relationship with gif?
369 */
370 u_int8_t tos;
371 
372 tos = ip->ip_tos;
373 m_adj(m, off + esplen + ivlen);
374 if (m->m_len < sizeof(*ip)) {
375 m = m_pullup(m, sizeof(*ip));
376 if (!m) {
377 ipsecstat.in_inval++;
378 goto bad;
379 }
380 }
381 ip = mtod(m, struct ip *);
382 /* ECN consideration. */
383 ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos);
384 if (!key_checktunnelsanity(sav, AF_INET,
385 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
386 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
387 "in IPv4 ESP input: %s %s\n",
388 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
389 ipsecstat.in_inval++;
390 goto bad;
391 }
392 
393 key_sa_recordxfer(sav, m);
394 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
395 ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
396 ipsecstat.in_nomem++;
397 goto bad;
398 }
399 
/* re-queue the decapsulated inner packet for full IPv4 input processing */
400 s = splnet();
401 if (IF_QFULL(&ipintrq)) {
402 ipsecstat.in_inval++;
403 splx(s);
404 goto bad;
405 }
406 IF_ENQUEUE(&ipintrq, m);
407 m = NULL;
408 schednetisr(NETISR_IP); /* can be skipped but to make sure */
409 splx(s);
410 nxt = IPPROTO_DONE;
411 } else {
412 /*
413 * strip off ESP header and IV.
414 * even in m_pulldown case, we need to strip off ESP so that
415 * we can always compute checksum for AH correctly.
416 */
417 size_t stripsiz;
418 
419 stripsiz = esplen + ivlen;
420 
421 ip = mtod(m, struct ip *);
422 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off); /* slide the IP header forward over the ESP header + IV */
423 m->m_data += stripsiz;
424 m->m_len -= stripsiz;
425 m->m_pkthdr.len -= stripsiz;
426 
427 ip = mtod(m, struct ip *);
428 #ifdef IPLEN_FLIPPED
429 ip->ip_len = ip->ip_len - stripsiz;
430 #else
431 ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
432 #endif
433 ip->ip_p = nxt;
434 
435 key_sa_recordxfer(sav, m);
436 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
437 ipsecstat.in_nomem++;
438 goto bad;
439 }
440 
/* hand off to the next protocol, enforcing inbound policy on last headers */
441 if (nxt != IPPROTO_DONE) {
442 if ((inetsw[ip_protox[nxt]].pr_flags & PR_LASTHDR) != 0 &&
443 ipsec4_in_reject(m, NULL)) {
444 ipsecstat.in_polvio++;
445 goto bad;
446 }
447 (*inetsw[ip_protox[nxt]].pr_input)(m, off, nxt);
448 } else
449 m_freem(m);
450 m = NULL;
451 }
452 
453 if (sav) {
454 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
455 printf("DP esp4_input call free SA:%p\n", sav));
456 key_freesav(sav);
457 }
458 ipsecstat.in_success++;
459 return;
460 
461 bad:
462 if (sav) {
463 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
464 printf("DP esp4_input call free SA:%p\n", sav));
465 key_freesav(sav);
466 }
467 if (m)
468 m_freem(m);
469 return;
470 }
471
472 /* assumes that ip header and esp header are contiguous on mbuf */
473 void *
474 esp4_ctlinput(cmd, sa, v)
475 int cmd;
476 struct sockaddr *sa;
477 void *v;
478 {
479 struct ip *ip = v;
480 struct esp *esp;
481 struct icmp *icp;
482 struct secasvar *sav;
483
484 if (sa->sa_family != AF_INET ||
485 sa->sa_len != sizeof(struct sockaddr_in))
486 return NULL;
487 if ((unsigned)cmd >= PRC_NCMDS)
488 return NULL;
489 if (cmd == PRC_MSGSIZE && ip_mtudisc && ip && ip->ip_v == 4) {
490 /*
491 * Check to see if we have a valid SA corresponding to
492 * the address in the ICMP message payload.
493 */
494 esp = (struct esp *)((caddr_t)ip + (ip->ip_hl << 2));
495 if ((sav = key_allocsa(AF_INET,
496 (caddr_t) &ip->ip_src,
497 (caddr_t) &ip->ip_dst,
498 IPPROTO_ESP, esp->esp_spi,
499 0, 0)) == NULL)
500 return NULL;
501 if (sav->state != SADB_SASTATE_MATURE &&
502 sav->state != SADB_SASTATE_DYING) {
503 key_freesav(sav);
504 return NULL;
505 }
506
507 /* XXX Further validation? */
508
509 key_freesav(sav);
510
511 /*
512 * Now that we've validated that we are actually communicating
513 * with the host indicated in the ICMP message, locate the
514 * ICMP header, recalculate the new MTU, and create the
515 * corresponding routing entry.
516 */
517 icp = (struct icmp *)((caddr_t)ip -
518 offsetof(struct icmp, icmp_ip));
519 icmp_mtudisc(icp, ip->ip_dst);
520
521 return NULL;
522 }
523
524 return NULL;
525 }
526
527 #endif /* INET */
528
529 #ifdef INET6
/*
 * esp6_input: process an inbound IPv6 ESP packet.
 *
 * IPv6 protocol-switch input: *mp is the packet, *offp the offset of
 * the ESP header.  The packet is matched against the SADB, optionally
 * replay- and ICV-checked, decrypted in place, and stripped of its ESP
 * framing.  Returns the next-header value for the caller to continue
 * header processing with, or IPPROTO_DONE when the packet has been
 * consumed (tunnel-mode requeue onto ip6intrq, or any error).  On
 * success *mp/*offp are updated for the caller.
 */
530 int
531 esp6_input(mp, offp, proto)
532 struct mbuf **mp;
533 int *offp, proto;
534 {
535 struct mbuf *m = *mp;
536 int off = *offp;
537 struct ip6_hdr *ip6;
538 struct esp *esp;
539 struct esptail esptail;
540 u_int32_t spi;
541 struct secasvar *sav = NULL;
542 size_t taillen;
543 u_int16_t nxt;
544 const struct esp_algorithm *algo;
545 int ivlen;
546 size_t esplen;
547 int s;
548 
549 /* sanity check for alignment. */
550 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
551 ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
552 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
553 ipsec6stat.in_inval++;
554 goto bad;
555 }
556 
/* make the fixed ESP header readable; on failure the mbuf is already freed */
557 IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
558 if (esp == NULL) {
559 ipsec6stat.in_inval++;
560 return IPPROTO_DONE;
561 }
562 ip6 = mtod(m, struct ip6_hdr *);
563 
/* plen == 0 marks a jumbogram, which is not handled here */
564 if (ntohs(ip6->ip6_plen) == 0) {
565 ipseclog((LOG_ERR, "IPv6 ESP input: "
566 "ESP with IPv6 jumbogram is not supported.\n"));
567 ipsec6stat.in_inval++;
568 goto bad;
569 }
570 
571 /* find the sassoc. */
572 spi = esp->esp_spi; /* kept in network byte order; ntohl() only for logging */
573 
574 if ((sav = key_allocsa(AF_INET6,
575 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
576 IPPROTO_ESP, spi, 0, 0)) == 0) {
577 ipseclog((LOG_WARNING,
578 "IPv6 ESP input: no key association found for spi %u\n",
579 (u_int32_t)ntohl(spi)));
580 ipsec6stat.in_nosa++;
581 goto bad;
582 }
583 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
584 printf("DP esp6_input called to allocate SA:%p\n", sav));
585 if (sav->state != SADB_SASTATE_MATURE &&
586 sav->state != SADB_SASTATE_DYING) {
587 ipseclog((LOG_DEBUG,
588 "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
589 (u_int32_t)ntohl(spi)));
590 ipsec6stat.in_badspi++;
591 goto bad;
592 }
593 algo = esp_algorithm_lookup(sav->alg_enc);
594 if (!algo) {
595 ipseclog((LOG_DEBUG, "IPv6 ESP input: "
596 "unsupported encryption algorithm for spi %u\n",
597 (u_int32_t)ntohl(spi)));
598 ipsec6stat.in_badspi++;
599 goto bad;
600 }
601 
602 /* check if we have proper ivlen information */
603 ivlen = sav->ivlen;
604 if (ivlen < 0) {
605 ipseclog((LOG_ERR, "inproper ivlen in IPv6 ESP input: %s %s\n",
606 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
607 ipsec6stat.in_badspi++;
608 goto bad;
609 }
610 
/*
 * Replay and ICV checks are performed only for new-format (RFC 2406)
 * ESP on an SA that carries replay state and authentication keying.
 */
611 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay &&
612 sav->alg_auth && sav->key_auth))
613 goto noreplaycheck;
614 
615 if (sav->alg_auth == SADB_X_AALG_NULL ||
616 sav->alg_auth == SADB_AALG_NONE)
617 goto noreplaycheck;
618 
619 /*
620 * check for sequence number.
621 */
622 if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav))
623 ; /* okey */
624 else {
625 ipsec6stat.in_espreplay++;
626 ipseclog((LOG_WARNING,
627 "replay packet in IPv6 ESP input: %s %s\n",
628 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
629 goto bad;
630 }
631 
632 /* check ICV */
633 {
634 u_char sum0[AH_MAXSUMSIZE];
635 u_char sum[AH_MAXSUMSIZE];
636 const struct ah_algorithm *sumalgo;
637 size_t siz;
638 
639 sumalgo = ah_algorithm_lookup(sav->alg_auth);
640 if (!sumalgo)
641 goto noreplaycheck;
642 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1)); /* authenticator size rounded up to a 4-byte multiple */
643 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
644 ipsec6stat.in_inval++;
645 goto bad;
646 }
647 if (AH_MAXSUMSIZE < siz) {
648 ipseclog((LOG_DEBUG,
649 "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
650 (u_long)siz));
651 ipsec6stat.in_inval++;
652 goto bad;
653 }
654 
655 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t)&sum0[0]); /* ICV as received, taken from the packet tail */
656 
/* recompute the ICV over everything between the ESP header and the trailing ICV */
657 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
658 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
659 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
660 ipsec6stat.in_espauthfail++;
661 goto bad;
662 }
663 
664 if (bcmp(sum0, sum, siz) != 0) {
665 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
666 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
667 ipsec6stat.in_espauthfail++;
668 goto bad;
669 }
670 
671 /* strip off the authentication data */
672 m_adj(m, -siz);
673 ip6 = mtod(m, struct ip6_hdr *);
674 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);
675 
676 m->m_flags |= M_AUTHIPDGM;
677 ipsec6stat.in_espauthsucc++;
678 }
679 
680 /*
681 * update sequence number.
682 */
683 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
684 if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) {
685 ipsec6stat.in_espreplay++;
686 goto bad;
687 }
688 }
689 
690 noreplaycheck:
691 
692 /* process main esp header. */
693 if (sav->flags & SADB_X_EXT_OLD) {
694 /* RFC 1827 */
695 esplen = sizeof(struct esp);
696 } else {
697 /* RFC 2406 */
698 if (sav->flags & SADB_X_EXT_DERIV)
699 esplen = sizeof(struct esp);
700 else
701 esplen = sizeof(struct newesp);
702 }
703 
/* the packet must still hold the ESP header, the IV, and the trailer */
704 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
705 ipseclog((LOG_WARNING,
706 "IPv6 ESP input: packet too short\n"));
707 ipsec6stat.in_inval++;
708 goto bad;
709 }
710 
/* on IP6_EXTHDR_GET failure the mbuf is already freed, hence m = NULL */
711 IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
712 if (esp == NULL) {
713 ipsec6stat.in_inval++;
714 m = NULL;
715 goto bad;
716 }
717 ip6 = mtod(m, struct ip6_hdr *); /* set it again just in case */
718 
719 /*
720 * pre-compute and cache intermediate key
721 */
722 if (esp_schedule(algo, sav) != 0) {
723 ipsec6stat.in_inval++;
724 goto bad;
725 }
726 
727 /*
728 * decrypt the packet.
729 */
730 if (!algo->decrypt)
731 panic("internal error: no decrypt function");
732 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
733 /* m is already freed */
734 m = NULL;
735 ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
736 ipsec_logsastr(sav)));
737 ipsec6stat.in_inval++;
738 goto bad;
739 }
740 ipsec6stat.in_esphist[sav->alg_enc]++;
741 
742 m->m_flags |= M_DECRYPTED;
743 
744 /*
745 * find the trailer of the ESP.
746 */
747 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
748 (caddr_t)&esptail);
749 nxt = esptail.esp_nxt;
750 taillen = esptail.esp_padlen + sizeof(esptail); /* padding plus the ESP trailer itself */
751 
752 if (m->m_pkthdr.len < taillen
753 || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /* ? */
754 ipseclog((LOG_WARNING,
755 "bad pad length in IPv6 ESP input: %s %s\n",
756 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
757 ipsec6stat.in_inval++;
758 goto bad;
759 }
760 
761 /* strip off the trailing pad area. */
762 m_adj(m, -taillen);
763 
764 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
765 
766 /* was it transmitted over the IPsec tunnel SA? */
767 if (ipsec6_tunnel_validate(ip6, nxt, sav)) {
768 /*
769 * strip off all the headers that precedes ESP header.
770 * IP6 xx ESP IP6' payload -> IP6' payload
771 *
772 * XXX more sanity checks
773 * XXX relationship with gif?
774 */
775 u_int32_t flowinfo; /* net endian */
776 flowinfo = ip6->ip6_flow;
777 m_adj(m, off + esplen + ivlen);
778 if (m->m_len < sizeof(*ip6)) {
779 m = m_pullup(m, sizeof(*ip6));
780 if (!m) {
781 ipsec6stat.in_inval++;
782 goto bad;
783 }
784 }
785 ip6 = mtod(m, struct ip6_hdr *);
786 /* ECN consideration. */
787 ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow);
788 if (!key_checktunnelsanity(sav, AF_INET6,
789 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
790 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
791 "in IPv6 ESP input: %s %s\n",
792 ipsec6_logpacketstr(ip6, spi),
793 ipsec_logsastr(sav)));
794 ipsec6stat.in_inval++;
795 goto bad;
796 }
797 
798 key_sa_recordxfer(sav, m);
799 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
800 ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
801 ipsec6stat.in_nomem++;
802 goto bad;
803 }
804 
/* re-queue the decapsulated inner packet for full IPv6 input processing */
805 s = splnet();
806 if (IF_QFULL(&ip6intrq)) {
807 ipsec6stat.in_inval++;
808 splx(s);
809 goto bad;
810 }
811 IF_ENQUEUE(&ip6intrq, m);
812 m = NULL;
813 schednetisr(NETISR_IPV6); /* can be skipped but to make sure */
814 splx(s);
815 nxt = IPPROTO_DONE;
816 } else {
817 /*
818 * strip off ESP header and IV.
819 * even in m_pulldown case, we need to strip off ESP so that
820 * we can always compute checksum for AH correctly.
821 */
822 size_t stripsiz;
823 u_int8_t *prvnxtp;
824 
825 /*
826 * Set the next header field of the previous header correctly.
827 */
828 prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
829 *prvnxtp = nxt;
830 
831 stripsiz = esplen + ivlen;
832 
833 ip6 = mtod(m, struct ip6_hdr *);
/* contiguous case: slide the leading headers forward in place over ESP + IV */
834 if (m->m_len >= stripsiz + off) {
835 ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
836 m->m_data += stripsiz;
837 m->m_len -= stripsiz;
838 m->m_pkthdr.len -= stripsiz;
839 } else {
840 /*
841 * this comes with no copy if the boundary is on
842 * cluster
843 */
844 struct mbuf *n;
845 
846 n = m_split(m, off, M_DONTWAIT);
847 if (n == NULL) {
848 /* m is retained by m_split */
849 goto bad;
850 }
851 m_adj(n, stripsiz);
852 /* m_cat does not update m_pkthdr.len */
853 m->m_pkthdr.len += n->m_pkthdr.len;
854 m_cat(m, n);
855 }
856 
857 ip6 = mtod(m, struct ip6_hdr *);
858 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
859 
860 key_sa_recordxfer(sav, m);
861 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
862 ipsec6stat.in_nomem++;
863 goto bad;
864 }
865 }
866 
867 *offp = off;
868 *mp = m;
869 
870 if (sav) {
871 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
872 printf("DP esp6_input call free SA:%p\n", sav));
873 key_freesav(sav);
874 }
875 ipsec6stat.in_success++;
876 return nxt;
877 
878 bad:
879 if (sav) {
880 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
881 printf("DP esp6_input call free SA:%p\n", sav));
882 key_freesav(sav);
883 }
884 if (m)
885 m_freem(m);
886 return IPPROTO_DONE;
887 }
888
/*
 * esp6_ctlinput: control-input (ICMPv6 error) handler for IPv6 ESP.
 *
 * Decodes the ip6ctlparam handed in by icmp6, notifies interested PCBs
 * via pfctlinput2 (the upper-layer headers are encrypted, so a direct
 * upper-layer ctlinput call is impossible), and for PRC_MSGSIZE
 * validates the quoted SPI against the SADB before letting
 * icmp6_mtudisc_update() act on the path-MTU change.
 */
889 void
890 esp6_ctlinput(cmd, sa, d)
891 int cmd;
892 struct sockaddr *sa;
893 void *d;
894 {
895 const struct newesp *espp;
896 struct newesp esp;
897 struct ip6ctlparam *ip6cp = NULL, ip6cp1;
898 struct secasvar *sav;
899 struct ip6_hdr *ip6;
900 struct mbuf *m;
901 int off;
902 struct sockaddr_in6 *sa6_src, *sa6_dst;
903 
/* guard clauses: wrong family/length or out-of-range command */
904 if (sa->sa_family != AF_INET6 ||
905 sa->sa_len != sizeof(struct sockaddr_in6))
906 return;
907 if ((unsigned)cmd >= PRC_NCMDS)
908 return;
909 
910 /* if the parameter is from icmp6, decode it. */
911 if (d != NULL) {
912 ip6cp = (struct ip6ctlparam *)d;
913 m = ip6cp->ip6c_m;
914 ip6 = ip6cp->ip6c_ip6;
915 off = ip6cp->ip6c_off;
916 } else {
917 m = NULL;
918 ip6 = NULL;
919 off = 0;
920 }
921 
922 if (ip6) {
923 /*
924 * Notify the error to all possible sockets via pfctlinput2.
925 * Since the upper layer information (such as protocol type,
926 * source and destination ports) is embedded in the encrypted
927 * data and might have been cut, we can't directly call
928 * an upper layer ctlinput function. However, the pcbnotify
929 * function will consider source and destination addresses
930 * as well as the flow info value, and may be able to find
931 * some PCB that should be notified.
932 * Although pfctlinput2 will call esp6_ctlinput(), there is
933 * no possibility of an infinite loop of function calls,
934 * because we don't pass the inner IPv6 header.
935 */
936 bzero(&ip6cp1, sizeof(ip6cp1));
937 ip6cp1.ip6c_src = ip6cp->ip6c_src;
938 pfctlinput2(cmd, sa, (void *)&ip6cp1);
939 
940 /*
941 * Then go to special cases that need ESP header information.
942 * XXX: We assume that when ip6 is non NULL,
943 * M and OFF are valid.
944 */
945 
946 /* check if we can safely examine src and dst ports */
947 if (m->m_pkthdr.len < off + sizeof(esp))
948 return;
949 
950 if (m->m_len < off + sizeof(esp)) {
951 /*
952 * this should be rare case,
953 * so we compromise on this copy...
954 */
955 m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
956 espp = &esp;
957 } else
958 espp = (struct newesp*)(mtod(m, caddr_t) + off);
959 
960 if (cmd == PRC_MSGSIZE) {
961 int valid = 0;
962 
963 /*
964 * Check to see if we have a valid SA corresponding to
965 * the address in the ICMP message payload.
966 */
967 sa6_src = ip6cp->ip6c_src;
968 sa6_dst = (struct sockaddr_in6 *)sa;
969 sav = key_allocsa(AF_INET6,
970 (caddr_t)&sa6_src->sin6_addr,
971 (caddr_t)&sa6_dst->sin6_addr,
972 IPPROTO_ESP, espp->esp_spi, 0, 0);
973 if (sav) {
974 if (sav->state == SADB_SASTATE_MATURE ||
975 sav->state == SADB_SASTATE_DYING)
976 valid++;
977 key_freesav(sav);
978 }
979 
980 /* XXX Further validation? */
981 
982 /*
983 * Depending on the value of "valid" and routing table
984 * size (mtudisc_{hi,lo}wat), we will:
985 * - recalcurate the new MTU and create the
986 * corresponding routing entry, or
987 * - ignore the MTU change notification.
988 */
989 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
990 }
991 } else {
992 /* we normally notify any pcb here */
993 }
994 }
995 #endif /* INET6 */
/* Cache object: 7a8d9a5693065136e2a87596fc76cdcd */