1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright 2017 Jason King.
14 * Copyright 2017 Joyent, Inc.
15 */
16 #include <stddef.h>
17 #include <assert.h>
18 #include <umem.h>
19 #include <string.h>
20 #include <errno.h>
21 #include <sys/types.h>
22 #include <sys/byteorder.h>
23 #include <ipsec_util.h>
24 #include <locale.h>
25 #include <netinet/in.h>
26 #include <security/cryptoki.h>
27 #include <errno.h>
28 #include <sys/socket.h>
29 #include <pthread.h>
30 #include <sys/debug.h>
31 #include <note.h>
32 #include <err.h>
33 #include <limits.h>
34 #include "ikev1.h"
35 #include "ikev2.h"
36 #include "ikev2_sa.h"
37 #include "pkt.h"
38 #include "pkt_impl.h"
39 #include "pkcs11.h"
40
41 static umem_cache_t *pkt_cache;
42
43 static size_t pkt_item_rank(pkt_stack_item_t);
44 static boolean_t pkt_finish(pkt_t *restrict, uchar_t *restrict, uintptr_t,
45 size_t);
46 static int pkt_reset(void *);
47
48 pkt_t *
49 pkt_out_alloc(uint64_t i_spi, uint64_t r_spi, uint8_t version,
50 uint8_t exch_type, uint32_t msgid)
51 {
52 pkt_t *pkt = umem_cache_alloc(pkt_cache, UMEM_DEFAULT);
53
54 if (pkt == NULL)
55 return (NULL);
56
57 pkt->pkt_header.initiator_spi = i_spi;
58 pkt->pkt_header.responder_spi = r_spi;
59 pkt->pkt_header.version = version;
60 pkt->pkt_header.exch_type = exch_type;
61 pkt->pkt_header.msgid = msgid;
62
63 /*
64 * Skip over bytes in pkt->raw for the header -- we keep
65 * pkt->header (the local byte order copy) updated and then
66 * write out the final version (in network byte order) in this
67 * space once we're done building the packet by stacking a
68 * finish callback before anything else.
69 */
70 pkt_stack_push(pkt, PSI_PACKET, pkt_finish, 0);
71 pkt->pkt_ptr += sizeof (ike_header_t);
72 return (pkt);
73 }
74
75 static boolean_t
76 pkt_finish(pkt_t *restrict pkt, uchar_t *restrict ptr, uintptr_t swaparg,
77 size_t numpay)
78 {
79 NOTE(ARGUNUSED(swaparg, numpay))
80
81 ike_header_t *rawhdr;
82
83 rawhdr = (ike_header_t *)ptr;
84 pkt->pkt_header.length = pkt_len(pkt);
85 pkt_hdr_hton(rawhdr, &pkt->pkt_header);
86 return (B_TRUE);
87 }
88
/* Context threaded through the payload-walk callbacks below. */
struct pkt_count_s {
	pkt_t *pkt;		/* packet being indexed (pkt_payload_cb only) */
	size_t paycount;	/* running payload count / next index slot */
	size_t ncount;		/* running count of notify payloads */
};
94
95 static pkt_walk_ret_t
96 pkt_count_cb(uint8_t paytype, uint8_t resv, uchar_t *restrict ptr, size_t len,
97 void *restrict cookie)
98 {
99 struct pkt_count_s *data = cookie;
100
101 data->paycount++;
102 if (paytype == IKEV1_PAYLOAD_NOTIFY ||
103 paytype == IKEV2_PAYLOAD_NOTIFY)
104 data->ncount++;
105 return (PKT_WALK_OK);
106 }
107
108 boolean_t
109 pkt_count_payloads(uchar_t *restrict buf, size_t buflen, uint8_t first,
110 size_t *paycount, size_t *ncount)
111 {
112 struct pkt_count_s count = { 0 };
113 if (pkt_payload_walk(buf, buflen, pkt_count_cb, first,
114 &count) != PKT_WALK_OK)
115 return (B_FALSE);
116
117 if (paycount != NULL)
118 *paycount = count.paycount;
119 if (ncount != NULL)
120 *ncount = count.ncount;
121 return (B_TRUE);
122 }
123
124 static pkt_walk_ret_t
125 pkt_payload_cb(uint8_t paytype, uint8_t resv, uchar_t *restrict ptr, size_t len,
126 void *restrict cookie)
127 {
128 struct pkt_count_s *data = cookie;
129 pkt_payload_t *payp = NULL;
130
131 payp = pkt_payload(data->pkt, data->paycount++);
132 payp->pp_ptr = ptr;
133 payp->pp_len = len;
134 payp->pp_type = paytype;
135 return (PKT_WALK_OK);
136 }
137
138 boolean_t
139 pkt_index_payloads(pkt_t *pkt, uchar_t *buf, size_t buflen, uint8_t first,
140 size_t start_idx)
141 {
142 struct pkt_count_s data = {
143 .pkt = pkt,
144 .paycount = start_idx
145 };
146
147 ASSERT3P((uchar_t *)&pkt->raw, <, buf);
148 ASSERT3P(pkt->pkt_ptr, >=, buf + buflen);
149
150 if (pkt_payload_walk(buf, buflen, pkt_payload_cb, first,
151 &data) != PKT_WALK_OK)
152 return (B_FALSE);
153 return (B_TRUE);
154 }
155
/* make sure pkt can hold paycount payloads indexes and ncount notify indexes */
boolean_t
pkt_size_index(pkt_t *pkt, size_t paycount, size_t ncount)
{
	size_t amt = 0;

	/*
	 * The first PKT_PAYLOAD_NUM index entries live inline in the pkt_t;
	 * an overflow ("extra") array is only needed for counts beyond
	 * that, and only when the requested count actually changes.
	 */
	if (paycount > PKT_PAYLOAD_NUM && paycount != pkt->pkt_payload_count) {
		pkt_payload_t *payp = NULL;

		/* size of the new extra array (entries past the inline ones) */
		amt = paycount - PKT_PAYLOAD_NUM;
		amt *= sizeof (pkt_payload_t);
		payp = umem_zalloc(amt, UMEM_DEFAULT);
		if (payp == NULL)
			return (B_FALSE);

		if (pkt->pkt_payload_extra != NULL) {
			/* XXX: ASSERT() / VERIFY() on shrink? */
			/*
			 * Copy min(old, new) entries: when growing, limit
			 * the copy to the old array's size; when shrinking,
			 * amt (the new size) is already the smaller value.
			 */
			if (paycount > pkt->pkt_payload_count) {
				amt = pkt->pkt_payload_count - PKT_PAYLOAD_NUM;
				amt *= sizeof (pkt_payload_t);
			}
			(void) memcpy(payp, pkt->pkt_payload_extra, amt);

			/* free the old extra array using its own size */
			amt = pkt->pkt_payload_count - PKT_PAYLOAD_NUM;
			amt *= sizeof (pkt_payload_t);
			umem_free(pkt->pkt_payload_extra, amt);
		}
		pkt->pkt_payload_extra = payp;
		pkt->pkt_payload_count = paycount;
	}

	/* Same dance for the notify index */
	if (ncount > PKT_NOTIFY_NUM && ncount != pkt->pkt_notify_count) {
		pkt_notify_t *np;

		amt = ncount - PKT_NOTIFY_NUM;
		amt *= sizeof (pkt_notify_t);
		np = umem_zalloc(amt, UMEM_DEFAULT);
		if (np == NULL)
			return (B_FALSE);

		if (pkt->pkt_notify_extra != NULL) {
			/* XXX: ASSERT() / VERIFY() on shrink? */
			/* as above: copy min(old, new) entries */
			if (ncount > pkt->pkt_notify_count) {
				amt = pkt->pkt_notify_count - PKT_NOTIFY_NUM;
				amt *= sizeof (pkt_notify_t);
			}
			(void) memcpy(np, pkt->pkt_notify_extra, amt);

			/* free the old extra array using its own size */
			amt = pkt->pkt_notify_count - PKT_NOTIFY_NUM;
			amt *= sizeof (pkt_notify_t);
			umem_free(pkt->pkt_notify_extra, amt);
		}
		pkt->pkt_notify_extra = np;
		pkt->pkt_notify_count = ncount;
	}

	return (B_TRUE);
}
214
215 /*
216 * Allocate an pkt_t for an inbound packet, populate the local byte order
217 * header, and cache the location of the payloads in the payload field.
218 */
219 pkt_t *
220 pkt_in_alloc(uchar_t *buf, size_t buflen)
221 {
222 ike_header_t *hdr = (ike_header_t *)buf;
223 pkt_t *pkt = NULL;
224 size_t paycount = 0, ncount = 0;
225 uint8_t first;
226
227 if (buflen > MAX_PACKET_SIZE) {
228 /* XXX: msg */
229 errno = EOVERFLOW;
230 return (NULL);
231 }
232 if (buflen != ntohl(hdr->length)) {
233 /* XXX: msg */
234 return (NULL);
235 }
236
237 first = hdr->next_payload;
238 buf += sizeof (*hdr);
239 buflen -= sizeof (*hdr);
240 if (!pkt_count_payloads(buf, buflen, first, &paycount, &ncount))
241 return (NULL);
242
243 if ((pkt = umem_cache_alloc(pkt_cache, UMEM_DEFAULT)) == NULL) {
244 /* XXX: msg */
245 return (NULL);
246 }
247
248 if (paycount > PKT_PAYLOAD_NUM) {
249 size_t len = paycount - PKT_PAYLOAD_NUM;
250 len *= sizeof (pkt_payload_t);
251 pkt->pkt_payload_extra = umem_zalloc(len, UMEM_DEFAULT);
252 if (pkt->pkt_payload_extra == NULL) {
253 pkt_free(pkt);
254 return (NULL);
255 }
256 }
257
258 if (ncount > PKT_NOTIFY_NUM) {
259 size_t len = ncount - PKT_NOTIFY_NUM;
260 len *= sizeof (pkt_notify_t);
261 pkt->pkt_notify_extra = umem_zalloc(len, UMEM_DEFAULT);
262 if (pkt->pkt_notify_extra == NULL) {
263 pkt_free(pkt);
264 return (NULL);
265 }
266 }
267
268 (void) memcpy(&pkt->pkt_raw, buf, buflen);
269 pkt->pkt_payload_count = paycount;
270 pkt->pkt_notify_count = ncount;
271 pkt_hdr_ntoh(&pkt->pkt_header, (const ike_header_t *)&pkt->pkt_raw);
272 pkt->pkt_ptr += buflen;
273
274 VERIFY(pkt_index_payloads(pkt, pkt_start(pkt), pkt_len(pkt), first, 0));
275 return (pkt);
276 }
277
278 static boolean_t
279 payload_finish(pkt_t *restrict pkt, uchar_t *restrict ptr, uintptr_t arg,
280 size_t numsub)
281 {
282 NOTE(ARGUNUSED(numsub))
283 ike_payload_t pay = { 0 };
284 size_t len = (size_t)(pkt->pkt_ptr - ptr);
285
286 ASSERT3P(pkt->pkt_ptr, >, ptr);
287 ASSERT3U(len, <, MAX_PACKET_SIZE);
288
289 (void) memcpy(&pay, ptr, sizeof (pay));
290 pay.pay_next = (uint8_t)arg;
291 pay.pay_length = htons((uint16_t)len);
292 (void) memcpy(ptr, &pay, sizeof (pay));
293 return (B_TRUE);
294 }
295
296 boolean_t
297 pkt_add_payload(pkt_t *pkt, uint8_t ptype, uint8_t resv)
298 {
299 ike_payload_t pay = { 0 };
300
301 if (pkt_write_left(pkt) < sizeof (pay))
302 return (B_FALSE);
303
304 /* Special case for first payload */
305 if (pkt->pkt_ptr - (uchar_t *)&pkt->pkt_raw == sizeof (ike_header_t))
306 pkt->pkt_header.next_payload = (uint8_t)ptype;
307
308 /*
309 * Otherwise we'll set it when we replace the current top of
310 * the stack
311 */
312 pkt_stack_item_t type =
313 (ptype == IKEV2_PAYLOAD_SA) ? PSI_SA : PSI_PAYLOAD;
314 pkt_stack_push(pkt, type, payload_finish, (uintptr_t)ptype);
315 pay.pay_next = IKE_PAYLOAD_NONE;
316 pay.pay_reserved = resv;
317 PKT_APPEND_STRUCT(pkt, pay);
318 return (B_TRUE);
319 }
320
321 static boolean_t prop_finish(pkt_t *restrict, uchar_t *restrict, uintptr_t,
322 size_t);
323
324 boolean_t
325 pkt_add_prop(pkt_t *pkt, uint8_t propnum, uint8_t proto, size_t spilen,
326 uint64_t spi)
327 {
328 ike_prop_t prop = { 0 };
329
330 if (pkt_write_left(pkt) < sizeof (prop) + spilen)
331 return (B_FALSE);
332
333 pkt_stack_push(pkt, PSI_PROP, prop_finish, (uintptr_t)IKE_PROP_MORE);
334
335 prop.prop_more = IKE_PROP_NONE;
336 prop.prop_num = propnum;
337 prop.prop_proto = (uint8_t)proto;
338 prop.prop_spilen = spilen;
339 PKT_APPEND_STRUCT(pkt, prop);
340
341 switch (spilen) {
342 case sizeof (uint32_t):
343 ASSERT3U(spi, <, UINT_MAX);
344 put32(pkt, (uint32_t)spi);
345 break;
346 case sizeof (uint64_t):
347 put64(pkt, spi);
348 break;
349 case 0:
350 break;
351 default:
352 INVALID(spilen);
353 }
354
355 return (B_TRUE);
356 }
357
358 static boolean_t
359 prop_finish(pkt_t *restrict pkt, uchar_t *restrict ptr, uintptr_t more,
360 size_t numxform)
361 {
362 ike_prop_t prop = { 0 };
363
364 (void) memcpy(&prop, ptr, sizeof (prop));
365 prop.prop_more = (uint8_t)more;
366 prop.prop_len = htons((uint16_t)(pkt->pkt_ptr - ptr));
367 prop.prop_numxform = (uint8_t)numxform;
368 (void) memcpy(ptr, &prop, sizeof (prop));
369 return (B_TRUE);
370 }
371
372 static boolean_t pkt_xform_finish(pkt_t *restrict, uchar_t *restrict, uintptr_t,
373 size_t);
374
375 boolean_t
376 pkt_add_xform(pkt_t *pkt, uint8_t xftype, uint8_t xfid)
377 {
378 ike_xform_t xf = { 0 };
379
380 if (pkt_write_left(pkt) < sizeof (xf))
381 return (B_FALSE);
382
383 pkt_stack_push(pkt, PSI_XFORM, pkt_xform_finish,
384 (uintptr_t)IKE_XFORM_MORE);
385
386 ASSERT3U(xfid, <, USHRT_MAX);
387
388 /* mostly for completeness */
389 xf.xf_more = IKE_XFORM_NONE;
390 xf.xf_type = xftype;
391 xf.xf_id = htons((uint16_t)xfid);
392 PKT_APPEND_STRUCT(pkt, xf);
393 return (B_TRUE);
394 }
395
396 static boolean_t
397 pkt_xform_finish(pkt_t *restrict pkt, uchar_t *restrict ptr, uintptr_t more,
398 size_t numattr)
399 {
400 ike_xform_t xf = { 0 };
401
402 (void) memcpy(&xf, ptr, sizeof (xf));
403 xf.xf_more = more;
404 xf.xf_len = htons((uint16_t)(pkt->pkt_ptr - ptr));
405 (void) memcpy(ptr, &xf, sizeof (xf));
406 return (B_TRUE);
407 }
408
409 boolean_t
410 pkt_add_xform_attr_tv(pkt_t *pkt, uint16_t type, uint16_t val)
411 {
412 ike_xf_attr_t attr = { 0 };
413
414 ASSERT3U(type, <, 0x8000);
415 ASSERT3U(val, <, 0x10000);
416
417 if (pkt_write_left(pkt) < sizeof (attr))
418 return (B_FALSE);
419
420 pkt_stack_push(pkt, PSI_XFORM_ATTR, NULL, 0);
421 attr.attr_type = htons(IKE_ATTR_TYPE(IKE_ATTR_TV, type));
422 attr.attr_len = htons(val);
423 PKT_APPEND_STRUCT(pkt, attr);
424 return (B_TRUE);
425 }
426
427 boolean_t
428 pkt_add_xform_attr_tlv(pkt_t *pkt, uint16_t type, const uchar_t *attrp,
429 size_t attrlen)
430 {
431 ike_xf_attr_t attr = { 0 };
432
433 ASSERT3U(type, <, 0x8000);
434 ASSERT3U(attrlen, <, 0x10000);
435
436 if (pkt_write_left(pkt) < sizeof (attr) + attrlen)
437 return (B_FALSE);
438
439 pkt_stack_push(pkt, PSI_XFORM_ATTR, NULL, 0);
440 attr.attr_type = htons(IKE_ATTR_TYPE(IKE_ATTR_TLV, type));
441 attr.attr_len = htons(attrlen);
442 PKT_APPEND_STRUCT(pkt, attr);
443 (void) memcpy(pkt->pkt_ptr, attrp, attrlen);
444 pkt->pkt_ptr += attrlen;
445 return (B_TRUE);
446 }
447
448 boolean_t
449 pkt_add_cert(pkt_t *restrict pkt, uint8_t encoding, const uchar_t *data,
450 size_t datalen)
451 {
452 if (pkt_write_left(pkt) < 1 + datalen)
453 return (B_FALSE);
454
455 *(pkt->pkt_ptr++) = encoding;
456 (void) memcpy(pkt->pkt_ptr, data, datalen);
457 pkt->pkt_ptr += datalen;
458 return (B_TRUE);
459 }
460
461 void
462 pkt_hdr_ntoh(ike_header_t *restrict dest,
463 const ike_header_t *restrict src)
464 {
465 ASSERT(IS_P2ALIGNED(dest, sizeof (uint64_t)));
466 ASSERT(IS_P2ALIGNED(src, sizeof (uint64_t)));
467 ASSERT3P(src, !=, dest);
468
469 dest->initiator_spi = ntohll(src->initiator_spi);
470 dest->responder_spi = ntohll(src->responder_spi);
471 dest->msgid = ntohl(src->msgid);
472 dest->length = ntohl(src->length);
473 dest->next_payload = src->next_payload;
474 dest->exch_type = src->exch_type;
475 dest->flags = src->flags;
476 dest->version = src->version;
477 }
478
479 void
480 pkt_hdr_hton(ike_header_t *restrict dest,
481 const ike_header_t *restrict src)
482 {
483 ASSERT(IS_P2ALIGNED(dest, sizeof (uint64_t)));
484 ASSERT(IS_P2ALIGNED(src, sizeof (uint64_t)));
485 ASSERT3P(src, !=, dest);
486
487 dest->initiator_spi = htonll(src->initiator_spi);
488 dest->responder_spi = htonll(src->responder_spi);
489 dest->msgid = htonl(src->msgid);
490 dest->length = htonl(src->length);
491 dest->next_payload = src->next_payload;
492 dest->exch_type = src->exch_type;
493 dest->flags = src->flags;
494 dest->version = src->version;
495 }
496
497 /*
498 * Move all the payloads over amt to insert a payload at the front of the
499 * packet. Currently used for IKEV2 cookies.
500 */
501 boolean_t
502 pkt_pay_shift(pkt_t *pkt, uint8_t type, size_t num, ssize_t amt)
503 {
504 uchar_t *start = pkt_start(pkt) + sizeof (ike_header_t);
505 pkt_payload_t *pay = NULL;
506
507 if (pkt_write_left(pkt) < amt)
508 return (B_FALSE);
509 if (!pkt_size_index(pkt, pkt->pkt_payload_count + num,
510 pkt->pkt_notify_count))
511 return (B_FALSE);
512
513 (void) memmove(start + amt, start, (size_t)(pkt->pkt_ptr - start));
514 pkt->pkt_ptr += amt;
515
516 for (uint16_t i = 0; i < pkt->pkt_payload_count; i++) {
517 pay = pkt_payload(pkt, pkt->pkt_payload_count - i);
518 pay[0] = pay[-1];
519 pay[0].pp_ptr += amt;
520 }
521 pay[-1].pp_type = type;
522 pay[-1].pp_len = amt;
523
524 ike_header_t *hdr = (ike_header_t *)&pkt->pkt_raw;
525 ike_payload_t pld = {
526 .pay_next = pkt->pkt_header.next_payload,
527 .pay_length = htons((uint16_t)amt)
528 };
529
530 /* Keep the two in sync for sanity's sake */
531 pkt->pkt_header.next_payload = hdr->next_payload = type;
532
533 (void) memcpy(start, &pld, sizeof (pld));
534 pkt->pkt_ptr += amt;
535 return (B_TRUE);
536 }
537
538 /*
539 * The packet structure of both IKEv1 and IKEv2 consists of a packet with
540 * a header that contains a possibly arbitrary number of payloads. Certain
541 * payloads can contain an arbitrary number of sub structures. Some of those
542 * substructures can themselves contain a potentially arbitrary number of
543 * sub-sub structures.
544 *
545 * One of the vexing aspects of the IKE specification is that the design of
546 * these structures makes it cumbersome to know a priori what some of the values
547 * should be until all the embedded structures have been added. For example
548 * the payload header looks like this (taken from RFC 7296):
549 *
550 * 1 2 3
551 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
552 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
553 * | Next Payload |C| RESERVED | Payload Length |
554 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
555 *
556 * The 'next payload' field (the type of payload in the payload structure
557 * that immediately follows this payload) cannot be set until it is known
 * what the next payload will be (if any). Similarly, the length of the
559 * current payload cannot be easily calculated until all embedded structures
560 * for this payload are known. As Bruce Schneier et. al. have pointed out,
561 * this design is overly complicated and does not provide any improvement in
562 * security. However, we are (sadly) stuck with it.
563 *
564 * To keep the code involved in a given exchange from being buried in
565 * loads of complicated tedium dealing with this, we have created a
566 * hopefully not too clever way to handle setting those fields so
 * the exchange handling code rarely needs to worry about it. It allows
568 * for code similar to:
569 *
570 * ikev2_add_sa(pkt, ...);
571 * ikev2_add_proposal(pkt, IKEV2_PROTO_ESP, ...);
572 * ikev2_add_xform(pkt, IKEV2_XF_ENCR, IKEV2_ENCR_AES_CBC);
573 * ikev2_add_xform_attr(pkt, IKEV2_XF_ATTR_KEYLEN, 256)
574 * ikev2_add_nonce(pkt, ....)
575 * ...
576 * ikev2_send(pkt, ...);
577 *
578 * For any of the IKE structures, generally one or more of the following
579 * questions cannot be answered until subsequent structures are added:
580 *
581 * 1. Some sort of information about the next structure of the same
582 * type (e.g. type of the next payload, if another proposal is present
583 * after the current proposal, etc).
584 * 2. What is the size of this structure (with all embedded structures)?
585 * 3. How many substructures are present in this structure?
586 *
587 * To be able to answer these questions, the general approach is that
588 * information about the state of the datagram at the time a structure is
 * appended is saved in the pkt_t, and a post-processing function/callback is
590 * invoked after any embedded structures have been added. This callback
591 * is given the number of embedded structures that have been added, as well as
592 * the position of the start of the structure this callback is invoked for.
593 * This allows the callback to determine both the size of the structure
594 * as well as the number of substructures. In addition, an argument given
595 * while pushing a subsequent structure of the same type (payload, xform, etc)
596 * is passed to the callback -- this allows the callback to answer question
597 * #1 e.g. when pushing a new payload, it's type is the argument that's
598 * given to the callback invoked for the previous payload.
599 *
600 * To make this all work, each type of structure is assigned a 'rank' for
601 * lack of a better term (suggestions welcome). Lower ranked structures can
602 * embed compatible higher ranked structures. Since IKE structures cannot
603 * embed themselves, when we attempt to append a structure of equal or lower
604 * rank to the last structure appended, we know we are done embedding
605 * structures into the previous structure of equal or lower rank. If we are
606 * adding a structure of higher rank than the last structure added, we know we
 * are embedding a structure. If it's the same rank, we are pushing a similar
608 * type of object and should bump the count of objects.
609 */
610
611 static size_t
612 pkt_item_rank(pkt_stack_item_t type)
613 {
614 switch (type) {
615 case PSI_NONE:
616 return (0);
617 case PSI_PACKET:
618 return (1);
619 case PSI_SK:
620 return (2);
621 /*
622 * same rank, but distinct to allow verification that SA payloads
623 * can only occur either at the start of a datagram, or as the
624 * first item inside an SK payload
625 */
626 case PSI_SA:
627 case PSI_PAYLOAD:
628 return (3);
629 case PSI_DEL:
630 case PSI_TSP:
631 case PSI_PROP:
632 return (4);
633 case PSI_TS:
634 case PSI_XFORM:
635 return (5);
636 case PSI_XFORM_ATTR:
637 return (6);
638 }
639 /*NOTREACHED*/
640 return (SIZE_MAX);
641 }
642
643 static boolean_t
644 pkt_stack_empty(pkt_t *pkt)
645 {
646 return (pkt->stksize == 0 ? B_TRUE : B_FALSE);
647 }
648
649 static pkt_stack_t *
650 pkt_stack_top(pkt_t *pkt)
651 {
652 if (pkt_stack_empty(pkt))
653 return (NULL);
654 return (&pkt->stack[pkt->stksize - 1]);
655 }
656
657 static pkt_stack_t *
658 pkt_stack_pop(pkt_t *pkt)
659 {
660 if (pkt_stack_empty(pkt))
661 return (NULL);
662 return (&pkt->stack[--pkt->stksize]);
663 }
664
665 static int
666 pkt_stack_rank(pkt_t *pkt)
667 {
668 pkt_stack_t *stk = pkt_stack_top(pkt);
669 pkt_stack_item_t type = (stk != NULL) ? stk->stk_type : PSI_NONE;
670
671 return (pkt_item_rank(type));
672 }
673
674 static size_t pkt_stack_unwind(pkt_t *, pkt_stack_item_t, uintptr_t);
675
/*
 * Save structure information as we append a new payload
 * Args:
 *	pkt	The packet in question
 *	type	The type of structure being added
 *	finish	The post-processing callback to run for this structure
 *	swaparg	The argument passed to the callback function of the previous
 *		post-processing callback for the same type of structure.
 */
void
pkt_stack_push(pkt_t *pkt, pkt_stack_item_t type, pkt_finish_fn finish,
    uintptr_t swaparg)
{
	pkt_stack_t *stk;
	size_t count;
	pkt_stack_item_t top_type = PSI_NONE;

	if (pkt_stack_top(pkt) != NULL)
		top_type = pkt_stack_top(pkt)->stk_type;

	/*
	 * If we're adding stuff in the wrong spot, that's a very egregious
	 * bug, so die if we do
	 */
	switch (type) {
	case PSI_PACKET:
		/* a packet may only start with an empty stack */
		VERIFY3S(top_type, ==, PSI_NONE);
		break;
	case PSI_SK:
		/* an SK payload goes directly inside the packet */
		VERIFY3S(top_type, ==, PSI_PACKET);
		break;
	case PSI_SA:
		/* SA: first payload of a datagram or first inside an SK */
		VERIFY(top_type == PSI_PACKET || top_type == PSI_SK);
		break;
	case PSI_PAYLOAD:
		VERIFY(top_type != PSI_PACKET && top_type != PSI_NONE);
		break;
	case PSI_PROP:
		/* after the SA itself, or after a completed sibling/child */
		VERIFY(top_type == PSI_SA || top_type == PSI_PROP ||
		    top_type == PSI_XFORM || top_type == PSI_XFORM_ATTR);
		break;
	case PSI_XFORM:
		VERIFY(top_type == PSI_XFORM || top_type == PSI_PROP ||
		    top_type == PSI_XFORM_ATTR);
		break;
	case PSI_XFORM_ATTR:
		VERIFY(top_type == PSI_XFORM || top_type == PSI_XFORM_ATTR);
		break;
	case PSI_DEL:
		VERIFY(top_type == PSI_SA || top_type == PSI_DEL);
		break;
	case PSI_TSP:
		VERIFY3U(top_type, ==, PSI_SA);
		break;
	case PSI_TS:
		VERIFY(top_type == PSI_TSP || top_type == PSI_TS);
		break;
	case PSI_NONE:
		INVALID("type");
		break;
	}

	/*
	 * Run the finish callbacks of everything of equal or higher rank;
	 * count is how many same-rank siblings precede this structure.
	 */
	count = pkt_stack_unwind(pkt, type, swaparg);

	ASSERT3U(pkt_stack_rank(pkt), <, pkt_item_rank(type));
	ASSERT3U(pkt->stksize, <, PKT_STACK_DEPTH);

	/* record this structure's start so its callback can find it later */
	stk = &pkt->stack[pkt->stksize++];

	stk->stk_finish = finish;
	stk->stk_ptr = pkt->pkt_ptr;
	stk->stk_count = count + 1;
	stk->stk_type = type;
}
750
/*
 * This is where the magic happens.  Pop off what's saved in pkt->stack
 * and run all the post processing until the rank of the top item in
 * the stack is lower than the rank of what we're about to add (contained in
 * type).  Return the running count of structures of the same rank as type.
 */
static size_t
pkt_stack_unwind(pkt_t *pkt, pkt_stack_item_t type, uintptr_t swaparg)
{
	pkt_stack_t *stk = NULL;
	size_t count = 0;
	size_t rank = pkt_item_rank(type);
	size_t stk_rank = 0;

	/* pop and post-process everything of equal or higher rank */
	while (!pkt_stack_empty(pkt) &&
	    (stk_rank = pkt_stack_rank(pkt)) >= rank) {
		stk = pkt_stack_pop(pkt);
		if (stk->stk_finish != NULL) {
			boolean_t ret;

			/*
			 * swaparg is only meaningful to a callback for a
			 * structure of the same rank as the one being
			 * pushed; deeper-nested structures receive 0.
			 */
			ret = stk->stk_finish(pkt, stk->stk_ptr,
			    (stk_rank == rank) ? swaparg : 0, count);
			/* latch (sticky) any callback failure */
			pkt->pkt_stk_error |= !ret;
		}

		/*
		 * This was initialized to 0, and is deliberately set after
		 * calling the post-processing callback so that the
		 * post-processing callback called in the next iteration
		 * of the loop (if it happens) gets the count of embedded
		 * structures (e.g. proposal post-processing function gets
		 * the count of embedded transform structures).
		 */
		count = stk->stk_count;
	}

	ASSERT3U(pkt_stack_rank(pkt), <, rank);

	/* continue the sibling count if one of the same rank was popped */
	if (stk != NULL && stk_rank == rank)
		return (stk->stk_count);
	return (0);
}
793
/* pops off all the callbacks in preparation for sending */
boolean_t
pkt_done(pkt_t *pkt)
{
	(void) pkt_stack_unwind(pkt, PSI_NONE, 0);
	/*
	 * NOTE(review): this returns pkt_stk_error directly, i.e. B_TRUE
	 * when a post-processing callback FAILED.  Every other boolean_t
	 * function in this file returns B_TRUE on success -- confirm that
	 * callers expect the inverted sense here before relying on it.
	 */
	return (pkt->pkt_stk_error);
}
801
802 /*
803 * Call cb on each encountered payload.
804 * data - the first payload to walk
805 * len - total size of the buffer to walk (should end on payload boundary)
806 * cb - callback function to invoke on each payload
807 * first - payload type of the first payload
808 * cookie - data passed to callback
809 */
810 pkt_walk_ret_t
811 pkt_payload_walk(uchar_t *restrict data, size_t len, pkt_walk_fn_t cb,
812 uint8_t first, void *restrict cookie)
813 {
814 uchar_t *ptr = data;
815 uint8_t paytype = first;
816 pkt_walk_ret_t ret = PKT_WALK_OK;
817
818 /* 0 is used for both IKEv1 and IKEv2 to indicate last payload */
819 while (len > 0 && paytype != 0) {
820 ike_payload_t pay = { 0 };
821
822 if (len < sizeof (pay)) {
823 /* XXX: truncated */
824 return (PKT_WALK_ERROR);
825 }
826
827 (void) memcpy(&pay, ptr, sizeof (pay));
828 ptr += sizeof (pay);
829
830 /* this length includes the size of the header */
831 pay.pay_length = ntohs(pay.pay_length);
832
833 if (pay.pay_length > len) {
834 /* XXX: truncated */
835 return (PKT_WALK_ERROR);
836 }
837
838 if (cb != NULL) {
839 ret = cb(paytype, pay.pay_reserved, ptr, pay.pay_length,
840 cookie);
841 if (ret != PKT_WALK_OK)
842 break;
843 }
844
845 paytype = pay.pay_next;
846 ptr += pay.pay_length;
847 len -= pay.pay_length;
848 }
849
850 if (ret == PKT_WALK_OK && len > 0) {
851 /* XXX: extra data */
852 return (PKT_WALK_ERROR);
853 }
854
855 return ((ret != PKT_WALK_OK) ? PKT_WALK_ERROR : PKT_WALK_OK);
856 }
857
858 static size_t
859 pay_to_idx(pkt_t *pkt, pkt_payload_t *pay)
860 {
861 if (pay == NULL)
862 return (0);
863
864 size_t idx = 0;
865 if (pay >= pkt->pkt_payloads &&
866 pay < &pkt->pkt_payloads[PKT_PAYLOAD_NUM]) {
867 idx = (size_t)(pay - pkt->pkt_payloads);
868 VERIFY3U(idx, <, pkt->pkt_payload_count);
869 return (idx);
870 }
871
872 VERIFY3P(pay, >=, pkt->pkt_payload_extra);
873 VERIFY3P(pay, <, pkt->pkt_payload_extra + pkt->pkt_payload_count -
874 PKT_PAYLOAD_NUM);
875 idx = (size_t)(pay - pkt->pkt_payload_extra);
876 return (idx);
877 }
878
879 pkt_payload_t *
880 pkt_get_payload(pkt_t *pkt, int type, pkt_payload_t *start)
881 {
882 size_t idx = pay_to_idx(pkt, start);
883
884 VERIFY3S(type, >=, 0);
885 VERIFY3S(type, <, 0xff);
886
887 if (start != NULL)
888 idx++;
889
890 for (size_t i = idx; i < pkt->pkt_payload_count; i++) {
891 pkt_payload_t *pay = pkt_payload(pkt, i);
892
893 if (pay->pp_type == (uint8_t)type)
894 return (pay);
895 }
896 return (NULL);
897 }
898
899 static size_t
900 notify_to_idx(pkt_t *pkt, pkt_notify_t *n)
901 {
902 if (n == NULL)
903 return (0);
904
905 size_t idx = 0;
906
907 if (n >= pkt->pkt_notify &&
908 n < &pkt->pkt_notify[PKT_NOTIFY_NUM]) {
909 idx = (size_t)(n - pkt->pkt_notify);
910 VERIFY3U(idx, <, pkt->pkt_notify_count);
911 return (idx);
912 }
913
914 VERIFY3P(n, >=, pkt->pkt_notify_extra);
915 VERIFY3P(n, <, pkt->pkt_notify_extra + pkt->pkt_notify_count -
916 PKT_NOTIFY_NUM);
917
918 idx = (size_t)(n - pkt->pkt_notify_extra);
919 return (idx);
920 }
921
922 pkt_notify_t *
923 pkt_get_notify(pkt_t *pkt, int type, pkt_notify_t *start)
924 {
925 size_t idx = notify_to_idx(pkt, start);
926
927 VERIFY3S(type, >=, 0);
928 VERIFY3S(type, <=, USHRT_MAX);
929
930 if (start != NULL)
931 idx++;
932
933 for (size_t i = idx; i < pkt->pkt_notify_count; i++) {
934 pkt_notify_t *n = pkt_notify(pkt, i);
935
936 if (n->pn_type == (uint16_t)type)
937 return (n);
938 }
939 return (NULL);
940 }
941
942 static int
943 pkt_ctor(void *buf, void *ignore, int flags)
944 {
945 _NOTE(ARGUNUSUED(ignore, flags))
946
947 pkt_t *pkt = buf;
948 (void) memset(pkt, 0, sizeof (pkt_t));
949 pkt->pkt_ptr = pkt_start(pkt);
950 return (0);
951 }
952
953 void
954 pkt_free(pkt_t *pkt)
955 {
956 if (pkt == NULL)
957 return;
958
959 size_t len = 0;
960 if (pkt->pkt_payload_extra != NULL) {
961 len = pkt->pkt_payload_count - PKT_PAYLOAD_NUM;
962 len *= sizeof (pkt_payload_t);
963 umem_free(pkt->pkt_payload_extra, len);
964 }
965
966 if (pkt->pkt_notify_extra != NULL) {
967 len = pkt->pkt_notify_count - PKT_NOTIFY_NUM;
968 len *= sizeof (pkt_notify_t);
969 umem_free(pkt->pkt_notify_extra, len);
970 }
971
972 pkt_ctor(pkt, NULL, 0);
973 umem_cache_free(pkt_cache, pkt);
974 }
975
/*
 * Create the umem cache used for all pkt_t allocations (64-bit aligned
 * so the embedded IKE header fields line up).  Must be called before any
 * pkt_*_alloc(); aborts the process via err() on failure.
 */
void
pkt_init(void)
{
	pkt_cache = umem_cache_create("pkt cache", sizeof (pkt_t),
	    sizeof (uint64_t), pkt_ctor, NULL, NULL, NULL, NULL, 0);
	if (pkt_cache == NULL)
		err(EXIT_FAILURE, "Unable to create pkt umem cache");
}
984
/* Tear down the pkt umem cache created by pkt_init(). */
void
pkt_fini(void)
{
	umem_cache_destroy(pkt_cache);
}
990
991 void
992 put32(pkt_t *pkt, uint32_t val)
993 {
994 ASSERT3U(pkt_write_left(pkt), >=, sizeof (uint32_t));
995 *(pkt->pkt_ptr++) = (uchar_t)((val >> 24) & (uint32_t)0xff);
996 *(pkt->pkt_ptr++) = (uchar_t)((val >> 16) & (uint32_t)0xff);
997 *(pkt->pkt_ptr++) = (uchar_t)((val >> 8) & (uint32_t)0xff);
998 *(pkt->pkt_ptr++) = (uchar_t)(val & (uint32_t)0xff);
999 }
1000
1001 void
1002 put64(pkt_t *pkt, uint64_t val)
1003 {
1004 ASSERT3U(pkt_write_left(pkt), >=, sizeof (uint64_t));
1005 *(pkt->pkt_ptr++) = (uchar_t)((val >> 56) & (uint64_t)0xff);
1006 *(pkt->pkt_ptr++) = (uchar_t)((val >> 48) & (uint64_t)0xff);
1007 *(pkt->pkt_ptr++) = (uchar_t)((val >> 40) & (uint64_t)0xff);
1008 *(pkt->pkt_ptr++) = (uchar_t)((val >> 32) & (uint64_t)0xff);
1009 *(pkt->pkt_ptr++) = (uchar_t)((val >> 24) & (uint64_t)0xff);
1010 *(pkt->pkt_ptr++) = (uchar_t)((val >> 16) & (uint64_t)0xff);
1011 *(pkt->pkt_ptr++) = (uchar_t)((val >> 8) & (uint64_t)0xff);
1012 *(pkt->pkt_ptr++) = (uchar_t)(val & (uint64_t)0xff);
1013 }
1014
1015 extern uchar_t *pkt_start(pkt_t *);
1016 extern size_t pkt_len(const pkt_t *);
1017 extern size_t pkt_write_left(const pkt_t *);
1018 extern size_t pkt_read_left(const pkt_t *, const uchar_t *);
1019 extern pkt_payload_t *pkt_payload(pkt_t *, uint16_t);
1020 extern pkt_notify_t *pkt_notify(pkt_t *, uint16_t);