8529 Extended and regular SADB_ACQUIREs should share address extension code
Portions contributed by: Bayard Bell <buffer.g.overflow@gmail.com>
--- old/usr/src/uts/common/inet/ip/spd.c
+++ new/usr/src/uts/common/inet/ip/spd.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 * Copyright (c) 2012 Nexenta Systems, Inc. All rights reserved.
25 25 * Copyright (c) 2016 by Delphix. All rights reserved.
26 + * Copyright (c) 2017, Joyent, Inc.
26 27 */
27 28
28 29 /*
29 30 * IPsec Security Policy Database.
30 31 *
31 32 * This module maintains the SPD and provides routines used by ip and ip6
32 33 * to apply IPsec policy to inbound and outbound datagrams.
33 34 */
34 35
35 36 #include <sys/types.h>
36 37 #include <sys/stream.h>
37 38 #include <sys/stropts.h>
38 39 #include <sys/sysmacros.h>
39 40 #include <sys/strsubr.h>
40 41 #include <sys/strsun.h>
41 42 #include <sys/strlog.h>
42 43 #include <sys/strsun.h>
43 44 #include <sys/cmn_err.h>
44 45 #include <sys/zone.h>
45 46
46 47 #include <sys/systm.h>
47 48 #include <sys/param.h>
48 49 #include <sys/kmem.h>
49 50 #include <sys/ddi.h>
50 51
51 52 #include <sys/crypto/api.h>
52 53
53 54 #include <inet/common.h>
54 55 #include <inet/mi.h>
55 56
56 57 #include <netinet/ip6.h>
57 58 #include <netinet/icmp6.h>
58 59 #include <netinet/udp.h>
59 60
60 61 #include <inet/ip.h>
61 62 #include <inet/ip6.h>
62 63
63 64 #include <net/pfkeyv2.h>
64 65 #include <net/pfpolicy.h>
65 66 #include <inet/sadb.h>
66 67 #include <inet/ipsec_impl.h>
67 68
68 69 #include <inet/ip_impl.h> /* For IP_MOD_ID */
69 70
70 71 #include <inet/ipsecah.h>
71 72 #include <inet/ipsecesp.h>
72 73 #include <inet/ipdrop.h>
73 74 #include <inet/ipclassifier.h>
74 75 #include <inet/iptun.h>
75 76 #include <inet/iptun/iptun_impl.h>
76 77
77 78 static void ipsec_update_present_flags(ipsec_stack_t *);
78 79 static ipsec_act_t *ipsec_act_wildcard_expand(ipsec_act_t *, uint_t *,
79 80 netstack_t *);
80 81 static mblk_t *ipsec_check_ipsecin_policy(mblk_t *, ipsec_policy_t *,
81 82 ipha_t *, ip6_t *, uint64_t, ip_recv_attr_t *, netstack_t *);
82 83 static void ipsec_action_free_table(ipsec_action_t *);
83 84 static void ipsec_action_reclaim(void *);
84 85 static void ipsec_action_reclaim_stack(ipsec_stack_t *);
85 86 static void ipsid_init(netstack_t *);
86 87 static void ipsid_fini(netstack_t *);
87 88
88 89 /* sel_flags values for ipsec_init_inbound_sel(). */
89 90 #define SEL_NONE 0x0000
90 91 #define SEL_PORT_POLICY 0x0001
91 92 #define SEL_IS_ICMP 0x0002
92 93 #define SEL_TUNNEL_MODE 0x0004
93 94 #define SEL_POST_FRAG 0x0008
94 95
95 96 /* Return values for ipsec_init_inbound_sel(). */
96 97 typedef enum { SELRET_NOMEM, SELRET_BADPKT, SELRET_SUCCESS, SELRET_TUNFRAG}
97 98 selret_t;
98 99
99 100 static selret_t ipsec_init_inbound_sel(ipsec_selector_t *, mblk_t *,
100 101 ipha_t *, ip6_t *, uint8_t);
101 102
102 103 static boolean_t ipsec_check_ipsecin_action(ip_recv_attr_t *, mblk_t *,
103 104 struct ipsec_action_s *, ipha_t *ipha, ip6_t *ip6h, const char **,
104 105 kstat_named_t **, netstack_t *);
105 106 static void ipsec_unregister_prov_update(void);
106 107 static void ipsec_prov_update_callback_stack(uint32_t, void *, netstack_t *);
107 108 static boolean_t ipsec_compare_action(ipsec_policy_t *, ipsec_policy_t *);
108 109 static uint32_t selector_hash(ipsec_selector_t *, ipsec_policy_root_t *);
109 110 static boolean_t ipsec_kstat_init(ipsec_stack_t *);
110 111 static void ipsec_kstat_destroy(ipsec_stack_t *);
111 112 static int ipsec_free_tables(ipsec_stack_t *);
112 113 static int tunnel_compare(const void *, const void *);
113 114 static void ipsec_freemsg_chain(mblk_t *);
114 115 static void ip_drop_packet_chain(mblk_t *, boolean_t, ill_t *,
115 116 struct kstat_named *, ipdropper_t *);
116 117 static boolean_t ipsec_kstat_init(ipsec_stack_t *);
117 118 static void ipsec_kstat_destroy(ipsec_stack_t *);
118 119 static int ipsec_free_tables(ipsec_stack_t *);
119 120 static int tunnel_compare(const void *, const void *);
120 121 static void ipsec_freemsg_chain(mblk_t *);
121 122
122 123 /*
123 124 * Selector hash table is statically sized at module load time.
124 125 * we default to 251 buckets, which is the largest prime number under 255
125 126 */
126 127
127 128 #define IPSEC_SPDHASH_DEFAULT 251
128 129
129 130 /* SPD hash-size tunable per tunnel. */
130 131 #define TUN_SPDHASH_DEFAULT 5
131 132
132 133 uint32_t ipsec_spd_hashsize;
133 134 uint32_t tun_spd_hashsize;
134 135
135 136 #define IPSEC_SEL_NOHASH ((uint32_t)(~0))
136 137
137 138 /*
138 139 * Handle global across all stack instances
139 140 */
140 141 static crypto_notify_handle_t prov_update_handle = NULL;
141 142
142 143 static kmem_cache_t *ipsec_action_cache;
143 144 static kmem_cache_t *ipsec_sel_cache;
144 145 static kmem_cache_t *ipsec_pol_cache;
145 146
146 147 /* Frag cache prototypes */
147 148 static void ipsec_fragcache_clean(ipsec_fragcache_t *, ipsec_stack_t *);
148 149 static ipsec_fragcache_entry_t *fragcache_delentry(int,
149 150 ipsec_fragcache_entry_t *, ipsec_fragcache_t *, ipsec_stack_t *);
150 151 boolean_t ipsec_fragcache_init(ipsec_fragcache_t *);
151 152 void ipsec_fragcache_uninit(ipsec_fragcache_t *, ipsec_stack_t *ipss);
152 153 mblk_t *ipsec_fragcache_add(ipsec_fragcache_t *, mblk_t *, mblk_t *,
153 154 int, ipsec_stack_t *);
154 155
155 156 int ipsec_hdr_pullup_needed = 0;
156 157 int ipsec_weird_null_inbound_policy = 0;
157 158
158 159 #define ALGBITS_ROUND_DOWN(x, align) (((x)/(align))*(align))
159 160 #define ALGBITS_ROUND_UP(x, align) ALGBITS_ROUND_DOWN((x)+(align)-1, align)
160 161
161 162 /*
162 163 * Inbound traffic should have matching identities for both SA's.
163 164 */
164 165
165 166 #define SA_IDS_MATCH(sa1, sa2) \
166 167 (((sa1) == NULL) || ((sa2) == NULL) || \
167 168 (((sa1)->ipsa_src_cid == (sa2)->ipsa_src_cid) && \
168 169 (((sa1)->ipsa_dst_cid == (sa2)->ipsa_dst_cid))))
169 170
170 171 /*
171 172 * IPv6 Fragments
172 173 */
173 174 #define IS_V6_FRAGMENT(ipp) (ipp.ipp_fields & IPPF_FRAGHDR)
174 175
175 176 /*
176 177 * Policy failure messages.
177 178 */
178 179 static char *ipsec_policy_failure_msgs[] = {
179 180
180 181 /* IPSEC_POLICY_NOT_NEEDED */
181 182 "%s: Dropping the datagram because the incoming packet "
182 183 "is %s, but the recipient expects clear; Source %s, "
183 184 "Destination %s.\n",
184 185
185 186 /* IPSEC_POLICY_MISMATCH */
186 187 "%s: Policy Failure for the incoming packet (%s); Source %s, "
187 188 "Destination %s.\n",
188 189
189 190 /* IPSEC_POLICY_AUTH_NOT_NEEDED */
190 191 "%s: Authentication present while not expected in the "
191 192 "incoming %s packet; Source %s, Destination %s.\n",
192 193
193 194 /* IPSEC_POLICY_ENCR_NOT_NEEDED */
194 195 "%s: Encryption present while not expected in the "
195 196 "incoming %s packet; Source %s, Destination %s.\n",
196 197
197 198 /* IPSEC_POLICY_SE_NOT_NEEDED */
198 199 "%s: Self-Encapsulation present while not expected in the "
199 200 "incoming %s packet; Source %s, Destination %s.\n",
200 201 };
201 202
202 203 /*
203 204 * General overviews:
204 205 *
205 206 * Locking:
206 207 *
207 208 * All of the system policy structures are protected by a single
208 209 * rwlock. These structures are threaded in a
209 210 * fairly complex fashion and are not expected to change on a
210 211 * regular basis, so this should not cause scaling/contention
211 212 * problems. As a result, policy checks should (hopefully) be MT-hot.
212 213 *
213 214 * Allocation policy:
214 215 *
215 216 * We use custom kmem cache types for the various
216 217 * bits & pieces of the policy data structures. All allocations
217 218 * use KM_NOSLEEP instead of KM_SLEEP for policy allocation. The
218 219 * policy table is of potentially unbounded size, so we don't
219 220 * want to provide a way to hog all system memory with policy
220 221 * entries..
221 222 */
222 223
223 224 /* Convenient functions for freeing or dropping a b_next linked mblk chain */
224 225
225 226 /* Free all messages in an mblk chain */
226 227 static void
227 228 ipsec_freemsg_chain(mblk_t *mp)
228 229 {
229 230 mblk_t *mpnext;
230 231 while (mp != NULL) {
231 232 ASSERT(mp->b_prev == NULL);
232 233 mpnext = mp->b_next;
233 234 mp->b_next = NULL;
234 235 freemsg(mp);
235 236 mp = mpnext;
236 237 }
237 238 }
238 239
239 240 /*
240 241 * ip_drop all messages in an mblk chain
241 242 * Can handle a b_next chain of ip_recv_attr_t mblks, or just a b_next chain
242 243 * of data.
243 244 */
244 245 static void
245 246 ip_drop_packet_chain(mblk_t *mp, boolean_t inbound, ill_t *ill,
246 247 struct kstat_named *counter, ipdropper_t *who_called)
247 248 {
248 249 mblk_t *mpnext;
249 250 while (mp != NULL) {
250 251 ASSERT(mp->b_prev == NULL);
251 252 mpnext = mp->b_next;
252 253 mp->b_next = NULL;
253 254 if (ip_recv_attr_is_mblk(mp))
254 255 mp = ip_recv_attr_free_mblk(mp);
255 256 ip_drop_packet(mp, inbound, ill, counter, who_called);
256 257 mp = mpnext;
257 258 }
258 259 }
259 260
260 261 /*
261 262 * AVL tree comparison function.
262 263 * the in-kernel avl assumes unique keys for all objects.
263 264 * Since sometimes policy will duplicate rules, we may insert
264 265 * multiple rules with the same rule id, so we need a tie-breaker.
265 266 */
266 267 static int
267 268 ipsec_policy_cmpbyid(const void *a, const void *b)
268 269 {
269 270 const ipsec_policy_t *ipa, *ipb;
270 271 uint64_t idxa, idxb;
271 272
272 273 ipa = (const ipsec_policy_t *)a;
273 274 ipb = (const ipsec_policy_t *)b;
274 275 idxa = ipa->ipsp_index;
275 276 idxb = ipb->ipsp_index;
276 277
277 278 if (idxa < idxb)
278 279 return (-1);
279 280 if (idxa > idxb)
280 281 return (1);
281 282 /*
282 283 * Tie-breaker #1: All installed policy rules have a non-NULL
283 284 * ipsl_sel (selector set), so an entry with a NULL ipsp_sel is not
284 285 * actually in-tree but rather a template node being used in
285 286 * an avl_find query; see ipsec_policy_delete(). This gives us
286 287 * a placeholder in the ordering just before the first entry with
287 288 * a key >= the one we're looking for, so we can walk forward from
288 289 * that point to get the remaining entries with the same id.
289 290 */
290 291 if ((ipa->ipsp_sel == NULL) && (ipb->ipsp_sel != NULL))
291 292 return (-1);
292 293 if ((ipb->ipsp_sel == NULL) && (ipa->ipsp_sel != NULL))
293 294 return (1);
294 295 /*
295 296 * At most one of the arguments to the comparison should have a
296 297 * NULL selector pointer; if not, the tree is broken.
297 298 */
298 299 ASSERT(ipa->ipsp_sel != NULL);
299 300 ASSERT(ipb->ipsp_sel != NULL);
300 301 /*
301 302 * Tie-breaker #2: use the virtual address of the policy node
302 303 * to arbitrarily break ties. Since we use the new tree node in
303 304 * the avl_find() in ipsec_insert_always, the new node will be
304 305 * inserted into the tree in the right place in the sequence.
305 306 */
306 307 if (ipa < ipb)
307 308 return (-1);
308 309 if (ipa > ipb)
309 310 return (1);
310 311 return (0);
311 312 }
312 313
313 314 /*
314 315 * Free what ipsec_alloc_table allocated.
315 316 */
316 317 void
317 318 ipsec_polhead_free_table(ipsec_policy_head_t *iph)
318 319 {
319 320 int dir;
320 321 int i;
321 322
322 323 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
323 324 ipsec_policy_root_t *ipr = &iph->iph_root[dir];
324 325
325 326 if (ipr->ipr_hash == NULL)
326 327 continue;
327 328
328 329 for (i = 0; i < ipr->ipr_nchains; i++) {
329 330 ASSERT(ipr->ipr_hash[i].hash_head == NULL);
330 331 }
331 332 kmem_free(ipr->ipr_hash, ipr->ipr_nchains *
332 333 sizeof (ipsec_policy_hash_t));
333 334 ipr->ipr_hash = NULL;
334 335 }
335 336 }
336 337
337 338 void
338 339 ipsec_polhead_destroy(ipsec_policy_head_t *iph)
339 340 {
340 341 int dir;
341 342
342 343 avl_destroy(&iph->iph_rulebyid);
343 344 rw_destroy(&iph->iph_lock);
344 345
345 346 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
346 347 ipsec_policy_root_t *ipr = &iph->iph_root[dir];
347 348 int chain;
348 349
349 350 for (chain = 0; chain < ipr->ipr_nchains; chain++)
350 351 mutex_destroy(&(ipr->ipr_hash[chain].hash_lock));
351 352
352 353 }
353 354 ipsec_polhead_free_table(iph);
354 355 }
355 356
356 357 /*
357 358 * Free the IPsec stack instance.
358 359 */
359 360 /* ARGSUSED */
360 361 static void
361 362 ipsec_stack_fini(netstackid_t stackid, void *arg)
362 363 {
363 364 ipsec_stack_t *ipss = (ipsec_stack_t *)arg;
364 365 void *cookie;
365 366 ipsec_tun_pol_t *node;
366 367 netstack_t *ns = ipss->ipsec_netstack;
367 368 int i;
368 369 ipsec_algtype_t algtype;
369 370
370 371 ipsec_loader_destroy(ipss);
371 372
372 373 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_WRITER);
373 374 /*
374 375 * It's possible we can just ASSERT() the tree is empty. After all,
375 376 * we aren't called until IP is ready to unload (and presumably all
376 377 * tunnels have been unplumbed). But we'll play it safe for now, the
377 378 * loop will just exit immediately if it's empty.
378 379 */
379 380 cookie = NULL;
380 381 while ((node = (ipsec_tun_pol_t *)
381 382 avl_destroy_nodes(&ipss->ipsec_tunnel_policies,
382 383 &cookie)) != NULL) {
383 384 ITP_REFRELE(node, ns);
384 385 }
385 386 avl_destroy(&ipss->ipsec_tunnel_policies);
386 387 rw_exit(&ipss->ipsec_tunnel_policy_lock);
387 388 rw_destroy(&ipss->ipsec_tunnel_policy_lock);
388 389
389 390 ipsec_config_flush(ns);
390 391
391 392 ipsec_kstat_destroy(ipss);
392 393
393 394 ip_drop_unregister(&ipss->ipsec_dropper);
394 395
395 396 ip_drop_unregister(&ipss->ipsec_spd_dropper);
396 397 ip_drop_destroy(ipss);
397 398 /*
398 399 * Globals start with ref == 1 to prevent IPPH_REFRELE() from
399 400 * attempting to free them, hence they should have 1 now.
400 401 */
401 402 ipsec_polhead_destroy(&ipss->ipsec_system_policy);
402 403 ASSERT(ipss->ipsec_system_policy.iph_refs == 1);
403 404 ipsec_polhead_destroy(&ipss->ipsec_inactive_policy);
404 405 ASSERT(ipss->ipsec_inactive_policy.iph_refs == 1);
405 406
406 407 for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++) {
407 408 ipsec_action_free_table(ipss->ipsec_action_hash[i].hash_head);
408 409 ipss->ipsec_action_hash[i].hash_head = NULL;
409 410 mutex_destroy(&(ipss->ipsec_action_hash[i].hash_lock));
410 411 }
411 412
412 413 for (i = 0; i < ipss->ipsec_spd_hashsize; i++) {
413 414 ASSERT(ipss->ipsec_sel_hash[i].hash_head == NULL);
414 415 mutex_destroy(&(ipss->ipsec_sel_hash[i].hash_lock));
415 416 }
416 417
417 418 rw_enter(&ipss->ipsec_alg_lock, RW_WRITER);
418 419 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype ++) {
419 420 int nalgs = ipss->ipsec_nalgs[algtype];
420 421
421 422 for (i = 0; i < nalgs; i++) {
422 423 if (ipss->ipsec_alglists[algtype][i] != NULL)
423 424 ipsec_alg_unreg(algtype, i, ns);
424 425 }
425 426 }
426 427 rw_exit(&ipss->ipsec_alg_lock);
427 428 rw_destroy(&ipss->ipsec_alg_lock);
428 429
429 430 ipsid_gc(ns);
430 431 ipsid_fini(ns);
431 432
432 433 (void) ipsec_free_tables(ipss);
433 434 kmem_free(ipss, sizeof (*ipss));
434 435 }
435 436
436 437 void
437 438 ipsec_policy_g_destroy(void)
438 439 {
439 440 kmem_cache_destroy(ipsec_action_cache);
440 441 kmem_cache_destroy(ipsec_sel_cache);
441 442 kmem_cache_destroy(ipsec_pol_cache);
442 443
443 444 ipsec_unregister_prov_update();
444 445
445 446 netstack_unregister(NS_IPSEC);
446 447 }
447 448
448 449
449 450 /*
450 451 * Free what ipsec_alloc_tables allocated.
451 452 * Called when table allocation fails to free the table.
452 453 */
453 454 static int
454 455 ipsec_free_tables(ipsec_stack_t *ipss)
455 456 {
456 457 int i;
457 458
458 459 if (ipss->ipsec_sel_hash != NULL) {
459 460 for (i = 0; i < ipss->ipsec_spd_hashsize; i++) {
460 461 ASSERT(ipss->ipsec_sel_hash[i].hash_head == NULL);
461 462 }
462 463 kmem_free(ipss->ipsec_sel_hash, ipss->ipsec_spd_hashsize *
463 464 sizeof (*ipss->ipsec_sel_hash));
464 465 ipss->ipsec_sel_hash = NULL;
465 466 ipss->ipsec_spd_hashsize = 0;
466 467 }
467 468 ipsec_polhead_free_table(&ipss->ipsec_system_policy);
468 469 ipsec_polhead_free_table(&ipss->ipsec_inactive_policy);
469 470
470 471 return (ENOMEM);
471 472 }
472 473
473 474 /*
474 475 * Attempt to allocate the tables in a single policy head.
475 476 * Return nonzero on failure after cleaning up any work in progress.
476 477 */
477 478 int
478 479 ipsec_alloc_table(ipsec_policy_head_t *iph, int nchains, int kmflag,
479 480 boolean_t global_cleanup, netstack_t *ns)
480 481 {
481 482 int dir;
482 483
483 484 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
484 485 ipsec_policy_root_t *ipr = &iph->iph_root[dir];
485 486
486 487 ipr->ipr_nchains = nchains;
487 488 ipr->ipr_hash = kmem_zalloc(nchains *
488 489 sizeof (ipsec_policy_hash_t), kmflag);
489 490 if (ipr->ipr_hash == NULL)
490 491 return (global_cleanup ?
491 492 ipsec_free_tables(ns->netstack_ipsec) :
492 493 ENOMEM);
493 494 }
494 495 return (0);
495 496 }
496 497
497 498 /*
498 499 * Attempt to allocate the various tables. Return nonzero on failure
499 500 * after cleaning up any work in progress.
500 501 */
501 502 static int
502 503 ipsec_alloc_tables(int kmflag, netstack_t *ns)
503 504 {
504 505 int error;
505 506 ipsec_stack_t *ipss = ns->netstack_ipsec;
506 507
507 508 error = ipsec_alloc_table(&ipss->ipsec_system_policy,
508 509 ipss->ipsec_spd_hashsize, kmflag, B_TRUE, ns);
509 510 if (error != 0)
510 511 return (error);
511 512
512 513 error = ipsec_alloc_table(&ipss->ipsec_inactive_policy,
513 514 ipss->ipsec_spd_hashsize, kmflag, B_TRUE, ns);
514 515 if (error != 0)
515 516 return (error);
516 517
517 518 ipss->ipsec_sel_hash = kmem_zalloc(ipss->ipsec_spd_hashsize *
518 519 sizeof (*ipss->ipsec_sel_hash), kmflag);
519 520
520 521 if (ipss->ipsec_sel_hash == NULL)
521 522 return (ipsec_free_tables(ipss));
522 523
523 524 return (0);
524 525 }
525 526
526 527 /*
527 528 * After table allocation, initialize a policy head.
528 529 */
529 530 void
530 531 ipsec_polhead_init(ipsec_policy_head_t *iph, int nchains)
531 532 {
532 533 int dir, chain;
533 534
534 535 rw_init(&iph->iph_lock, NULL, RW_DEFAULT, NULL);
535 536 avl_create(&iph->iph_rulebyid, ipsec_policy_cmpbyid,
536 537 sizeof (ipsec_policy_t), offsetof(ipsec_policy_t, ipsp_byid));
537 538
538 539 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
539 540 ipsec_policy_root_t *ipr = &iph->iph_root[dir];
540 541 ipr->ipr_nchains = nchains;
541 542
542 543 for (chain = 0; chain < nchains; chain++) {
543 544 mutex_init(&(ipr->ipr_hash[chain].hash_lock),
544 545 NULL, MUTEX_DEFAULT, NULL);
545 546 }
546 547 }
547 548 }
548 549
549 550 static boolean_t
550 551 ipsec_kstat_init(ipsec_stack_t *ipss)
551 552 {
552 553 ipss->ipsec_ksp = kstat_create_netstack("ip", 0, "ipsec_stat", "net",
553 554 KSTAT_TYPE_NAMED, sizeof (ipsec_kstats_t) / sizeof (kstat_named_t),
554 555 KSTAT_FLAG_PERSISTENT, ipss->ipsec_netstack->netstack_stackid);
555 556
556 557 if (ipss->ipsec_ksp == NULL || ipss->ipsec_ksp->ks_data == NULL)
557 558 return (B_FALSE);
558 559
559 560 ipss->ipsec_kstats = ipss->ipsec_ksp->ks_data;
560 561
561 562 #define KI(x) kstat_named_init(&ipss->ipsec_kstats->x, #x, KSTAT_DATA_UINT64)
562 563 KI(esp_stat_in_requests);
563 564 KI(esp_stat_in_discards);
564 565 KI(esp_stat_lookup_failure);
565 566 KI(ah_stat_in_requests);
566 567 KI(ah_stat_in_discards);
567 568 KI(ah_stat_lookup_failure);
568 569 KI(sadb_acquire_maxpackets);
569 570 KI(sadb_acquire_qhiwater);
570 571 #undef KI
571 572
572 573 kstat_install(ipss->ipsec_ksp);
573 574 return (B_TRUE);
574 575 }
575 576
576 577 static void
577 578 ipsec_kstat_destroy(ipsec_stack_t *ipss)
578 579 {
579 580 kstat_delete_netstack(ipss->ipsec_ksp,
580 581 ipss->ipsec_netstack->netstack_stackid);
581 582 ipss->ipsec_kstats = NULL;
582 583
583 584 }
584 585
585 586 /*
586 587 * Initialize the IPsec stack instance.
587 588 */
588 589 /* ARGSUSED */
589 590 static void *
590 591 ipsec_stack_init(netstackid_t stackid, netstack_t *ns)
591 592 {
592 593 ipsec_stack_t *ipss;
593 594 int i;
594 595
595 596 ipss = (ipsec_stack_t *)kmem_zalloc(sizeof (*ipss), KM_SLEEP);
596 597 ipss->ipsec_netstack = ns;
597 598
598 599 /*
599 600 * FIXME: netstack_ipsec is used by some of the routines we call
600 601 * below, but it isn't set until this routine returns.
601 602 * Either we introduce optional xxx_stack_alloc() functions
602 603 * that will be called by the netstack framework before xxx_stack_init,
603 604 * or we switch spd.c and sadb.c to operate on ipsec_stack_t
604 605 * (latter has some include file order issues for sadb.h, but makes
605 606 * sense if we merge some of the ipsec related stack_t's together.
606 607 * sense if we merge some of the ipsec related stack_t's together).
607 608 ns->netstack_ipsec = ipss;
608 609
609 610 /*
610 611 * Make two attempts to allocate policy hash tables; try it at
611 612 * the "preferred" size (may be set in /etc/system) first,
612 613 * then fall back to the default size.
613 614 */
614 615 ipss->ipsec_spd_hashsize = (ipsec_spd_hashsize == 0) ?
615 616 IPSEC_SPDHASH_DEFAULT : ipsec_spd_hashsize;
616 617
617 618 if (ipsec_alloc_tables(KM_NOSLEEP, ns) != 0) {
618 619 cmn_err(CE_WARN,
619 620 "Unable to allocate %d entry IPsec policy hash table",
620 621 ipss->ipsec_spd_hashsize);
621 622 ipss->ipsec_spd_hashsize = IPSEC_SPDHASH_DEFAULT;
622 623 cmn_err(CE_WARN, "Falling back to %d entries",
623 624 ipss->ipsec_spd_hashsize);
624 625 (void) ipsec_alloc_tables(KM_SLEEP, ns);
625 626 }
626 627
627 628 /* Just set a default for tunnels. */
628 629 ipss->ipsec_tun_spd_hashsize = (tun_spd_hashsize == 0) ?
629 630 TUN_SPDHASH_DEFAULT : tun_spd_hashsize;
630 631
631 632 ipsid_init(ns);
632 633 /*
633 634 * Globals need ref == 1 to prevent IPPH_REFRELE() from attempting
634 635 * to free them.
635 636 */
636 637 ipss->ipsec_system_policy.iph_refs = 1;
637 638 ipss->ipsec_inactive_policy.iph_refs = 1;
638 639 ipsec_polhead_init(&ipss->ipsec_system_policy,
639 640 ipss->ipsec_spd_hashsize);
640 641 ipsec_polhead_init(&ipss->ipsec_inactive_policy,
641 642 ipss->ipsec_spd_hashsize);
642 643 rw_init(&ipss->ipsec_tunnel_policy_lock, NULL, RW_DEFAULT, NULL);
643 644 avl_create(&ipss->ipsec_tunnel_policies, tunnel_compare,
644 645 sizeof (ipsec_tun_pol_t), 0);
645 646
646 647 ipss->ipsec_next_policy_index = 1;
647 648
648 649 rw_init(&ipss->ipsec_system_policy.iph_lock, NULL, RW_DEFAULT, NULL);
649 650 rw_init(&ipss->ipsec_inactive_policy.iph_lock, NULL, RW_DEFAULT, NULL);
650 651
651 652 for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++)
652 653 mutex_init(&(ipss->ipsec_action_hash[i].hash_lock),
653 654 NULL, MUTEX_DEFAULT, NULL);
654 655
655 656 for (i = 0; i < ipss->ipsec_spd_hashsize; i++)
656 657 mutex_init(&(ipss->ipsec_sel_hash[i].hash_lock),
657 658 NULL, MUTEX_DEFAULT, NULL);
658 659
659 660 rw_init(&ipss->ipsec_alg_lock, NULL, RW_DEFAULT, NULL);
660 661 for (i = 0; i < IPSEC_NALGTYPES; i++) {
661 662 ipss->ipsec_nalgs[i] = 0;
662 663 }
663 664
664 665 ip_drop_init(ipss);
665 666 ip_drop_register(&ipss->ipsec_spd_dropper, "IPsec SPD");
666 667
667 668 /* IP's IPsec code calls the packet dropper */
668 669 ip_drop_register(&ipss->ipsec_dropper, "IP IPsec processing");
669 670
670 671 (void) ipsec_kstat_init(ipss);
671 672
672 673 ipsec_loader_init(ipss);
673 674 ipsec_loader_start(ipss);
674 675
675 676 return (ipss);
676 677 }
677 678
678 679 /* Global across all stack instances */
679 680 void
680 681 ipsec_policy_g_init(void)
681 682 {
682 683 ipsec_action_cache = kmem_cache_create("ipsec_actions",
683 684 sizeof (ipsec_action_t), _POINTER_ALIGNMENT, NULL, NULL,
684 685 ipsec_action_reclaim, NULL, NULL, 0);
685 686 ipsec_sel_cache = kmem_cache_create("ipsec_selectors",
686 687 sizeof (ipsec_sel_t), _POINTER_ALIGNMENT, NULL, NULL,
687 688 NULL, NULL, NULL, 0);
688 689 ipsec_pol_cache = kmem_cache_create("ipsec_policy",
689 690 sizeof (ipsec_policy_t), _POINTER_ALIGNMENT, NULL, NULL,
690 691 NULL, NULL, NULL, 0);
691 692
692 693 /*
693 694 * We want to be informed each time a stack is created or
694 695 * destroyed in the kernel, so we can maintain the
695 696 * set of ipsec_stack_t's.
696 697 */
697 698 netstack_register(NS_IPSEC, ipsec_stack_init, NULL, ipsec_stack_fini);
698 699 }
699 700
700 701 /*
701 702 * Sort algorithm lists.
702 703 *
703 704 * I may need to split this based on
704 705 * authentication/encryption, and I may wish to have an administrator
705 706 * configure this list. Hold on to some NDD variables...
706 707 *
707 708 * XXX For now, sort on minimum key size (GAG!). While minimum key size is
708 709 * not the ideal metric, it's the only quantifiable measure available.
709 710 * We need a better metric for sorting algorithms by preference.
710 711 */
711 712 static void
712 713 alg_insert_sortlist(enum ipsec_algtype at, uint8_t algid, netstack_t *ns)
713 714 {
714 715 ipsec_stack_t *ipss = ns->netstack_ipsec;
715 716 ipsec_alginfo_t *ai = ipss->ipsec_alglists[at][algid];
716 717 uint8_t holder, swap;
717 718 uint_t i;
718 719 uint_t count = ipss->ipsec_nalgs[at];
719 720 ASSERT(ai != NULL);
720 721 ASSERT(algid == ai->alg_id);
721 722
722 723 ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
723 724
724 725 holder = algid;
725 726
726 727 for (i = 0; i < count - 1; i++) {
727 728 ipsec_alginfo_t *alt;
728 729
729 730 alt = ipss->ipsec_alglists[at][ipss->ipsec_sortlist[at][i]];
730 731 /*
731 732 * If you want to give precedence to newly added algs,
732 733 * add the = in the > comparison.
733 734 */
734 735 if ((holder != algid) || (ai->alg_minbits > alt->alg_minbits)) {
735 736 /* Swap sortlist[i] and holder. */
736 737 swap = ipss->ipsec_sortlist[at][i];
737 738 ipss->ipsec_sortlist[at][i] = holder;
738 739 holder = swap;
739 740 ai = alt;
740 741 } /* Else just continue. */
741 742 }
742 743
743 744 /* Store holder in last slot. */
744 745 ipss->ipsec_sortlist[at][i] = holder;
745 746 }
746 747
747 748 /*
748 749 * Remove an algorithm from a sorted algorithm list.
749 750 * This should be considerably easier, even with complex sorting.
750 751 */
751 752 static void
752 753 alg_remove_sortlist(enum ipsec_algtype at, uint8_t algid, netstack_t *ns)
753 754 {
754 755 boolean_t copyback = B_FALSE;
755 756 int i;
756 757 ipsec_stack_t *ipss = ns->netstack_ipsec;
757 758 int newcount = ipss->ipsec_nalgs[at];
758 759
759 760 ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
760 761
761 762 for (i = 0; i <= newcount; i++) {
762 763 if (copyback) {
763 764 ipss->ipsec_sortlist[at][i-1] =
764 765 ipss->ipsec_sortlist[at][i];
765 766 } else if (ipss->ipsec_sortlist[at][i] == algid) {
766 767 copyback = B_TRUE;
767 768 }
768 769 }
769 770 }
770 771
771 772 /*
772 773 * Add the specified algorithm to the algorithm tables.
773 774 * Must be called while holding the algorithm table writer lock.
774 775 */
775 776 void
776 777 ipsec_alg_reg(ipsec_algtype_t algtype, ipsec_alginfo_t *alg, netstack_t *ns)
777 778 {
778 779 ipsec_stack_t *ipss = ns->netstack_ipsec;
779 780
780 781 ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
781 782
782 783 ASSERT(ipss->ipsec_alglists[algtype][alg->alg_id] == NULL);
783 784 ipsec_alg_fix_min_max(alg, algtype, ns);
784 785 ipss->ipsec_alglists[algtype][alg->alg_id] = alg;
785 786
786 787 ipss->ipsec_nalgs[algtype]++;
787 788 alg_insert_sortlist(algtype, alg->alg_id, ns);
788 789 }
789 790
790 791 /*
791 792 * Remove the specified algorithm from the algorithm tables.
792 793 * Must be called while holding the algorithm table writer lock.
793 794 */
794 795 void
795 796 ipsec_alg_unreg(ipsec_algtype_t algtype, uint8_t algid, netstack_t *ns)
796 797 {
797 798 ipsec_stack_t *ipss = ns->netstack_ipsec;
798 799
799 800 ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
800 801
801 802 ASSERT(ipss->ipsec_alglists[algtype][algid] != NULL);
802 803 ipsec_alg_free(ipss->ipsec_alglists[algtype][algid]);
803 804 ipss->ipsec_alglists[algtype][algid] = NULL;
804 805
805 806 ipss->ipsec_nalgs[algtype]--;
806 807 alg_remove_sortlist(algtype, algid, ns);
807 808 }
808 809
809 810 /*
810 811 * Hooks for spdsock to get a grip on system policy.
811 812 */
812 813
813 814 ipsec_policy_head_t *
814 815 ipsec_system_policy(netstack_t *ns)
815 816 {
816 817 ipsec_stack_t *ipss = ns->netstack_ipsec;
817 818 ipsec_policy_head_t *h = &ipss->ipsec_system_policy;
818 819
819 820 IPPH_REFHOLD(h);
820 821 return (h);
821 822 }
822 823
823 824 ipsec_policy_head_t *
824 825 ipsec_inactive_policy(netstack_t *ns)
825 826 {
826 827 ipsec_stack_t *ipss = ns->netstack_ipsec;
827 828 ipsec_policy_head_t *h = &ipss->ipsec_inactive_policy;
828 829
829 830 IPPH_REFHOLD(h);
830 831 return (h);
831 832 }
832 833
833 834 /*
834 835 * Lock inactive policy, then active policy, then exchange policy root
835 836 * pointers.
836 837 */
837 838 void
838 839 ipsec_swap_policy(ipsec_policy_head_t *active, ipsec_policy_head_t *inactive,
839 840 netstack_t *ns)
840 841 {
841 842 int af, dir;
842 843 avl_tree_t r1, r2;
843 844
844 845 rw_enter(&inactive->iph_lock, RW_WRITER);
845 846 rw_enter(&active->iph_lock, RW_WRITER);
846 847
847 848 r1 = active->iph_rulebyid;
848 849 r2 = inactive->iph_rulebyid;
849 850 active->iph_rulebyid = r2;
850 851 inactive->iph_rulebyid = r1;
851 852
852 853 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
853 854 ipsec_policy_hash_t *h1, *h2;
854 855
855 856 h1 = active->iph_root[dir].ipr_hash;
856 857 h2 = inactive->iph_root[dir].ipr_hash;
857 858 active->iph_root[dir].ipr_hash = h2;
858 859 inactive->iph_root[dir].ipr_hash = h1;
859 860
860 861 for (af = 0; af < IPSEC_NAF; af++) {
861 862 ipsec_policy_t *t1, *t2;
862 863
863 864 t1 = active->iph_root[dir].ipr_nonhash[af];
864 865 t2 = inactive->iph_root[dir].ipr_nonhash[af];
865 866 active->iph_root[dir].ipr_nonhash[af] = t2;
866 867 inactive->iph_root[dir].ipr_nonhash[af] = t1;
867 868 if (t1 != NULL) {
868 869 t1->ipsp_hash.hash_pp =
869 870 &(inactive->iph_root[dir].ipr_nonhash[af]);
870 871 }
871 872 if (t2 != NULL) {
872 873 t2->ipsp_hash.hash_pp =
873 874 &(active->iph_root[dir].ipr_nonhash[af]);
874 875 }
875 876
876 877 }
877 878 }
878 879 active->iph_gen++;
879 880 inactive->iph_gen++;
880 881 ipsec_update_present_flags(ns->netstack_ipsec);
881 882 rw_exit(&active->iph_lock);
882 883 rw_exit(&inactive->iph_lock);
883 884 }
884 885
885 886 /*
886 887 * Swap global policy primary/secondary.
887 888 */
888 889 void
889 890 ipsec_swap_global_policy(netstack_t *ns)
890 891 {
891 892 ipsec_stack_t *ipss = ns->netstack_ipsec;
892 893
893 894 ipsec_swap_policy(&ipss->ipsec_system_policy,
894 895 &ipss->ipsec_inactive_policy, ns);
895 896 }
896 897
897 898 /*
898 899 * Clone one policy rule..
899 900 */
900 901 static ipsec_policy_t *
901 902 ipsec_copy_policy(const ipsec_policy_t *src)
902 903 {
903 904 ipsec_policy_t *dst = kmem_cache_alloc(ipsec_pol_cache, KM_NOSLEEP);
904 905
905 906 if (dst == NULL)
906 907 return (NULL);
907 908
908 909 /*
909 910 * Adjust refcounts of cloned state.
910 911 */
911 912 IPACT_REFHOLD(src->ipsp_act);
912 913 src->ipsp_sel->ipsl_refs++;
913 914
914 915 HASH_NULL(dst, ipsp_hash);
915 916 dst->ipsp_netstack = src->ipsp_netstack;
916 917 dst->ipsp_refs = 1;
917 918 dst->ipsp_sel = src->ipsp_sel;
918 919 dst->ipsp_act = src->ipsp_act;
919 920 dst->ipsp_prio = src->ipsp_prio;
920 921 dst->ipsp_index = src->ipsp_index;
921 922
922 923 return (dst);
923 924 }
924 925
925 926 void
926 927 ipsec_insert_always(avl_tree_t *tree, void *new_node)
927 928 {
928 929 void *node;
929 930 avl_index_t where;
930 931
931 932 node = avl_find(tree, new_node, &where);
932 933 ASSERT(node == NULL);
933 934 avl_insert(tree, new_node, where);
934 935 }
935 936
936 937
937 938 static int
938 939 ipsec_copy_chain(ipsec_policy_head_t *dph, ipsec_policy_t *src,
939 940 ipsec_policy_t **dstp)
940 941 {
941 942 for (; src != NULL; src = src->ipsp_hash.hash_next) {
942 943 ipsec_policy_t *dst = ipsec_copy_policy(src);
943 944 if (dst == NULL)
944 945 return (ENOMEM);
945 946
946 947 HASHLIST_INSERT(dst, ipsp_hash, *dstp);
947 948 ipsec_insert_always(&dph->iph_rulebyid, dst);
948 949 }
949 950 return (0);
950 951 }
951 952
952 953
953 954
954 955 /*
955 956 * Make one policy head look exactly like another.
956 957 *
957 958 * As with ipsec_swap_policy, we lock the destination policy head first, then
958 959 * the source policy head. Note that we only need to read-lock the source
959 960 * policy head as we are not changing it.
960 961 */
961 962 int
962 963 ipsec_copy_polhead(ipsec_policy_head_t *sph, ipsec_policy_head_t *dph,
963 964 netstack_t *ns)
964 965 {
965 966 int af, dir, chain, nchains;
966 967
967 968 rw_enter(&dph->iph_lock, RW_WRITER);
968 969
969 970 ipsec_polhead_flush(dph, ns);
970 971
971 972 rw_enter(&sph->iph_lock, RW_READER);
972 973
973 974 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
974 975 ipsec_policy_root_t *dpr = &dph->iph_root[dir];
975 976 ipsec_policy_root_t *spr = &sph->iph_root[dir];
976 977 nchains = dpr->ipr_nchains;
977 978
978 979 ASSERT(dpr->ipr_nchains == spr->ipr_nchains);
979 980
980 981 for (af = 0; af < IPSEC_NAF; af++) {
981 982 if (ipsec_copy_chain(dph, spr->ipr_nonhash[af],
982 983 &dpr->ipr_nonhash[af]))
983 984 goto abort_copy;
984 985 }
985 986
986 987 for (chain = 0; chain < nchains; chain++) {
987 988 if (ipsec_copy_chain(dph,
988 989 spr->ipr_hash[chain].hash_head,
989 990 &dpr->ipr_hash[chain].hash_head))
990 991 goto abort_copy;
991 992 }
992 993 }
993 994
994 995 dph->iph_gen++;
995 996
996 997 rw_exit(&sph->iph_lock);
997 998 rw_exit(&dph->iph_lock);
998 999 return (0);
999 1000
1000 1001 abort_copy:
1001 1002 ipsec_polhead_flush(dph, ns);
1002 1003 rw_exit(&sph->iph_lock);
1003 1004 rw_exit(&dph->iph_lock);
1004 1005 return (ENOMEM);
1005 1006 }
1006 1007
1007 1008 /*
1008 1009 * Clone currently active policy to the inactive policy list.
1009 1010 */
1010 1011 int
1011 1012 ipsec_clone_system_policy(netstack_t *ns)
1012 1013 {
1013 1014 ipsec_stack_t *ipss = ns->netstack_ipsec;
1014 1015
1015 1016 return (ipsec_copy_polhead(&ipss->ipsec_system_policy,
1016 1017 &ipss->ipsec_inactive_policy, ns));
1017 1018 }
1018 1019
1019 1020 /*
1020 1021 * Extract the string from ipsec_policy_failure_msgs[type] and
1021 1022 * log it.
1022 1023 *
1023 1024 */
1024 1025 void
1025 1026 ipsec_log_policy_failure(int type, char *func_name, ipha_t *ipha, ip6_t *ip6h,
1026 1027 boolean_t secure, netstack_t *ns)
1027 1028 {
1028 1029 char sbuf[INET6_ADDRSTRLEN];
1029 1030 char dbuf[INET6_ADDRSTRLEN];
1030 1031 char *s;
1031 1032 char *d;
1032 1033 ipsec_stack_t *ipss = ns->netstack_ipsec;
1033 1034
1034 1035 ASSERT((ipha == NULL && ip6h != NULL) ||
1035 1036 (ip6h == NULL && ipha != NULL));
1036 1037
1037 1038 if (ipha != NULL) {
1038 1039 s = inet_ntop(AF_INET, &ipha->ipha_src, sbuf, sizeof (sbuf));
1039 1040 d = inet_ntop(AF_INET, &ipha->ipha_dst, dbuf, sizeof (dbuf));
1040 1041 } else {
1041 1042 s = inet_ntop(AF_INET6, &ip6h->ip6_src, sbuf, sizeof (sbuf));
1042 1043 d = inet_ntop(AF_INET6, &ip6h->ip6_dst, dbuf, sizeof (dbuf));
1043 1044
1044 1045 }
1045 1046
1046 1047 /* Always bump the policy failure counter. */
1047 1048 ipss->ipsec_policy_failure_count[type]++;
1048 1049
1049 1050 ipsec_rl_strlog(ns, IP_MOD_ID, 0, 0, SL_ERROR|SL_WARN|SL_CONSOLE,
1050 1051 ipsec_policy_failure_msgs[type], func_name,
1051 1052 (secure ? "secure" : "not secure"), s, d);
1052 1053 }
1053 1054
1054 1055 /*
1055 1056 * Rate-limiting front-end to strlog() for AH and ESP. Uses the ndd variables
1056 1057 * in /dev/ip and the same rate-limiting clock so that there's a single
1057 1058 * knob to turn to throttle the rate of messages.
1058 1059 */
1059 1060 void
1060 1061 ipsec_rl_strlog(netstack_t *ns, short mid, short sid, char level, ushort_t sl,
1061 1062 char *fmt, ...)
1062 1063 {
1063 1064 va_list adx;
1064 1065 hrtime_t current = gethrtime();
1065 1066 ip_stack_t *ipst = ns->netstack_ip;
1066 1067 ipsec_stack_t *ipss = ns->netstack_ipsec;
1067 1068
1068 1069 sl |= SL_CONSOLE;
1069 1070 /*
1070 1071 * Throttle logging to stop syslog from being swamped. If variable
1071 1072 * 'ipsec_policy_log_interval' is zero, don't log any messages at
1072 1073 * all, otherwise log only one message every 'ipsec_policy_log_interval'
1073 1074 * msec. Convert interval (in msec) to hrtime (in nsec).
1074 1075 */
1075 1076
1076 1077 if (ipst->ips_ipsec_policy_log_interval) {
1077 1078 if (ipss->ipsec_policy_failure_last +
1078 1079 MSEC2NSEC(ipst->ips_ipsec_policy_log_interval) <= current) {
1079 1080 va_start(adx, fmt);
1080 1081 (void) vstrlog(mid, sid, level, sl, fmt, adx);
1081 1082 va_end(adx);
1082 1083 ipss->ipsec_policy_failure_last = current;
1083 1084 }
1084 1085 }
1085 1086 }
1086 1087
1087 1088 void
1088 1089 ipsec_config_flush(netstack_t *ns)
1089 1090 {
1090 1091 ipsec_stack_t *ipss = ns->netstack_ipsec;
1091 1092
1092 1093 rw_enter(&ipss->ipsec_system_policy.iph_lock, RW_WRITER);
1093 1094 ipsec_polhead_flush(&ipss->ipsec_system_policy, ns);
1094 1095 ipss->ipsec_next_policy_index = 1;
1095 1096 rw_exit(&ipss->ipsec_system_policy.iph_lock);
1096 1097 ipsec_action_reclaim_stack(ipss);
1097 1098 }
1098 1099
1099 1100 /*
1100 1101 * Clip a policy's min/max keybits vs. the capabilities of the
1101 1102 * algorithm.
1102 1103 */
1103 1104 static void
1104 1105 act_alg_adjust(uint_t algtype, uint_t algid,
1105 1106 uint16_t *minbits, uint16_t *maxbits, netstack_t *ns)
1106 1107 {
1107 1108 ipsec_stack_t *ipss = ns->netstack_ipsec;
1108 1109 ipsec_alginfo_t *algp = ipss->ipsec_alglists[algtype][algid];
1109 1110
1110 1111 if (algp != NULL) {
1111 1112 /*
1112 1113 * If passed-in minbits is zero, we assume the caller trusts
1113 1114 * us with setting the minimum key size. We pick the
1114 1115 * algorithms DEFAULT key size for the minimum in this case.
1115 1116 */
1116 1117 if (*minbits == 0) {
1117 1118 *minbits = algp->alg_default_bits;
1118 1119 ASSERT(*minbits >= algp->alg_minbits);
1119 1120 } else {
1120 1121 *minbits = MAX(MIN(*minbits, algp->alg_maxbits),
1121 1122 algp->alg_minbits);
1122 1123 }
1123 1124 if (*maxbits == 0)
1124 1125 *maxbits = algp->alg_maxbits;
1125 1126 else
1126 1127 *maxbits = MIN(MAX(*maxbits, algp->alg_minbits),
1127 1128 algp->alg_maxbits);
1128 1129 ASSERT(*minbits <= *maxbits);
1129 1130 } else {
1130 1131 *minbits = 0;
1131 1132 *maxbits = 0;
1132 1133 }
1133 1134 }
1134 1135
1135 1136 /*
1136 1137 * Check an action's requested algorithms against the algorithms currently
1137 1138 * loaded in the system.
1138 1139 */
1139 1140 boolean_t
1140 1141 ipsec_check_action(ipsec_act_t *act, int *diag, netstack_t *ns)
1141 1142 {
1142 1143 ipsec_prot_t *ipp;
1143 1144 ipsec_stack_t *ipss = ns->netstack_ipsec;
1144 1145
1145 1146 ipp = &act->ipa_apply;
1146 1147
1147 1148 if (ipp->ipp_use_ah &&
1148 1149 ipss->ipsec_alglists[IPSEC_ALG_AUTH][ipp->ipp_auth_alg] == NULL) {
1149 1150 *diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
1150 1151 return (B_FALSE);
1151 1152 }
1152 1153 if (ipp->ipp_use_espa &&
1153 1154 ipss->ipsec_alglists[IPSEC_ALG_AUTH][ipp->ipp_esp_auth_alg] ==
1154 1155 NULL) {
1155 1156 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
1156 1157 return (B_FALSE);
1157 1158 }
1158 1159 if (ipp->ipp_use_esp &&
1159 1160 ipss->ipsec_alglists[IPSEC_ALG_ENCR][ipp->ipp_encr_alg] == NULL) {
1160 1161 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
1161 1162 return (B_FALSE);
1162 1163 }
1163 1164
1164 1165 act_alg_adjust(IPSEC_ALG_AUTH, ipp->ipp_auth_alg,
1165 1166 &ipp->ipp_ah_minbits, &ipp->ipp_ah_maxbits, ns);
1166 1167 act_alg_adjust(IPSEC_ALG_AUTH, ipp->ipp_esp_auth_alg,
1167 1168 &ipp->ipp_espa_minbits, &ipp->ipp_espa_maxbits, ns);
1168 1169 act_alg_adjust(IPSEC_ALG_ENCR, ipp->ipp_encr_alg,
1169 1170 &ipp->ipp_espe_minbits, &ipp->ipp_espe_maxbits, ns);
1170 1171
1171 1172 if (ipp->ipp_ah_minbits > ipp->ipp_ah_maxbits) {
1172 1173 *diag = SPD_DIAGNOSTIC_UNSUPP_AH_KEYSIZE;
1173 1174 return (B_FALSE);
1174 1175 }
1175 1176 if (ipp->ipp_espa_minbits > ipp->ipp_espa_maxbits) {
1176 1177 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_KEYSIZE;
1177 1178 return (B_FALSE);
1178 1179 }
1179 1180 if (ipp->ipp_espe_minbits > ipp->ipp_espe_maxbits) {
1180 1181 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_KEYSIZE;
1181 1182 return (B_FALSE);
1182 1183 }
1183 1184 /* TODO: sanity check lifetimes */
1184 1185 return (B_TRUE);
1185 1186 }
1186 1187
1187 1188 /*
1188 1189 * Set up a single action during wildcard expansion..
1189 1190 */
1190 1191 static void
1191 1192 ipsec_setup_act(ipsec_act_t *outact, ipsec_act_t *act,
1192 1193 uint_t auth_alg, uint_t encr_alg, uint_t eauth_alg, netstack_t *ns)
1193 1194 {
1194 1195 ipsec_prot_t *ipp;
1195 1196
1196 1197 *outact = *act;
1197 1198 ipp = &outact->ipa_apply;
1198 1199 ipp->ipp_auth_alg = (uint8_t)auth_alg;
1199 1200 ipp->ipp_encr_alg = (uint8_t)encr_alg;
1200 1201 ipp->ipp_esp_auth_alg = (uint8_t)eauth_alg;
1201 1202
1202 1203 act_alg_adjust(IPSEC_ALG_AUTH, auth_alg,
1203 1204 &ipp->ipp_ah_minbits, &ipp->ipp_ah_maxbits, ns);
1204 1205 act_alg_adjust(IPSEC_ALG_AUTH, eauth_alg,
1205 1206 &ipp->ipp_espa_minbits, &ipp->ipp_espa_maxbits, ns);
1206 1207 act_alg_adjust(IPSEC_ALG_ENCR, encr_alg,
1207 1208 &ipp->ipp_espe_minbits, &ipp->ipp_espe_maxbits, ns);
1208 1209 }
1209 1210
1210 1211 /*
1211 1212 * combinatoric expansion time: expand a wildcarded action into an
1212 1213 * array of wildcarded actions; we return the exploded action list,
1213 1214 * and return a count in *nact (output only).
1214 1215 */
1215 1216 static ipsec_act_t *
1216 1217 ipsec_act_wildcard_expand(ipsec_act_t *act, uint_t *nact, netstack_t *ns)
1217 1218 {
1218 1219 boolean_t use_ah, use_esp, use_espa;
1219 1220 boolean_t wild_auth, wild_encr, wild_eauth;
1220 1221 uint_t auth_alg, auth_idx, auth_min, auth_max;
1221 1222 uint_t eauth_alg, eauth_idx, eauth_min, eauth_max;
1222 1223 uint_t encr_alg, encr_idx, encr_min, encr_max;
1223 1224 uint_t action_count, ai;
1224 1225 ipsec_act_t *outact;
1225 1226 ipsec_stack_t *ipss = ns->netstack_ipsec;
1226 1227
1227 1228 if (act->ipa_type != IPSEC_ACT_APPLY) {
1228 1229 outact = kmem_alloc(sizeof (*act), KM_NOSLEEP);
1229 1230 *nact = 1;
1230 1231 if (outact != NULL)
1231 1232 bcopy(act, outact, sizeof (*act));
1232 1233 return (outact);
1233 1234 }
1234 1235 /*
1235 1236 * compute the combinatoric explosion..
1236 1237 *
1237 1238 * we assume a request for encr if esp_req is PREF_REQUIRED
1238 1239 * we assume a request for ah auth if ah_req is PREF_REQUIRED.
1239 1240 * we assume a request for esp auth if !ah and esp_req is PREF_REQUIRED
1240 1241 */
1241 1242
1242 1243 use_ah = act->ipa_apply.ipp_use_ah;
1243 1244 use_esp = act->ipa_apply.ipp_use_esp;
1244 1245 use_espa = act->ipa_apply.ipp_use_espa;
1245 1246 auth_alg = act->ipa_apply.ipp_auth_alg;
1246 1247 eauth_alg = act->ipa_apply.ipp_esp_auth_alg;
1247 1248 encr_alg = act->ipa_apply.ipp_encr_alg;
1248 1249
1249 1250 wild_auth = use_ah && (auth_alg == 0);
1250 1251 wild_eauth = use_espa && (eauth_alg == 0);
1251 1252 wild_encr = use_esp && (encr_alg == 0);
1252 1253
1253 1254 action_count = 1;
1254 1255 auth_min = auth_max = auth_alg;
1255 1256 eauth_min = eauth_max = eauth_alg;
1256 1257 encr_min = encr_max = encr_alg;
1257 1258
1258 1259 /*
1259 1260 * set up for explosion.. for each dimension, expand output
1260 1261 * size by the explosion factor.
1261 1262 *
1262 1263 * Don't include the "any" algorithms, if defined, as no
1263 1264 * kernel policies should be set for these algorithms.
1264 1265 */
1265 1266
1266 1267 #define SET_EXP_MINMAX(type, wild, alg, min, max, ipss) \
1267 1268 if (wild) { \
1268 1269 int nalgs = ipss->ipsec_nalgs[type]; \
1269 1270 if (ipss->ipsec_alglists[type][alg] != NULL) \
1270 1271 nalgs--; \
1271 1272 action_count *= nalgs; \
1272 1273 min = 0; \
1273 1274 max = ipss->ipsec_nalgs[type] - 1; \
1274 1275 }
1275 1276
1276 1277 SET_EXP_MINMAX(IPSEC_ALG_AUTH, wild_auth, SADB_AALG_NONE,
1277 1278 auth_min, auth_max, ipss);
1278 1279 SET_EXP_MINMAX(IPSEC_ALG_AUTH, wild_eauth, SADB_AALG_NONE,
1279 1280 eauth_min, eauth_max, ipss);
1280 1281 SET_EXP_MINMAX(IPSEC_ALG_ENCR, wild_encr, SADB_EALG_NONE,
1281 1282 encr_min, encr_max, ipss);
1282 1283
1283 1284 #undef SET_EXP_MINMAX
1284 1285
1285 1286 /*
1286 1287 * ok, allocate the whole mess..
1287 1288 */
1288 1289
1289 1290 outact = kmem_alloc(sizeof (*outact) * action_count, KM_NOSLEEP);
1290 1291 if (outact == NULL)
1291 1292 return (NULL);
1292 1293
1293 1294 /*
1294 1295 * Now compute all combinations. Note that non-wildcarded
1295 1296 * dimensions just get a single value from auth_min, while
1296 1297 * wildcarded dimensions indirect through the sortlist.
1297 1298 *
1298 1299 * We do encryption outermost since, at this time, there's
1299 1300 * greater difference in security and performance between
1300 1301 * encryption algorithms vs. authentication algorithms.
1301 1302 */
1302 1303
1303 1304 ai = 0;
1304 1305
1305 1306 #define WHICH_ALG(type, wild, idx, ipss) \
1306 1307 ((wild)?(ipss->ipsec_sortlist[type][idx]):(idx))
1307 1308
1308 1309 for (encr_idx = encr_min; encr_idx <= encr_max; encr_idx++) {
1309 1310 encr_alg = WHICH_ALG(IPSEC_ALG_ENCR, wild_encr, encr_idx, ipss);
1310 1311 if (wild_encr && encr_alg == SADB_EALG_NONE)
1311 1312 continue;
1312 1313 for (auth_idx = auth_min; auth_idx <= auth_max; auth_idx++) {
1313 1314 auth_alg = WHICH_ALG(IPSEC_ALG_AUTH, wild_auth,
1314 1315 auth_idx, ipss);
1315 1316 if (wild_auth && auth_alg == SADB_AALG_NONE)
1316 1317 continue;
1317 1318 for (eauth_idx = eauth_min; eauth_idx <= eauth_max;
1318 1319 eauth_idx++) {
1319 1320 eauth_alg = WHICH_ALG(IPSEC_ALG_AUTH,
1320 1321 wild_eauth, eauth_idx, ipss);
1321 1322 if (wild_eauth && eauth_alg == SADB_AALG_NONE)
1322 1323 continue;
1323 1324
1324 1325 ipsec_setup_act(&outact[ai], act,
1325 1326 auth_alg, encr_alg, eauth_alg, ns);
1326 1327 ai++;
1327 1328 }
1328 1329 }
1329 1330 }
1330 1331
1331 1332 #undef WHICH_ALG
1332 1333
1333 1334 ASSERT(ai == action_count);
1334 1335 *nact = action_count;
1335 1336 return (outact);
1336 1337 }
1337 1338
1338 1339 /*
1339 1340 * Extract the parts of an ipsec_prot_t from an old-style ipsec_req_t.
1340 1341 */
1341 1342 static void
1342 1343 ipsec_prot_from_req(const ipsec_req_t *req, ipsec_prot_t *ipp)
1343 1344 {
1344 1345 bzero(ipp, sizeof (*ipp));
1345 1346 /*
1346 1347 * ipp_use_* are bitfields. Look at "!!" in the following as a
1347 1348 * "boolean canonicalization" operator.
1348 1349 */
1349 1350 ipp->ipp_use_ah = !!(req->ipsr_ah_req & IPSEC_PREF_REQUIRED);
1350 1351 ipp->ipp_use_esp = !!(req->ipsr_esp_req & IPSEC_PREF_REQUIRED);
1351 1352 ipp->ipp_use_espa = !!(req->ipsr_esp_auth_alg);
1352 1353 ipp->ipp_use_se = !!(req->ipsr_self_encap_req & IPSEC_PREF_REQUIRED);
1353 1354 ipp->ipp_use_unique = !!((req->ipsr_ah_req|req->ipsr_esp_req) &
1354 1355 IPSEC_PREF_UNIQUE);
1355 1356 ipp->ipp_encr_alg = req->ipsr_esp_alg;
1356 1357 /*
1357 1358 * SADB_AALG_ANY is a placeholder to distinguish "any" from
1358 1359 * "none" above. If auth is required, as determined above,
1359 1360 * SADB_AALG_ANY becomes 0, which is the representation
1360 1361 * of "any" and "none" in PF_KEY v2.
1361 1362 */
1362 1363 ipp->ipp_auth_alg = (req->ipsr_auth_alg != SADB_AALG_ANY) ?
1363 1364 req->ipsr_auth_alg : 0;
1364 1365 ipp->ipp_esp_auth_alg = (req->ipsr_esp_auth_alg != SADB_AALG_ANY) ?
1365 1366 req->ipsr_esp_auth_alg : 0;
1366 1367 }
1367 1368
1368 1369 /*
1369 1370 * Extract a new-style action from a request.
1370 1371 */
1371 1372 void
1372 1373 ipsec_actvec_from_req(const ipsec_req_t *req, ipsec_act_t **actp, uint_t *nactp,
1373 1374 netstack_t *ns)
1374 1375 {
1375 1376 struct ipsec_act act;
1376 1377
1377 1378 bzero(&act, sizeof (act));
1378 1379 if ((req->ipsr_ah_req & IPSEC_PREF_NEVER) &&
1379 1380 (req->ipsr_esp_req & IPSEC_PREF_NEVER)) {
1380 1381 act.ipa_type = IPSEC_ACT_BYPASS;
1381 1382 } else {
1382 1383 act.ipa_type = IPSEC_ACT_APPLY;
1383 1384 ipsec_prot_from_req(req, &act.ipa_apply);
1384 1385 }
1385 1386 *actp = ipsec_act_wildcard_expand(&act, nactp, ns);
1386 1387 }
1387 1388
1388 1389 /*
1389 1390 * Convert a new-style "prot" back to an ipsec_req_t (more backwards compat).
1390 1391 * We assume caller has already zero'ed *req for us.
1391 1392 */
1392 1393 static int
1393 1394 ipsec_req_from_prot(ipsec_prot_t *ipp, ipsec_req_t *req)
1394 1395 {
1395 1396 req->ipsr_esp_alg = ipp->ipp_encr_alg;
1396 1397 req->ipsr_auth_alg = ipp->ipp_auth_alg;
1397 1398 req->ipsr_esp_auth_alg = ipp->ipp_esp_auth_alg;
1398 1399
1399 1400 if (ipp->ipp_use_unique) {
1400 1401 req->ipsr_ah_req |= IPSEC_PREF_UNIQUE;
1401 1402 req->ipsr_esp_req |= IPSEC_PREF_UNIQUE;
1402 1403 }
1403 1404 if (ipp->ipp_use_se)
1404 1405 req->ipsr_self_encap_req |= IPSEC_PREF_REQUIRED;
1405 1406 if (ipp->ipp_use_ah)
1406 1407 req->ipsr_ah_req |= IPSEC_PREF_REQUIRED;
1407 1408 if (ipp->ipp_use_esp)
1408 1409 req->ipsr_esp_req |= IPSEC_PREF_REQUIRED;
1409 1410 return (sizeof (*req));
1410 1411 }
1411 1412
1412 1413 /*
1413 1414 * Convert a new-style action back to an ipsec_req_t (more backwards compat).
1414 1415 * We assume caller has already zero'ed *req for us.
1415 1416 */
1416 1417 static int
1417 1418 ipsec_req_from_act(ipsec_action_t *ap, ipsec_req_t *req)
1418 1419 {
1419 1420 switch (ap->ipa_act.ipa_type) {
1420 1421 case IPSEC_ACT_BYPASS:
1421 1422 req->ipsr_ah_req = IPSEC_PREF_NEVER;
1422 1423 req->ipsr_esp_req = IPSEC_PREF_NEVER;
1423 1424 return (sizeof (*req));
1424 1425 case IPSEC_ACT_APPLY:
1425 1426 return (ipsec_req_from_prot(&ap->ipa_act.ipa_apply, req));
1426 1427 }
1427 1428 return (sizeof (*req));
1428 1429 }
1429 1430
1430 1431 /*
1431 1432 * Convert a new-style action back to an ipsec_req_t (more backwards compat).
1432 1433 * We assume caller has already zero'ed *req for us.
1433 1434 */
1434 1435 int
1435 1436 ipsec_req_from_head(ipsec_policy_head_t *ph, ipsec_req_t *req, int af)
1436 1437 {
1437 1438 ipsec_policy_t *p;
1438 1439
1439 1440 /*
1440 1441 * FULL-PERSOCK: consult hash table, too?
1441 1442 */
1442 1443 for (p = ph->iph_root[IPSEC_INBOUND].ipr_nonhash[af];
1443 1444 p != NULL;
1444 1445 p = p->ipsp_hash.hash_next) {
1445 1446 if ((p->ipsp_sel->ipsl_key.ipsl_valid & IPSL_WILDCARD) == 0)
1446 1447 return (ipsec_req_from_act(p->ipsp_act, req));
1447 1448 }
1448 1449 return (sizeof (*req));
1449 1450 }
1450 1451
1451 1452 /*
1452 1453 * Based on per-socket or latched policy, convert to an appropriate
1453 1454 * IP_SEC_OPT ipsec_req_t for the socket option; return size so we can
1454 1455 * be tail-called from ip.
1455 1456 */
1456 1457 int
1457 1458 ipsec_req_from_conn(conn_t *connp, ipsec_req_t *req, int af)
1458 1459 {
1459 1460 ipsec_latch_t *ipl;
1460 1461 int rv = sizeof (ipsec_req_t);
1461 1462
1462 1463 bzero(req, sizeof (*req));
1463 1464
1464 1465 ASSERT(MUTEX_HELD(&connp->conn_lock));
1465 1466 ipl = connp->conn_latch;
1466 1467
1467 1468 /*
1468 1469 * Find appropriate policy. First choice is latched action;
1469 1470 * failing that, see latched policy; failing that,
1470 1471 * look at configured policy.
1471 1472 */
1472 1473 if (ipl != NULL) {
1473 1474 if (connp->conn_latch_in_action != NULL) {
1474 1475 rv = ipsec_req_from_act(connp->conn_latch_in_action,
1475 1476 req);
1476 1477 goto done;
1477 1478 }
1478 1479 if (connp->conn_latch_in_policy != NULL) {
1479 1480 rv = ipsec_req_from_act(
1480 1481 connp->conn_latch_in_policy->ipsp_act, req);
1481 1482 goto done;
1482 1483 }
1483 1484 }
1484 1485 if (connp->conn_policy != NULL)
1485 1486 rv = ipsec_req_from_head(connp->conn_policy, req, af);
1486 1487 done:
1487 1488 return (rv);
1488 1489 }
1489 1490
1490 1491 void
1491 1492 ipsec_actvec_free(ipsec_act_t *act, uint_t nact)
1492 1493 {
1493 1494 kmem_free(act, nact * sizeof (*act));
1494 1495 }
1495 1496
1496 1497 /*
1497 1498 * Consumes a reference to ipsp.
1498 1499 */
1499 1500 static mblk_t *
1500 1501 ipsec_check_loopback_policy(mblk_t *data_mp, ip_recv_attr_t *ira,
1501 1502 ipsec_policy_t *ipsp)
1502 1503 {
1503 1504 if (!(ira->ira_flags & IRAF_IPSEC_SECURE))
1504 1505 return (data_mp);
1505 1506
1506 1507 ASSERT(ira->ira_flags & IRAF_LOOPBACK);
1507 1508
1508 1509 IPPOL_REFRELE(ipsp);
1509 1510
1510 1511 /*
1511 1512 * We should do an actual policy check here. Revisit this
1512 1513 * when we revisit the IPsec API. (And pass a conn_t in when we
1513 1514 * get there.)
1514 1515 */
1515 1516
1516 1517 return (data_mp);
1517 1518 }
1518 1519
1519 1520 /*
1520 1521 * Check that packet's inbound ports & proto match the selectors
1521 1522 * expected by the SAs it traversed on the way in.
1522 1523 */
1523 1524 static boolean_t
1524 1525 ipsec_check_ipsecin_unique(ip_recv_attr_t *ira, const char **reason,
1525 1526 kstat_named_t **counter, uint64_t pkt_unique, netstack_t *ns)
1526 1527 {
1527 1528 uint64_t ah_mask, esp_mask;
1528 1529 ipsa_t *ah_assoc;
1529 1530 ipsa_t *esp_assoc;
1530 1531 ipsec_stack_t *ipss = ns->netstack_ipsec;
1531 1532
1532 1533 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1533 1534 ASSERT(!(ira->ira_flags & IRAF_LOOPBACK));
1534 1535
1535 1536 ah_assoc = ira->ira_ipsec_ah_sa;
1536 1537 esp_assoc = ira->ira_ipsec_esp_sa;
1537 1538 ASSERT((ah_assoc != NULL) || (esp_assoc != NULL));
1538 1539
1539 1540 ah_mask = (ah_assoc != NULL) ? ah_assoc->ipsa_unique_mask : 0;
1540 1541 esp_mask = (esp_assoc != NULL) ? esp_assoc->ipsa_unique_mask : 0;
1541 1542
1542 1543 if ((ah_mask == 0) && (esp_mask == 0))
1543 1544 return (B_TRUE);
1544 1545
1545 1546 /*
1546 1547 * The pkt_unique check will also check for tunnel mode on the SA
1547 1548 * vs. the tunneled_packet boolean. "Be liberal in what you receive"
1548 1549 * should not apply in this case. ;)
1549 1550 */
1550 1551
1551 1552 if (ah_mask != 0 &&
1552 1553 ah_assoc->ipsa_unique_id != (pkt_unique & ah_mask)) {
1553 1554 *reason = "AH inner header mismatch";
1554 1555 *counter = DROPPER(ipss, ipds_spd_ah_innermismatch);
1555 1556 return (B_FALSE);
1556 1557 }
1557 1558 if (esp_mask != 0 &&
1558 1559 esp_assoc->ipsa_unique_id != (pkt_unique & esp_mask)) {
1559 1560 *reason = "ESP inner header mismatch";
1560 1561 *counter = DROPPER(ipss, ipds_spd_esp_innermismatch);
1561 1562 return (B_FALSE);
1562 1563 }
1563 1564 return (B_TRUE);
1564 1565 }
1565 1566
1566 1567 static boolean_t
1567 1568 ipsec_check_ipsecin_action(ip_recv_attr_t *ira, mblk_t *mp, ipsec_action_t *ap,
1568 1569 ipha_t *ipha, ip6_t *ip6h, const char **reason, kstat_named_t **counter,
1569 1570 netstack_t *ns)
1570 1571 {
1571 1572 boolean_t ret = B_TRUE;
1572 1573 ipsec_prot_t *ipp;
1573 1574 ipsa_t *ah_assoc;
1574 1575 ipsa_t *esp_assoc;
1575 1576 boolean_t decaps;
1576 1577 ipsec_stack_t *ipss = ns->netstack_ipsec;
1577 1578
1578 1579 ASSERT((ipha == NULL && ip6h != NULL) ||
1579 1580 (ip6h == NULL && ipha != NULL));
1580 1581
1581 1582 if (ira->ira_flags & IRAF_LOOPBACK) {
1582 1583 /*
1583 1584 * Besides accepting pointer-equivalent actions, we also
1584 1585 * accept any ICMP errors we generated for ourselves,
1585 1586 * regardless of policy. If we do not wish to make this
1586 1587 * assumption in the future, check here, and where
1587 1588 * IXAF_TRUSTED_ICMP is initialized in ip.c and ip6.c.
1588 1589 */
1589 1590 if (ap == ira->ira_ipsec_action ||
1590 1591 (ira->ira_flags & IRAF_TRUSTED_ICMP))
1591 1592 return (B_TRUE);
1592 1593
1593 1594 /* Deep compare necessary here?? */
1594 1595 *counter = DROPPER(ipss, ipds_spd_loopback_mismatch);
1595 1596 *reason = "loopback policy mismatch";
1596 1597 return (B_FALSE);
1597 1598 }
1598 1599 ASSERT(!(ira->ira_flags & IRAF_TRUSTED_ICMP));
1599 1600 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1600 1601
1601 1602 ah_assoc = ira->ira_ipsec_ah_sa;
1602 1603 esp_assoc = ira->ira_ipsec_esp_sa;
1603 1604
1604 1605 decaps = (ira->ira_flags & IRAF_IPSEC_DECAPS);
1605 1606
1606 1607 switch (ap->ipa_act.ipa_type) {
1607 1608 case IPSEC_ACT_DISCARD:
1608 1609 case IPSEC_ACT_REJECT:
1609 1610 /* Should "fail hard" */
1610 1611 *counter = DROPPER(ipss, ipds_spd_explicit);
1611 1612 *reason = "blocked by policy";
1612 1613 return (B_FALSE);
1613 1614
1614 1615 case IPSEC_ACT_BYPASS:
1615 1616 case IPSEC_ACT_CLEAR:
1616 1617 *counter = DROPPER(ipss, ipds_spd_got_secure);
1617 1618 *reason = "expected clear, got protected";
1618 1619 return (B_FALSE);
1619 1620
1620 1621 case IPSEC_ACT_APPLY:
1621 1622 ipp = &ap->ipa_act.ipa_apply;
1622 1623 /*
1623 1624 		 * For now we only do simple checks of whether the
1624 1625 		 * datagram satisfies the required IPsec protocol
1625 1626 		 * constraints. We might add more in the future, such as
1626 1627 		 * sensitivity levels, key bits, etc.
1627 1628 		 * If it fails the constraints, check whether we would
1628 1629 		 * have accepted it had it come in clear.
1629 1630 */
1630 1631 if (ipp->ipp_use_ah) {
1631 1632 if (ah_assoc == NULL) {
1632 1633 ret = ipsec_inbound_accept_clear(mp, ipha,
1633 1634 ip6h);
1634 1635 *counter = DROPPER(ipss, ipds_spd_got_clear);
1635 1636 *reason = "unprotected not accepted";
1636 1637 break;
1637 1638 }
1638 1639 ASSERT(ah_assoc != NULL);
1639 1640 ASSERT(ipp->ipp_auth_alg != 0);
1640 1641
1641 1642 if (ah_assoc->ipsa_auth_alg !=
1642 1643 ipp->ipp_auth_alg) {
1643 1644 *counter = DROPPER(ipss, ipds_spd_bad_ahalg);
1644 1645 *reason = "unacceptable ah alg";
1645 1646 ret = B_FALSE;
1646 1647 break;
1647 1648 }
1648 1649 } else if (ah_assoc != NULL) {
1649 1650 /*
1650 1651 * Don't allow this. Check IPSEC NOTE above
1651 1652 * ip_fanout_proto().
1652 1653 */
1653 1654 *counter = DROPPER(ipss, ipds_spd_got_ah);
1654 1655 *reason = "unexpected AH";
1655 1656 ret = B_FALSE;
1656 1657 break;
1657 1658 }
1658 1659 if (ipp->ipp_use_esp) {
1659 1660 if (esp_assoc == NULL) {
1660 1661 ret = ipsec_inbound_accept_clear(mp, ipha,
1661 1662 ip6h);
1662 1663 *counter = DROPPER(ipss, ipds_spd_got_clear);
1663 1664 *reason = "unprotected not accepted";
1664 1665 break;
1665 1666 }
1666 1667 ASSERT(esp_assoc != NULL);
1667 1668 ASSERT(ipp->ipp_encr_alg != 0);
1668 1669
1669 1670 if (esp_assoc->ipsa_encr_alg !=
1670 1671 ipp->ipp_encr_alg) {
1671 1672 *counter = DROPPER(ipss, ipds_spd_bad_espealg);
1672 1673 *reason = "unacceptable esp alg";
1673 1674 ret = B_FALSE;
1674 1675 break;
1675 1676 }
1676 1677 /*
1677 1678 * If the client does not need authentication,
1678 1679 			 * we don't verify the algorithm.
1679 1680 */
1680 1681 if (ipp->ipp_use_espa) {
1681 1682 if (esp_assoc->ipsa_auth_alg !=
1682 1683 ipp->ipp_esp_auth_alg) {
1683 1684 *counter = DROPPER(ipss,
1684 1685 ipds_spd_bad_espaalg);
1685 1686 *reason = "unacceptable esp auth alg";
1686 1687 ret = B_FALSE;
1687 1688 break;
1688 1689 }
1689 1690 }
1690 1691 } else if (esp_assoc != NULL) {
1691 1692 /*
1692 1693 * Don't allow this. Check IPSEC NOTE above
1693 1694 * ip_fanout_proto().
1694 1695 */
1695 1696 *counter = DROPPER(ipss, ipds_spd_got_esp);
1696 1697 *reason = "unexpected ESP";
1697 1698 ret = B_FALSE;
1698 1699 break;
1699 1700 }
1700 1701 if (ipp->ipp_use_se) {
1701 1702 if (!decaps) {
1702 1703 ret = ipsec_inbound_accept_clear(mp, ipha,
1703 1704 ip6h);
1704 1705 if (!ret) {
1705 1706 /* XXX mutant? */
1706 1707 *counter = DROPPER(ipss,
1707 1708 ipds_spd_bad_selfencap);
1708 1709 *reason = "self encap not found";
1709 1710 break;
1710 1711 }
1711 1712 }
1712 1713 } else if (decaps) {
1713 1714 /*
1714 1715 * XXX If the packet comes in tunneled and the
1715 1716 * recipient does not expect it to be tunneled, it
1716 1717 * is okay. But we drop to be consistent with the
1717 1718 * other cases.
1718 1719 */
1719 1720 *counter = DROPPER(ipss, ipds_spd_got_selfencap);
1720 1721 *reason = "unexpected self encap";
1721 1722 ret = B_FALSE;
1722 1723 break;
1723 1724 }
1724 1725 if (ira->ira_ipsec_action != NULL) {
1725 1726 /*
1726 1727 * This can happen if we do a double policy-check on
1727 1728 * a packet
1728 1729 * XXX XXX should fix this case!
1729 1730 */
1730 1731 IPACT_REFRELE(ira->ira_ipsec_action);
1731 1732 }
1732 1733 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1733 1734 ASSERT(ira->ira_ipsec_action == NULL);
1734 1735 IPACT_REFHOLD(ap);
1735 1736 ira->ira_ipsec_action = ap;
1736 1737 break; /* from switch */
1737 1738 }
1738 1739 return (ret);
1739 1740 }
1740 1741
1741 1742 static boolean_t
1742 1743 spd_match_inbound_ids(ipsec_latch_t *ipl, ipsa_t *sa)
1743 1744 {
1744 1745 ASSERT(ipl->ipl_ids_latched == B_TRUE);
1745 1746 return ipsid_equal(ipl->ipl_remote_cid, sa->ipsa_src_cid) &&
1746 1747 ipsid_equal(ipl->ipl_local_cid, sa->ipsa_dst_cid);
1747 1748 }
1748 1749
1749 1750 /*
1750 1751 * Takes a latched conn and an inbound packet and returns a unique_id suitable
1751 1752 * for SA comparisons. Most of the time we will copy from the conn_t, but
1752 1753 * there are cases when the conn_t is latched but it has wildcard selectors,
1753 1754 * and then we need to fallback to scooping them out of the packet.
1754 1755 *
1755 1756 * Assume we'll never have 0 with a conn_t present, so use 0 as a failure. We
1756 1757 * can get away with this because we only have non-zero ports/proto for
1757 1758 * latched conn_ts.
1758 1759 *
1759 1760 * Ideal candidate for an "inline" keyword, as we're JUST convoluted enough
1760 1761 * to not be a nice macro.
1761 1762 */
1762 1763 static uint64_t
1763 1764 conn_to_unique(conn_t *connp, mblk_t *data_mp, ipha_t *ipha, ip6_t *ip6h)
1764 1765 {
1765 1766 ipsec_selector_t sel;
1766 1767 uint8_t ulp = connp->conn_proto;
1767 1768
1768 1769 ASSERT(connp->conn_latch_in_policy != NULL);
1769 1770
1770 1771 if ((ulp == IPPROTO_TCP || ulp == IPPROTO_UDP || ulp == IPPROTO_SCTP) &&
1771 1772 (connp->conn_fport == 0 || connp->conn_lport == 0)) {
1772 1773 /* Slow path - we gotta grab from the packet. */
1773 1774 if (ipsec_init_inbound_sel(&sel, data_mp, ipha, ip6h,
1774 1775 SEL_NONE) != SELRET_SUCCESS) {
1775 1776 /* Failure -> have caller free packet with ENOMEM. */
1776 1777 return (0);
1777 1778 }
1778 1779 return (SA_UNIQUE_ID(sel.ips_remote_port, sel.ips_local_port,
1779 1780 sel.ips_protocol, 0));
1780 1781 }
1781 1782
1782 1783 #ifdef DEBUG_NOT_UNTIL_6478464
1783 1784 if (ipsec_init_inbound_sel(&sel, data_mp, ipha, ip6h, SEL_NONE) ==
1784 1785 SELRET_SUCCESS) {
1785 1786 ASSERT(sel.ips_local_port == connp->conn_lport);
1786 1787 ASSERT(sel.ips_remote_port == connp->conn_fport);
1787 1788 ASSERT(sel.ips_protocol == connp->conn_proto);
1788 1789 }
1789 1790 ASSERT(connp->conn_proto != 0);
1790 1791 #endif
1791 1792
1792 1793 return (SA_UNIQUE_ID(connp->conn_fport, connp->conn_lport, ulp, 0));
1793 1794 }
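/*
 * Minimal caller sketch (hypothetical; the real callers are
 * ipsec_check_ipsecin_latch() and ipsec_check_inbound_policy() below):
 *
 *	uint64_t unique = conn_to_unique(connp, mp, ipha, ip6h);
 *	if (unique == 0)
 *		... treat as an ENOMEM failure; caller frees the packet ...
 *	else
 *		(void) ipsec_check_ipsecin_unique(ira, &reason, &counter,
 *		    unique, ns);
 *
 * The zero return is a safe failure sentinel because latched conn_ts
 * always carry non-zero protocol/port values (see the comment above).
 */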
1794 1795
1795 1796 /*
1796 1797 * Called to check policy on a latched connection.
1797 1798 * Note that we don't dereference conn_latch or conn_ihere since the conn might
1798 1799 * be closing. The caller passes a held ipsec_latch_t instead.
1799 1800 */
1800 1801 static boolean_t
1801 1802 ipsec_check_ipsecin_latch(ip_recv_attr_t *ira, mblk_t *mp, ipsec_latch_t *ipl,
1802 1803 ipsec_action_t *ap, ipha_t *ipha, ip6_t *ip6h, const char **reason,
1803 1804 kstat_named_t **counter, conn_t *connp, netstack_t *ns)
1804 1805 {
1805 1806 ipsec_stack_t *ipss = ns->netstack_ipsec;
1806 1807
1807 1808 ASSERT(ipl->ipl_ids_latched == B_TRUE);
1808 1809 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1809 1810
1810 1811 if (!(ira->ira_flags & IRAF_LOOPBACK)) {
1811 1812 /*
1812 1813 * Over loopback, there aren't real security associations,
1813 1814 * so there are neither identities nor "unique" values
1814 1815 * for us to check the packet against.
1815 1816 */
1816 1817 if (ira->ira_ipsec_ah_sa != NULL) {
1817 1818 if (!spd_match_inbound_ids(ipl,
1818 1819 ira->ira_ipsec_ah_sa)) {
1819 1820 *counter = DROPPER(ipss, ipds_spd_ah_badid);
1820 1821 *reason = "AH identity mismatch";
1821 1822 return (B_FALSE);
1822 1823 }
1823 1824 }
1824 1825
1825 1826 if (ira->ira_ipsec_esp_sa != NULL) {
1826 1827 if (!spd_match_inbound_ids(ipl,
1827 1828 ira->ira_ipsec_esp_sa)) {
1828 1829 *counter = DROPPER(ipss, ipds_spd_esp_badid);
1829 1830 *reason = "ESP identity mismatch";
1830 1831 return (B_FALSE);
1831 1832 }
1832 1833 }
1833 1834
1834 1835 /*
1835 1836 * Can fudge pkt_unique from connp because we're latched.
1836 1837 * In DEBUG kernels (see conn_to_unique()'s implementation),
1837 1838 * verify this even if it REALLY slows things down.
1838 1839 */
1839 1840 if (!ipsec_check_ipsecin_unique(ira, reason, counter,
1840 1841 conn_to_unique(connp, mp, ipha, ip6h), ns)) {
1841 1842 return (B_FALSE);
1842 1843 }
1843 1844 }
1844 1845 return (ipsec_check_ipsecin_action(ira, mp, ap, ipha, ip6h, reason,
1845 1846 counter, ns));
1846 1847 }
1847 1848
1848 1849 /*
1849 1850 * Check to see whether this secured datagram meets the policy
1850 1851 * constraints specified in ipsp.
1851 1852 *
1852 1853 * Called from ipsec_check_global_policy, and ipsec_check_inbound_policy.
1853 1854 *
1854 1855 * Consumes a reference to ipsp.
1855 1856 * Returns the mblk if ok.
1856 1857 */
1857 1858 static mblk_t *
1858 1859 ipsec_check_ipsecin_policy(mblk_t *data_mp, ipsec_policy_t *ipsp,
1859 1860 ipha_t *ipha, ip6_t *ip6h, uint64_t pkt_unique, ip_recv_attr_t *ira,
1860 1861 netstack_t *ns)
1861 1862 {
1862 1863 ipsec_action_t *ap;
1863 1864 const char *reason = "no policy actions found";
1864 1865 ip_stack_t *ipst = ns->netstack_ip;
1865 1866 ipsec_stack_t *ipss = ns->netstack_ipsec;
1866 1867 kstat_named_t *counter;
1867 1868
1868 1869 counter = DROPPER(ipss, ipds_spd_got_secure);
1869 1870
1870 1871 ASSERT(ipsp != NULL);
1871 1872
1872 1873 ASSERT((ipha == NULL && ip6h != NULL) ||
1873 1874 (ip6h == NULL && ipha != NULL));
1874 1875
1875 1876 if (ira->ira_flags & IRAF_LOOPBACK)
1876 1877 return (ipsec_check_loopback_policy(data_mp, ira, ipsp));
1877 1878
1878 1879 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1879 1880
1880 1881 if (ira->ira_ipsec_action != NULL) {
1881 1882 /*
1882 1883 * this can happen if we do a double policy-check on a packet
1883 1884 * Would be nice to be able to delete this test..
1884 1885 */
1885 1886 IPACT_REFRELE(ira->ira_ipsec_action);
1886 1887 }
1887 1888 ASSERT(ira->ira_ipsec_action == NULL);
1888 1889
1889 1890 if (!SA_IDS_MATCH(ira->ira_ipsec_ah_sa, ira->ira_ipsec_esp_sa)) {
1890 1891 reason = "inbound AH and ESP identities differ";
1891 1892 counter = DROPPER(ipss, ipds_spd_ahesp_diffid);
1892 1893 goto drop;
1893 1894 }
1894 1895
1895 1896 if (!ipsec_check_ipsecin_unique(ira, &reason, &counter, pkt_unique,
1896 1897 ns))
1897 1898 goto drop;
1898 1899
1899 1900 /*
1900 1901 * Ok, now loop through the possible actions and see if any
1901 1902 * of them work for us.
1902 1903 */
1903 1904
1904 1905 for (ap = ipsp->ipsp_act; ap != NULL; ap = ap->ipa_next) {
1905 1906 if (ipsec_check_ipsecin_action(ira, data_mp, ap,
1906 1907 ipha, ip6h, &reason, &counter, ns)) {
1907 1908 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
1908 1909 IPPOL_REFRELE(ipsp);
1909 1910 return (data_mp);
1910 1911 }
1911 1912 }
1912 1913 drop:
1913 1914 ipsec_rl_strlog(ns, IP_MOD_ID, 0, 0, SL_ERROR|SL_WARN|SL_CONSOLE,
1914 1915 "ipsec inbound policy mismatch: %s, packet dropped\n",
1915 1916 reason);
1916 1917 IPPOL_REFRELE(ipsp);
1917 1918 ASSERT(ira->ira_ipsec_action == NULL);
1918 1919 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
1919 1920 ip_drop_packet(data_mp, B_TRUE, NULL, counter,
1920 1921 &ipss->ipsec_spd_dropper);
1921 1922 return (NULL);
1922 1923 }
1923 1924
1924 1925 /*
1925 1926 * sleazy prefix-length-based compare.
1926 1927 * another inlining candidate..
1927 1928 */
1928 1929 boolean_t
1929 1930 ip_addr_match(uint8_t *addr1, int pfxlen, in6_addr_t *addr2p)
1930 1931 {
1931 1932 int offset = pfxlen>>3;
1932 1933 int bitsleft = pfxlen & 7;
1933 1934 uint8_t *addr2 = (uint8_t *)addr2p;
1934 1935
1935 1936 /*
1936 1937 * and there was much evil..
1937 1938 * XXX should inline-expand the bcmp here and do this 32 bits
1938 1939 * or 64 bits at a time..
1939 1940 */
1940 1941 return ((bcmp(addr1, addr2, offset) == 0) &&
1941 1942 ((bitsleft == 0) ||
1942 1943 (((addr1[offset] ^ addr2[offset]) & (0xff<<(8-bitsleft))) == 0)));
1943 1944 }
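/*
 * Worked example: pfxlen = 20 gives offset = 2 and bitsleft = 4, so the
 * first two bytes are compared with bcmp() and the third byte only under
 * the mask 0xff << (8 - 4) == 0xf0, i.e. its top four bits.  pfxlen = 0
 * matches everything, and pfxlen = 128 degenerates to a full 16-byte
 * bcmp().
 */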
1944 1945
1945 1946 static ipsec_policy_t *
1946 1947 ipsec_find_policy_chain(ipsec_policy_t *best, ipsec_policy_t *chain,
1947 1948 ipsec_selector_t *sel, boolean_t is_icmp_inv_acq)
1948 1949 {
1949 1950 ipsec_selkey_t *isel;
1950 1951 ipsec_policy_t *p;
1951 1952 int bpri = best ? best->ipsp_prio : 0;
1952 1953
1953 1954 for (p = chain; p != NULL; p = p->ipsp_hash.hash_next) {
1954 1955 uint32_t valid;
1955 1956
1956 1957 if (p->ipsp_prio <= bpri)
1957 1958 continue;
1958 1959 isel = &p->ipsp_sel->ipsl_key;
1959 1960 valid = isel->ipsl_valid;
1960 1961
1961 1962 if ((valid & IPSL_PROTOCOL) &&
1962 1963 (isel->ipsl_proto != sel->ips_protocol))
1963 1964 continue;
1964 1965
1965 1966 if ((valid & IPSL_REMOTE_ADDR) &&
1966 1967 !ip_addr_match((uint8_t *)&isel->ipsl_remote,
1967 1968 isel->ipsl_remote_pfxlen, &sel->ips_remote_addr_v6))
1968 1969 continue;
1969 1970
1970 1971 if ((valid & IPSL_LOCAL_ADDR) &&
1971 1972 !ip_addr_match((uint8_t *)&isel->ipsl_local,
1972 1973 isel->ipsl_local_pfxlen, &sel->ips_local_addr_v6))
1973 1974 continue;
1974 1975
1975 1976 if ((valid & IPSL_REMOTE_PORT) &&
1976 1977 isel->ipsl_rport != sel->ips_remote_port)
1977 1978 continue;
1978 1979
1979 1980 if ((valid & IPSL_LOCAL_PORT) &&
1980 1981 isel->ipsl_lport != sel->ips_local_port)
1981 1982 continue;
1982 1983
1983 1984 if (!is_icmp_inv_acq) {
1984 1985 if ((valid & IPSL_ICMP_TYPE) &&
1985 1986 (isel->ipsl_icmp_type > sel->ips_icmp_type ||
1986 1987 isel->ipsl_icmp_type_end < sel->ips_icmp_type)) {
1987 1988 continue;
1988 1989 }
1989 1990
1990 1991 if ((valid & IPSL_ICMP_CODE) &&
1991 1992 (isel->ipsl_icmp_code > sel->ips_icmp_code ||
1992 1993 isel->ipsl_icmp_code_end <
1993 1994 sel->ips_icmp_code)) {
1994 1995 continue;
1995 1996 }
1996 1997 } else {
1997 1998 /*
1998 1999 * special case for icmp inverse acquire
1999 2000 			 * Special case for ICMP inverse acquire:
2000 2001 			 * we only want policies that aren't drop/pass.
2001 2002 if (p->ipsp_act->ipa_act.ipa_type != IPSEC_ACT_APPLY)
2002 2003 continue;
2003 2004 }
2004 2005
2005 2006 /* we matched all the packet-port-field selectors! */
2006 2007 best = p;
2007 2008 bpri = p->ipsp_prio;
2008 2009 }
2009 2010
2010 2011 return (best);
2011 2012 }
2012 2013
2013 2014 /*
2014 2015 * Try to find and return the best policy entry under a given policy
2015 2016 * root for a given set of selectors; the first parameter "best" is
2016 2017 * the current best policy so far. If "best" is non-null, we have a
2017 2018 * reference to it. We return a reference to a policy; if that policy
2018 2019 * is not the original "best", we need to release that reference
2019 2020 * before returning.
2020 2021 */
2021 2022 ipsec_policy_t *
2022 2023 ipsec_find_policy_head(ipsec_policy_t *best, ipsec_policy_head_t *head,
2023 2024 int direction, ipsec_selector_t *sel)
2024 2025 {
2025 2026 ipsec_policy_t *curbest;
2026 2027 ipsec_policy_root_t *root;
2027 2028 uint8_t is_icmp_inv_acq = sel->ips_is_icmp_inv_acq;
2028 2029 int af = sel->ips_isv4 ? IPSEC_AF_V4 : IPSEC_AF_V6;
2029 2030
2030 2031 curbest = best;
2031 2032 root = &head->iph_root[direction];
2032 2033
2033 2034 #ifdef DEBUG
2034 2035 if (is_icmp_inv_acq) {
2035 2036 if (sel->ips_isv4) {
2036 2037 if (sel->ips_protocol != IPPROTO_ICMP) {
2037 2038 cmn_err(CE_WARN, "ipsec_find_policy_head:"
2038 2039 " expecting icmp, got %d",
2039 2040 sel->ips_protocol);
2040 2041 }
2041 2042 } else {
2042 2043 if (sel->ips_protocol != IPPROTO_ICMPV6) {
2043 2044 cmn_err(CE_WARN, "ipsec_find_policy_head:"
2044 2045 " expecting icmpv6, got %d",
2045 2046 sel->ips_protocol);
2046 2047 }
2047 2048 }
2048 2049 }
2049 2050 #endif
2050 2051
2051 2052 rw_enter(&head->iph_lock, RW_READER);
2052 2053
2053 2054 if (root->ipr_nchains > 0) {
2054 2055 curbest = ipsec_find_policy_chain(curbest,
2055 2056 root->ipr_hash[selector_hash(sel, root)].hash_head, sel,
2056 2057 is_icmp_inv_acq);
2057 2058 }
2058 2059 curbest = ipsec_find_policy_chain(curbest, root->ipr_nonhash[af], sel,
2059 2060 is_icmp_inv_acq);
2060 2061
2061 2062 /*
2062 2063 * Adjust reference counts if we found anything new.
2063 2064 */
2064 2065 if (curbest != best) {
2065 2066 ASSERT(curbest != NULL);
2066 2067 IPPOL_REFHOLD(curbest);
2067 2068
2068 2069 if (best != NULL) {
2069 2070 IPPOL_REFRELE(best);
2070 2071 }
2071 2072 }
2072 2073
2073 2074 rw_exit(&head->iph_lock);
2074 2075
2075 2076 return (curbest);
2076 2077 }
2077 2078
2078 2079 /*
2079 2080 * Find the best system policy (either global or per-interface) which
2080 2081 * applies to the given selector; look in all the relevant policy roots
2081 2082 * to figure out which policy wins.
2082 2083 *
2083 2084 * Returns a reference to a policy; caller must release this
2084 2085 * reference when done.
2085 2086 */
2086 2087 ipsec_policy_t *
2087 2088 ipsec_find_policy(int direction, const conn_t *connp, ipsec_selector_t *sel,
2088 2089 netstack_t *ns)
2089 2090 {
2090 2091 ipsec_policy_t *p;
2091 2092 ipsec_stack_t *ipss = ns->netstack_ipsec;
2092 2093
2093 2094 p = ipsec_find_policy_head(NULL, &ipss->ipsec_system_policy,
2094 2095 direction, sel);
2095 2096 if ((connp != NULL) && (connp->conn_policy != NULL)) {
2096 2097 p = ipsec_find_policy_head(p, connp->conn_policy,
2097 2098 direction, sel);
2098 2099 }
2099 2100
2100 2101 return (p);
2101 2102 }
2102 2103
2103 2104 /*
2104 2105 * Check with global policy and see whether this inbound
2105 2106 * packet meets the policy constraints.
2106 2107 *
2107 2108 * Locate appropriate policy from global policy, supplemented by the
2108 2109 * conn's configured and/or cached policy if the conn is supplied.
2109 2110 *
2110 2111 * Dispatch to ipsec_check_ipsecin_policy if we have policy and an
2111 2112 * encrypted packet to see if they match.
2112 2113 *
2113 2114 * Otherwise, see if the policy allows cleartext; if not, drop it on the
2114 2115 * floor.
2115 2116 */
2116 2117 mblk_t *
2117 2118 ipsec_check_global_policy(mblk_t *data_mp, conn_t *connp,
2118 2119 ipha_t *ipha, ip6_t *ip6h, ip_recv_attr_t *ira, netstack_t *ns)
2119 2120 {
2120 2121 ipsec_policy_t *p;
2121 2122 ipsec_selector_t sel;
2122 2123 boolean_t policy_present;
2123 2124 kstat_named_t *counter;
2124 2125 uint64_t pkt_unique;
2125 2126 ip_stack_t *ipst = ns->netstack_ip;
2126 2127 ipsec_stack_t *ipss = ns->netstack_ipsec;
2127 2128
2128 2129 sel.ips_is_icmp_inv_acq = 0;
2129 2130
2130 2131 ASSERT((ipha == NULL && ip6h != NULL) ||
2131 2132 (ip6h == NULL && ipha != NULL));
2132 2133
2133 2134 if (ipha != NULL)
2134 2135 policy_present = ipss->ipsec_inbound_v4_policy_present;
2135 2136 else
2136 2137 policy_present = ipss->ipsec_inbound_v6_policy_present;
2137 2138
2138 2139 if (!policy_present && connp == NULL) {
2139 2140 /*
2140 2141 * No global policy and no per-socket policy;
2141 2142 * just pass it back (but we shouldn't get here in that case)
2142 2143 */
2143 2144 return (data_mp);
2144 2145 }
2145 2146
2146 2147 /*
2147 2148 * If we have cached policy, use it.
2148 2149 * Otherwise consult system policy.
2149 2150 */
2150 2151 if ((connp != NULL) && (connp->conn_latch != NULL)) {
2151 2152 p = connp->conn_latch_in_policy;
2152 2153 if (p != NULL) {
2153 2154 IPPOL_REFHOLD(p);
2154 2155 }
2155 2156 /*
2156 2157 * Fudge sel for UNIQUE_ID setting below.
2157 2158 */
2158 2159 pkt_unique = conn_to_unique(connp, data_mp, ipha, ip6h);
2159 2160 } else {
2160 2161 /* Initialize the ports in the selector */
2161 2162 if (ipsec_init_inbound_sel(&sel, data_mp, ipha, ip6h,
2162 2163 SEL_NONE) == SELRET_NOMEM) {
2163 2164 /*
2164 2165 * Technically not a policy mismatch, but it is
2165 2166 * an internal failure.
2166 2167 */
2167 2168 ipsec_log_policy_failure(IPSEC_POLICY_MISMATCH,
2168 2169 "ipsec_init_inbound_sel", ipha, ip6h, B_TRUE, ns);
2169 2170 counter = DROPPER(ipss, ipds_spd_nomem);
2170 2171 goto fail;
2171 2172 }
2172 2173
2173 2174 /*
2174 2175 * Find the policy which best applies.
2175 2176 *
2176 2177 * If we find global policy, we should look at both
2177 2178 * local policy and global policy and see which is
2178 2179 * stronger and match accordingly.
2179 2180 *
2180 2181 * If we don't find a global policy, check with
2181 2182 * local policy alone.
2182 2183 */
2183 2184
2184 2185 p = ipsec_find_policy(IPSEC_TYPE_INBOUND, connp, &sel, ns);
2185 2186 pkt_unique = SA_UNIQUE_ID(sel.ips_remote_port,
2186 2187 sel.ips_local_port, sel.ips_protocol, 0);
2187 2188 }
2188 2189
2189 2190 if (p == NULL) {
2190 2191 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
2191 2192 /*
2192 2193 * We have no policy; default to succeeding.
2193 2194 * XXX paranoid system design doesn't do this.
2194 2195 */
2195 2196 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2196 2197 return (data_mp);
2197 2198 } else {
2198 2199 counter = DROPPER(ipss, ipds_spd_got_secure);
2199 2200 ipsec_log_policy_failure(IPSEC_POLICY_NOT_NEEDED,
2200 2201 "ipsec_check_global_policy", ipha, ip6h, B_TRUE,
2201 2202 ns);
2202 2203 goto fail;
2203 2204 }
2204 2205 }
2205 2206 if (ira->ira_flags & IRAF_IPSEC_SECURE) {
2206 2207 return (ipsec_check_ipsecin_policy(data_mp, p, ipha, ip6h,
2207 2208 pkt_unique, ira, ns));
2208 2209 }
2209 2210 if (p->ipsp_act->ipa_allow_clear) {
2210 2211 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2211 2212 IPPOL_REFRELE(p);
2212 2213 return (data_mp);
2213 2214 }
2214 2215 IPPOL_REFRELE(p);
2215 2216 /*
2216 2217 	 * If we reach here, we will drop the packet: it failed the global
2217 2218 	 * policy check because the packet was cleartext when it should
2218 2219 	 * not have been.
2219 2220 */
2220 2221 ipsec_log_policy_failure(IPSEC_POLICY_MISMATCH,
2221 2222 "ipsec_check_global_policy", ipha, ip6h, B_FALSE, ns);
2222 2223 counter = DROPPER(ipss, ipds_spd_got_clear);
2223 2224
2224 2225 fail:
2225 2226 ip_drop_packet(data_mp, B_TRUE, NULL, counter,
2226 2227 &ipss->ipsec_spd_dropper);
2227 2228 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2228 2229 return (NULL);
2229 2230 }
2230 2231
2231 2232 /*
2232 2233 * We check whether an inbound datagram is a valid one
2233 2234 * to accept in clear. If it is secure, it is the job
2234 2235 * of IPSEC to log information appropriately if it
2235 2236 * suspects that it may not be the real one.
2236 2237 *
2237 2238 * It is called only while fanning out to the ULP
2238 2239 * where ULP accepts only secure data and the incoming
2239 2240 * is clear. Usually we never accept clear datagrams in
2240 2241 * such cases. ICMP is the only exception.
2241 2242 *
2242 2243 * NOTE : We don't call this function if the client (ULP)
2243 2244 * is willing to accept things in clear.
2244 2245 */
2245 2246 boolean_t
2246 2247 ipsec_inbound_accept_clear(mblk_t *mp, ipha_t *ipha, ip6_t *ip6h)
2247 2248 {
2248 2249 ushort_t iph_hdr_length;
2249 2250 icmph_t *icmph;
2250 2251 icmp6_t *icmp6;
2251 2252 uint8_t *nexthdrp;
2252 2253
2253 2254 ASSERT((ipha != NULL && ip6h == NULL) ||
2254 2255 (ipha == NULL && ip6h != NULL));
2255 2256
2256 2257 if (ip6h != NULL) {
2257 2258 iph_hdr_length = ip_hdr_length_v6(mp, ip6h);
2258 2259 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length,
2259 2260 &nexthdrp)) {
2260 2261 return (B_FALSE);
2261 2262 }
2262 2263 if (*nexthdrp != IPPROTO_ICMPV6)
2263 2264 return (B_FALSE);
2264 2265 icmp6 = (icmp6_t *)(&mp->b_rptr[iph_hdr_length]);
2265 2266 		/* Match IPv6 ICMP policy as closely to IPv4 as possible. */
2266 2267 switch (icmp6->icmp6_type) {
2267 2268 case ICMP6_PARAM_PROB:
2268 2269 /* Corresponds to port/proto unreach in IPv4. */
2269 2270 case ICMP6_ECHO_REQUEST:
2270 2271 /* Just like IPv4. */
2271 2272 return (B_FALSE);
2272 2273
2273 2274 case MLD_LISTENER_QUERY:
2274 2275 case MLD_LISTENER_REPORT:
2275 2276 case MLD_LISTENER_REDUCTION:
2276 2277 /*
2277 2278 			 * XXX Separate NDD in IPv4; what about here?
2278 2279 * Plus, mcast is important to ND.
2279 2280 */
2280 2281 case ICMP6_DST_UNREACH:
2281 2282 /* Corresponds to HOST/NET unreachable in IPv4. */
2282 2283 case ICMP6_PACKET_TOO_BIG:
2283 2284 case ICMP6_ECHO_REPLY:
2284 2285 /* These are trusted in IPv4. */
2285 2286 case ND_ROUTER_SOLICIT:
2286 2287 case ND_ROUTER_ADVERT:
2287 2288 case ND_NEIGHBOR_SOLICIT:
2288 2289 case ND_NEIGHBOR_ADVERT:
2289 2290 case ND_REDIRECT:
2290 2291 /* Trust ND messages for now. */
2291 2292 case ICMP6_TIME_EXCEEDED:
2292 2293 default:
2293 2294 return (B_TRUE);
2294 2295 }
2295 2296 } else {
2296 2297 /*
2297 2298 * If it is not ICMP, fail this request.
2298 2299 */
2299 2300 if (ipha->ipha_protocol != IPPROTO_ICMP) {
2300 2301 #ifdef FRAGCACHE_DEBUG
2301 2302 cmn_err(CE_WARN, "Dropping - ipha_proto = %d\n",
2302 2303 ipha->ipha_protocol);
2303 2304 #endif
2304 2305 return (B_FALSE);
2305 2306 }
2306 2307 iph_hdr_length = IPH_HDR_LENGTH(ipha);
2307 2308 icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length];
2308 2309 /*
2309 2310 * It is an insecure icmp message. Check to see whether we are
2310 2311 * willing to accept this one.
2311 2312 */
2312 2313
2313 2314 switch (icmph->icmph_type) {
2314 2315 case ICMP_ECHO_REPLY:
2315 2316 case ICMP_TIME_STAMP_REPLY:
2316 2317 case ICMP_INFO_REPLY:
2317 2318 case ICMP_ROUTER_ADVERTISEMENT:
2318 2319 /*
2319 2320 * We should not encourage clear replies if this
2320 2321 		 * client expects secure. If somebody replies in the
2321 2322 		 * clear, a malicious user watching both the request and
2322 2323 		 * the reply can mount chosen-plaintext attacks.
2323 2324 		 * With global policy we might be expecting secure
2324 2325 		 * traffic but sending out clear. We don't know what the
2325 2326 		 * right thing is, and we can't do much here since we
2326 2327 		 * can't control the sender. Until we are sure of what
2327 2328 		 * to do, accept them.
2328 2329 */
2329 2330 return (B_TRUE);
2330 2331 case ICMP_ECHO_REQUEST:
2331 2332 case ICMP_TIME_STAMP_REQUEST:
2332 2333 case ICMP_INFO_REQUEST:
2333 2334 case ICMP_ADDRESS_MASK_REQUEST:
2334 2335 case ICMP_ROUTER_SOLICITATION:
2335 2336 case ICMP_ADDRESS_MASK_REPLY:
2336 2337 /*
2337 2338 * Don't accept this as somebody could be sending
2338 2339 * us plain text to get encrypted data. If we reply,
2339 2340 			 * it could enable a chosen-plaintext attack.
2340 2341 */
2341 2342 return (B_FALSE);
2342 2343 case ICMP_DEST_UNREACHABLE:
2343 2344 switch (icmph->icmph_code) {
2344 2345 case ICMP_FRAGMENTATION_NEEDED:
2345 2346 /*
2346 2347 * Be in sync with icmp_inbound, where we have
2347 2348 * already set dce_pmtu
2348 2349 */
2349 2350 #ifdef FRAGCACHE_DEBUG
2350 2351 cmn_err(CE_WARN, "ICMP frag needed\n");
2351 2352 #endif
2352 2353 return (B_TRUE);
2353 2354 case ICMP_HOST_UNREACHABLE:
2354 2355 case ICMP_NET_UNREACHABLE:
2355 2356 /*
2356 2357 * By accepting, we could reset a connection.
2357 2358 * How do we solve the problem of some
2358 2359 				 * intermediate router sending insecure ICMP
2359 2360 				 * messages?
2360 2361 */
2361 2362 return (B_TRUE);
2362 2363 case ICMP_PORT_UNREACHABLE:
2363 2364 case ICMP_PROTOCOL_UNREACHABLE:
2364 2365 default :
2365 2366 return (B_FALSE);
2366 2367 }
2367 2368 case ICMP_SOURCE_QUENCH:
2368 2369 /*
2369 2370 * If this is an attack, TCP will slow start
2370 2371 			 * because of this. Is it very harmful?
2371 2372 */
2372 2373 return (B_TRUE);
2373 2374 case ICMP_PARAM_PROBLEM:
2374 2375 return (B_FALSE);
2375 2376 case ICMP_TIME_EXCEEDED:
2376 2377 return (B_TRUE);
2377 2378 case ICMP_REDIRECT:
2378 2379 return (B_FALSE);
2379 2380 default :
2380 2381 return (B_FALSE);
2381 2382 }
2382 2383 }
2383 2384 }
2384 2385
2385 2386 void
2386 2387 ipsec_latch_ids(ipsec_latch_t *ipl, ipsid_t *local, ipsid_t *remote)
2387 2388 {
2388 2389 mutex_enter(&ipl->ipl_lock);
2389 2390
2390 2391 if (ipl->ipl_ids_latched) {
2391 2392 /* I lost, someone else got here before me */
2392 2393 mutex_exit(&ipl->ipl_lock);
2393 2394 return;
2394 2395 }
2395 2396
2396 2397 if (local != NULL)
2397 2398 IPSID_REFHOLD(local);
2398 2399 if (remote != NULL)
2399 2400 IPSID_REFHOLD(remote);
2400 2401
2401 2402 ipl->ipl_local_cid = local;
2402 2403 ipl->ipl_remote_cid = remote;
2403 2404 ipl->ipl_ids_latched = B_TRUE;
2404 2405 mutex_exit(&ipl->ipl_lock);
2405 2406 }
2406 2407
2407 2408 void
2408 2409 ipsec_latch_inbound(conn_t *connp, ip_recv_attr_t *ira)
2409 2410 {
2410 2411 ipsa_t *sa;
2411 2412 ipsec_latch_t *ipl = connp->conn_latch;
2412 2413
2413 2414 if (!ipl->ipl_ids_latched) {
2414 2415 ipsid_t *local = NULL;
2415 2416 ipsid_t *remote = NULL;
2416 2417
2417 2418 if (!(ira->ira_flags & IRAF_LOOPBACK)) {
2418 2419 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
2419 2420 if (ira->ira_ipsec_esp_sa != NULL)
2420 2421 sa = ira->ira_ipsec_esp_sa;
2421 2422 else
2422 2423 sa = ira->ira_ipsec_ah_sa;
2423 2424 ASSERT(sa != NULL);
2424 2425 local = sa->ipsa_dst_cid;
2425 2426 remote = sa->ipsa_src_cid;
2426 2427 }
2427 2428 ipsec_latch_ids(ipl, local, remote);
2428 2429 }
2429 2430 if (ira->ira_flags & IRAF_IPSEC_SECURE) {
2430 2431 if (connp->conn_latch_in_action != NULL) {
2431 2432 /*
2432 2433 * Previously cached action. This is probably
2433 2434 * harmless, but in DEBUG kernels, check for
2434 2435 * action equality.
2435 2436 *
2436 2437 * Preserve the existing action to preserve latch
2437 2438 * invariance.
2438 2439 */
2439 2440 ASSERT(connp->conn_latch_in_action ==
2440 2441 ira->ira_ipsec_action);
2441 2442 return;
2442 2443 }
2443 2444 connp->conn_latch_in_action = ira->ira_ipsec_action;
2444 2445 IPACT_REFHOLD(connp->conn_latch_in_action);
2445 2446 }
2446 2447 }
2447 2448
2448 2449 /*
2449 2450  * Check whether the policy constraints are met for an
2450 2451 * inbound datagram; called from IP in numerous places.
2451 2452 *
2452 2453 * Note that this is not a chokepoint for inbound policy checks;
2453 2454 * see also ipsec_check_ipsecin_latch() and ipsec_check_global_policy()
2454 2455 */
2455 2456 mblk_t *
2456 2457 ipsec_check_inbound_policy(mblk_t *mp, conn_t *connp,
2457 2458 ipha_t *ipha, ip6_t *ip6h, ip_recv_attr_t *ira)
2458 2459 {
2459 2460 boolean_t ret;
2460 2461 ipsec_latch_t *ipl;
2461 2462 ipsec_action_t *ap;
2462 2463 uint64_t unique_id;
2463 2464 ipsec_stack_t *ipss;
2464 2465 ip_stack_t *ipst;
2465 2466 netstack_t *ns;
2466 2467 ipsec_policy_head_t *policy_head;
2467 2468 ipsec_policy_t *p = NULL;
2468 2469
2469 2470 ASSERT(connp != NULL);
2470 2471 ns = connp->conn_netstack;
2471 2472 ipss = ns->netstack_ipsec;
2472 2473 ipst = ns->netstack_ip;
2473 2474
2474 2475 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
2475 2476 /*
2476 2477 * This is the case where the incoming datagram is
2477 2478 * cleartext and we need to see whether this client
2478 2479 * would like to receive such untrustworthy things from
2479 2480 * the wire.
2480 2481 */
2481 2482 ASSERT(mp != NULL);
2482 2483
2483 2484 mutex_enter(&connp->conn_lock);
2484 2485 if (connp->conn_state_flags & CONN_CONDEMNED) {
2485 2486 mutex_exit(&connp->conn_lock);
2486 2487 ip_drop_packet(mp, B_TRUE, NULL,
2487 2488 DROPPER(ipss, ipds_spd_got_clear),
2488 2489 &ipss->ipsec_spd_dropper);
2489 2490 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2490 2491 return (NULL);
2491 2492 }
2492 2493 if (connp->conn_latch != NULL) {
2493 2494 /* Hold a reference in case the conn is closing */
2494 2495 p = connp->conn_latch_in_policy;
2495 2496 if (p != NULL)
2496 2497 IPPOL_REFHOLD(p);
2497 2498 mutex_exit(&connp->conn_lock);
2498 2499 /*
2499 2500 * Policy is cached in the conn.
2500 2501 */
2501 2502 if (p != NULL && !p->ipsp_act->ipa_allow_clear) {
2502 2503 ret = ipsec_inbound_accept_clear(mp,
2503 2504 ipha, ip6h);
2504 2505 if (ret) {
2505 2506 BUMP_MIB(&ipst->ips_ip_mib,
2506 2507 ipsecInSucceeded);
2507 2508 IPPOL_REFRELE(p);
2508 2509 return (mp);
2509 2510 } else {
2510 2511 ipsec_log_policy_failure(
2511 2512 IPSEC_POLICY_MISMATCH,
2512 2513 "ipsec_check_inbound_policy", ipha,
2513 2514 ip6h, B_FALSE, ns);
2514 2515 ip_drop_packet(mp, B_TRUE, NULL,
2515 2516 DROPPER(ipss, ipds_spd_got_clear),
2516 2517 &ipss->ipsec_spd_dropper);
2517 2518 BUMP_MIB(&ipst->ips_ip_mib,
2518 2519 ipsecInFailed);
2519 2520 IPPOL_REFRELE(p);
2520 2521 return (NULL);
2521 2522 }
2522 2523 } else {
2523 2524 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2524 2525 if (p != NULL)
2525 2526 IPPOL_REFRELE(p);
2526 2527 return (mp);
2527 2528 }
2528 2529 } else {
2529 2530 policy_head = connp->conn_policy;
2530 2531
2531 2532 /* Hold a reference in case the conn is closing */
2532 2533 if (policy_head != NULL)
2533 2534 IPPH_REFHOLD(policy_head);
2534 2535 mutex_exit(&connp->conn_lock);
2535 2536 /*
2536 2537 * As this is a non-hardbound connection we need
2537 2538 * to look at both per-socket policy and global
2538 2539 * policy.
2539 2540 */
2540 2541 mp = ipsec_check_global_policy(mp, connp,
2541 2542 ipha, ip6h, ira, ns);
2542 2543 if (policy_head != NULL)
2543 2544 IPPH_REFRELE(policy_head, ns);
2544 2545 return (mp);
2545 2546 }
2546 2547 }
2547 2548
2548 2549 mutex_enter(&connp->conn_lock);
2549 2550 /* Connection is closing */
2550 2551 if (connp->conn_state_flags & CONN_CONDEMNED) {
2551 2552 mutex_exit(&connp->conn_lock);
2552 2553 ip_drop_packet(mp, B_TRUE, NULL,
2553 2554 DROPPER(ipss, ipds_spd_got_clear),
2554 2555 &ipss->ipsec_spd_dropper);
2555 2556 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2556 2557 return (NULL);
2557 2558 }
2558 2559
2559 2560 /*
2560 2561 	 * Once a connection is latched it remains so for life; the conn_latch
2561 2562 	 * pointer on the conn has not changed. We simply initialize ipl here,
2562 2563 	 * as the earlier initialization was done only in the cleartext case.
2563 2564 */
2564 2565 if ((ipl = connp->conn_latch) == NULL) {
2565 2566 mblk_t *retmp;
2566 2567 policy_head = connp->conn_policy;
2567 2568
2568 2569 /* Hold a reference in case the conn is closing */
2569 2570 if (policy_head != NULL)
2570 2571 IPPH_REFHOLD(policy_head);
2571 2572 mutex_exit(&connp->conn_lock);
2572 2573 /*
2573 2574 * We don't have policies cached in the conn
2574 2575 * for this stream. So, look at the global
2575 2576 * policy. It will check against conn or global
2576 2577 * depending on whichever is stronger.
2577 2578 */
2578 2579 retmp = ipsec_check_global_policy(mp, connp,
2579 2580 ipha, ip6h, ira, ns);
2580 2581 if (policy_head != NULL)
2581 2582 IPPH_REFRELE(policy_head, ns);
2582 2583 return (retmp);
2583 2584 }
2584 2585
2585 2586 IPLATCH_REFHOLD(ipl);
2586 2587 /* Hold reference on conn_latch_in_action in case conn is closing */
2587 2588 ap = connp->conn_latch_in_action;
2588 2589 if (ap != NULL)
2589 2590 IPACT_REFHOLD(ap);
2590 2591 mutex_exit(&connp->conn_lock);
2591 2592
2592 2593 if (ap != NULL) {
2593 2594 /* Policy is cached & latched; fast(er) path */
2594 2595 const char *reason;
2595 2596 kstat_named_t *counter;
2596 2597
2597 2598 if (ipsec_check_ipsecin_latch(ira, mp, ipl, ap,
2598 2599 ipha, ip6h, &reason, &counter, connp, ns)) {
2599 2600 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2600 2601 IPLATCH_REFRELE(ipl);
2601 2602 IPACT_REFRELE(ap);
2602 2603 return (mp);
2603 2604 }
2604 2605 ipsec_rl_strlog(ns, IP_MOD_ID, 0, 0,
2605 2606 SL_ERROR|SL_WARN|SL_CONSOLE,
2606 2607 "ipsec inbound policy mismatch: %s, packet dropped\n",
2607 2608 reason);
2608 2609 ip_drop_packet(mp, B_TRUE, NULL, counter,
2609 2610 &ipss->ipsec_spd_dropper);
2610 2611 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2611 2612 IPLATCH_REFRELE(ipl);
2612 2613 IPACT_REFRELE(ap);
2613 2614 return (NULL);
2614 2615 }
2615 2616 if ((p = connp->conn_latch_in_policy) == NULL) {
2616 2617 ipsec_weird_null_inbound_policy++;
2617 2618 IPLATCH_REFRELE(ipl);
2618 2619 return (mp);
2619 2620 }
2620 2621
2621 2622 unique_id = conn_to_unique(connp, mp, ipha, ip6h);
2622 2623 IPPOL_REFHOLD(p);
2623 2624 mp = ipsec_check_ipsecin_policy(mp, p, ipha, ip6h, unique_id, ira, ns);
2624 2625 /*
2625 2626 	 * NOTE: ipsecIn{Failed,Succeeded} bumped by
2626 2627 * ipsec_check_ipsecin_policy().
2627 2628 */
2628 2629 if (mp != NULL)
2629 2630 ipsec_latch_inbound(connp, ira);
2630 2631 IPLATCH_REFRELE(ipl);
2631 2632 return (mp);
2632 2633 }
2633 2634
2634 2635 /*
2635 2636  * Count the bytes that precede "hptr" in "mp" (tunnel-mode, ICMP, etc.).
2636 2637 */
2637 2638 static int
2638 2639 prepended_length(mblk_t *mp, uintptr_t hptr)
2639 2640 {
2640 2641 int rc = 0;
2641 2642
2642 2643 while (mp != NULL) {
2643 2644 if (hptr >= (uintptr_t)mp->b_rptr && hptr <
2644 2645 (uintptr_t)mp->b_wptr) {
2645 2646 rc += (int)(hptr - (uintptr_t)mp->b_rptr);
2646 2647 break; /* out of while loop */
2647 2648 }
2648 2649 rc += (int)MBLKL(mp);
2649 2650 mp = mp->b_cont;
2650 2651 }
2651 2652
2652 2653 if (mp == NULL) {
2653 2654 /*
2654 2655 * IF (big IF) we make it here by naturally exiting the loop,
2655 2656 * then ip6h isn't in the mblk chain "mp" at all.
2656 2657 *
2657 2658 * The only case where this happens is with a reversed IP
2658 2659 * header that gets passed up by inbound ICMP processing.
2659 2660 * This unfortunately triggers longstanding bug 6478464. For
2660 2661 * now, just pass up 0 for the answer.
2661 2662 */
2662 2663 #ifdef DEBUG_NOT_UNTIL_6478464
2663 2664 ASSERT(mp != NULL);
2664 2665 #endif
2665 2666 rc = 0;
2666 2667 }
2667 2668
2668 2669 return (rc);
2669 2670 }
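/*
 * For example, if "mp" is a two-mblk chain whose first block holds a
 * 20-byte prepended outer header and "hptr" points 8 bytes into the
 * second block, the loop above accumulates 20 (the whole first block)
 * plus 8 and returns 28: the number of bytes preceding the header that
 * hptr points at.
 */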
2670 2671
2671 2672 /*
2672 2673 * Returns:
2673 2674 *
2674 2675 * SELRET_NOMEM --> msgpullup() needed to gather things failed.
2675 2676 * SELRET_BADPKT --> If we're being called after tunnel-mode fragment
2676 2677 * gathering, the initial fragment is too short for
2677 2678 * useful data. Only returned if SEL_TUNNEL_FIRSTFRAG is
2678 2679 * set.
2679 2680 * SELRET_SUCCESS --> "sel" now has initialized IPsec selector data.
2680 2681 * SELRET_TUNFRAG --> This is a fragment in a tunnel-mode packet. Caller
2681 2682 * should put this packet in a fragment-gathering queue.
2682 2683 * Only returned if SEL_TUNNEL_MODE and SEL_PORT_POLICY
2683 2684  *			are set.
2684 2685 *
2685 2686 * Note that ipha/ip6h can be in a different mblk (mp->b_cont) in the case
2686 2687 * of tunneled packets.
2687 2688 * Also, mp->b_rptr can be an ICMP error where ipha/ip6h is the packet in
2688 2689 * error past the ICMP error.
2689 2690 */
2690 2691 static selret_t
2691 2692 ipsec_init_inbound_sel(ipsec_selector_t *sel, mblk_t *mp, ipha_t *ipha,
2692 2693 ip6_t *ip6h, uint8_t sel_flags)
2693 2694 {
2694 2695 uint16_t *ports;
2695 2696 int outer_hdr_len = 0; /* For ICMP or tunnel-mode cases... */
2696 2697 ushort_t hdr_len;
2697 2698 mblk_t *spare_mp = NULL;
2698 2699 uint8_t *nexthdrp, *transportp;
2699 2700 uint8_t nexthdr;
2700 2701 uint8_t icmp_proto;
2701 2702 ip_pkt_t ipp;
2702 2703 boolean_t port_policy_present = (sel_flags & SEL_PORT_POLICY);
2703 2704 boolean_t is_icmp = (sel_flags & SEL_IS_ICMP);
2704 2705 boolean_t tunnel_mode = (sel_flags & SEL_TUNNEL_MODE);
2705 2706 boolean_t post_frag = (sel_flags & SEL_POST_FRAG);
2706 2707
2707 2708 ASSERT((ipha == NULL && ip6h != NULL) ||
2708 2709 (ipha != NULL && ip6h == NULL));
2709 2710
2710 2711 if (ip6h != NULL) {
2711 2712 outer_hdr_len = prepended_length(mp, (uintptr_t)ip6h);
2712 2713 nexthdr = ip6h->ip6_nxt;
2713 2714 icmp_proto = IPPROTO_ICMPV6;
2714 2715 sel->ips_isv4 = B_FALSE;
2715 2716 sel->ips_local_addr_v6 = ip6h->ip6_dst;
2716 2717 sel->ips_remote_addr_v6 = ip6h->ip6_src;
2717 2718
2718 2719 bzero(&ipp, sizeof (ipp));
2719 2720
2720 2721 switch (nexthdr) {
2721 2722 case IPPROTO_HOPOPTS:
2722 2723 case IPPROTO_ROUTING:
2723 2724 case IPPROTO_DSTOPTS:
2724 2725 case IPPROTO_FRAGMENT:
2725 2726 /*
2726 2727 * Use ip_hdr_length_nexthdr_v6(). And have a spare
2727 2728 * mblk that's contiguous to feed it
2728 2729 */
2729 2730 if ((spare_mp = msgpullup(mp, -1)) == NULL)
2730 2731 return (SELRET_NOMEM);
2731 2732 if (!ip_hdr_length_nexthdr_v6(spare_mp,
2732 2733 (ip6_t *)(spare_mp->b_rptr + outer_hdr_len),
2733 2734 &hdr_len, &nexthdrp)) {
2734 2735 /* Malformed packet - caller frees. */
2735 2736 ipsec_freemsg_chain(spare_mp);
2736 2737 return (SELRET_BADPKT);
2737 2738 }
2738 2739 /* Repopulate now that we have the whole packet */
2739 2740 ip6h = (ip6_t *)(spare_mp->b_rptr + outer_hdr_len);
2740 2741 (void) ip_find_hdr_v6(spare_mp, ip6h, B_FALSE, &ipp,
2741 2742 NULL);
2742 2743 nexthdr = *nexthdrp;
2743 2744 /* We can just extract based on hdr_len now. */
2744 2745 break;
2745 2746 default:
2746 2747 (void) ip_find_hdr_v6(mp, ip6h, B_FALSE, &ipp, NULL);
2747 2748 hdr_len = IPV6_HDR_LEN;
2748 2749 break;
2749 2750 }
2750 2751 if (port_policy_present && IS_V6_FRAGMENT(ipp) && !is_icmp) {
2751 2752 /* IPv6 Fragment */
2752 2753 ipsec_freemsg_chain(spare_mp);
2753 2754 return (SELRET_TUNFRAG);
2754 2755 }
2755 2756 transportp = (uint8_t *)ip6h + hdr_len;
2756 2757 } else {
2757 2758 outer_hdr_len = prepended_length(mp, (uintptr_t)ipha);
2758 2759 icmp_proto = IPPROTO_ICMP;
2759 2760 sel->ips_isv4 = B_TRUE;
2760 2761 sel->ips_local_addr_v4 = ipha->ipha_dst;
2761 2762 sel->ips_remote_addr_v4 = ipha->ipha_src;
2762 2763 nexthdr = ipha->ipha_protocol;
2763 2764 hdr_len = IPH_HDR_LENGTH(ipha);
2764 2765
2765 2766 if (port_policy_present &&
2766 2767 IS_V4_FRAGMENT(ipha->ipha_fragment_offset_and_flags) &&
2767 2768 !is_icmp) {
2768 2769 /* IPv4 Fragment */
2769 2770 ipsec_freemsg_chain(spare_mp);
2770 2771 return (SELRET_TUNFRAG);
2771 2772 }
2772 2773 transportp = (uint8_t *)ipha + hdr_len;
2773 2774 }
2774 2775 sel->ips_protocol = nexthdr;
2775 2776
2776 2777 if ((nexthdr != IPPROTO_TCP && nexthdr != IPPROTO_UDP &&
2777 2778 nexthdr != IPPROTO_SCTP && nexthdr != icmp_proto) ||
2778 2779 (!port_policy_present && !post_frag && tunnel_mode)) {
2779 2780 sel->ips_remote_port = sel->ips_local_port = 0;
2780 2781 ipsec_freemsg_chain(spare_mp);
2781 2782 return (SELRET_SUCCESS);
2782 2783 }
2783 2784
2784 2785 if (transportp + 4 > mp->b_wptr) {
2785 2786 /* If we didn't pullup a copy already, do so now. */
2786 2787 /*
2787 2788 * XXX performance, will upper-layers frequently split TCP/UDP
2788 2789 * apart from IP or options? If so, perhaps we should revisit
2789 2790 * the spare_mp strategy.
2790 2791 */
2791 2792 ipsec_hdr_pullup_needed++;
2792 2793 if (spare_mp == NULL &&
2793 2794 (spare_mp = msgpullup(mp, -1)) == NULL) {
2794 2795 return (SELRET_NOMEM);
2795 2796 }
2796 2797 transportp = &spare_mp->b_rptr[hdr_len + outer_hdr_len];
2797 2798 }
2798 2799
2799 2800 if (nexthdr == icmp_proto) {
2800 2801 sel->ips_icmp_type = *transportp++;
2801 2802 sel->ips_icmp_code = *transportp;
2802 2803 sel->ips_remote_port = sel->ips_local_port = 0;
2803 2804 } else {
2804 2805 ports = (uint16_t *)transportp;
2805 2806 sel->ips_remote_port = *ports++;
2806 2807 sel->ips_local_port = *ports;
2807 2808 }
2808 2809 ipsec_freemsg_chain(spare_mp);
2809 2810 return (SELRET_SUCCESS);
2810 2811 }
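/*
 * Minimal caller sketch (hypothetical; compare the real handling in
 * ipsec_check_global_policy() above and in the fragcache code):
 *
 *	switch (ipsec_init_inbound_sel(&sel, mp, ipha, ip6h, sel_flags)) {
 *	case SELRET_NOMEM:
 *		... drop with the ipds_spd_nomem counter ...
 *	case SELRET_TUNFRAG:
 *		... queue the fragment for gathering ...
 *	case SELRET_BADPKT:
 *		... initial fragment too short; drop the chain ...
 *	case SELRET_SUCCESS:
 *		... sel is populated; proceed to policy matching ...
 *	}
 */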
2811 2812
2812 2813 /*
2813 2814 * This is called with a b_next chain of messages from the fragcache code,
2814 2815 * hence it needs to discard a chain on error.
2815 2816 */
2816 2817 static boolean_t
2817 2818 ipsec_init_outbound_ports(ipsec_selector_t *sel, mblk_t *mp, ipha_t *ipha,
2818 2819 ip6_t *ip6h, int outer_hdr_len, ipsec_stack_t *ipss)
2819 2820 {
2820 2821 /*
2821 2822 * XXX cut&paste shared with ipsec_init_inbound_sel
2822 2823 */
2823 2824 uint16_t *ports;
2824 2825 ushort_t hdr_len;
2825 2826 mblk_t *spare_mp = NULL;
2826 2827 uint8_t *nexthdrp;
2827 2828 uint8_t nexthdr;
2828 2829 uint8_t *typecode;
2829 2830 uint8_t check_proto;
2830 2831
2831 2832 ASSERT((ipha == NULL && ip6h != NULL) ||
2832 2833 (ipha != NULL && ip6h == NULL));
2833 2834
2834 2835 if (ip6h != NULL) {
2835 2836 check_proto = IPPROTO_ICMPV6;
2836 2837 nexthdr = ip6h->ip6_nxt;
2837 2838 switch (nexthdr) {
2838 2839 case IPPROTO_HOPOPTS:
2839 2840 case IPPROTO_ROUTING:
2840 2841 case IPPROTO_DSTOPTS:
2841 2842 case IPPROTO_FRAGMENT:
2842 2843 /*
2843 2844 * Use ip_hdr_length_nexthdr_v6(). And have a spare
2844 2845 * mblk that's contiguous to feed it
2845 2846 */
2846 2847 spare_mp = msgpullup(mp, -1);
2847 2848 if (spare_mp == NULL ||
2848 2849 !ip_hdr_length_nexthdr_v6(spare_mp,
2849 2850 (ip6_t *)(spare_mp->b_rptr + outer_hdr_len),
2850 2851 &hdr_len, &nexthdrp)) {
2851 2852 /* Always works, even if NULL. */
2852 2853 ipsec_freemsg_chain(spare_mp);
2853 2854 ip_drop_packet_chain(mp, B_FALSE, NULL,
2854 2855 DROPPER(ipss, ipds_spd_nomem),
2855 2856 &ipss->ipsec_spd_dropper);
2856 2857 return (B_FALSE);
2857 2858 } else {
2858 2859 nexthdr = *nexthdrp;
2859 2860 /* We can just extract based on hdr_len now. */
2860 2861 }
2861 2862 break;
2862 2863 default:
2863 2864 hdr_len = IPV6_HDR_LEN;
2864 2865 break;
2865 2866 }
2866 2867 } else {
2867 2868 check_proto = IPPROTO_ICMP;
2868 2869 hdr_len = IPH_HDR_LENGTH(ipha);
2869 2870 nexthdr = ipha->ipha_protocol;
2870 2871 }
2871 2872
2872 2873 sel->ips_protocol = nexthdr;
2873 2874 if (nexthdr != IPPROTO_TCP && nexthdr != IPPROTO_UDP &&
2874 2875 nexthdr != IPPROTO_SCTP && nexthdr != check_proto) {
2875 2876 sel->ips_local_port = sel->ips_remote_port = 0;
2876 2877 ipsec_freemsg_chain(spare_mp); /* Always works, even if NULL */
2877 2878 return (B_TRUE);
2878 2879 }
2879 2880
2880 2881 if (&mp->b_rptr[hdr_len] + 4 + outer_hdr_len > mp->b_wptr) {
2881 2882 /* If we didn't pullup a copy already, do so now. */
2882 2883 /*
2883 2884 * XXX performance, will upper-layers frequently split TCP/UDP
2884 2885 * apart from IP or options? If so, perhaps we should revisit
2885 2886 * the spare_mp strategy.
2886 2887 *
2887 2888 * XXX should this be msgpullup(mp, hdr_len+4) ???
2888 2889 */
2889 2890 if (spare_mp == NULL &&
2890 2891 (spare_mp = msgpullup(mp, -1)) == NULL) {
2891 2892 ip_drop_packet_chain(mp, B_FALSE, NULL,
2892 2893 DROPPER(ipss, ipds_spd_nomem),
2893 2894 &ipss->ipsec_spd_dropper);
2894 2895 return (B_FALSE);
2895 2896 }
2896 2897 ports = (uint16_t *)&spare_mp->b_rptr[hdr_len + outer_hdr_len];
2897 2898 } else {
2898 2899 ports = (uint16_t *)&mp->b_rptr[hdr_len + outer_hdr_len];
2899 2900 }
2900 2901
2901 2902 if (nexthdr == check_proto) {
2902 2903 typecode = (uint8_t *)ports;
2903 2904 sel->ips_icmp_type = *typecode++;
2904 2905 sel->ips_icmp_code = *typecode;
2905 2906 sel->ips_remote_port = sel->ips_local_port = 0;
2906 2907 } else {
2907 2908 sel->ips_local_port = *ports++;
2908 2909 sel->ips_remote_port = *ports;
2909 2910 }
2910 2911 ipsec_freemsg_chain(spare_mp); /* Always works, even if NULL */
2911 2912 return (B_TRUE);
2912 2913 }
2913 2914
2914 2915 /*
2915 2916  * Prepend an mblk containing an ipsec_crypto_t to the message chain.
2916 2917  * Frees the argument and returns NULL should the allocation fail.
2917 2918  * On success, returns the new chain and sets *icp to the crypto data part.
2918 2919 */
2919 2920 mblk_t *
2920 2921 ipsec_add_crypto_data(mblk_t *data_mp, ipsec_crypto_t **icp)
2921 2922 {
2922 2923 mblk_t *mp;
2923 2924
2924 2925 mp = allocb(sizeof (ipsec_crypto_t), BPRI_MED);
2925 2926 if (mp == NULL) {
2926 2927 freemsg(data_mp);
2927 2928 return (NULL);
2928 2929 }
2929 2930 bzero(mp->b_rptr, sizeof (ipsec_crypto_t));
2930 2931 mp->b_wptr += sizeof (ipsec_crypto_t);
2931 2932 mp->b_cont = data_mp;
2932 2933 mp->b_datap->db_type = M_EVENT; /* For ASSERT */
2933 2934 *icp = (ipsec_crypto_t *)mp->b_rptr;
2934 2935 return (mp);
2935 2936 }
2936 2937
2937 2938 /*
2938 2939 * Remove what was prepended above. Return b_cont and a pointer to the
2939 2940 * crypto data.
2940 2941 * The caller must call ipsec_free_crypto_data for mblk once it is done
2941 2942 * with the crypto data.
2942 2943 */
2943 2944 mblk_t *
2944 2945 ipsec_remove_crypto_data(mblk_t *crypto_mp, ipsec_crypto_t **icp)
2945 2946 {
2946 2947 ASSERT(crypto_mp->b_datap->db_type == M_EVENT);
2947 2948 ASSERT(MBLKL(crypto_mp) == sizeof (ipsec_crypto_t));
2948 2949
2949 2950 *icp = (ipsec_crypto_t *)crypto_mp->b_rptr;
2950 2951 return (crypto_mp->b_cont);
2951 2952 }
2952 2953
2953 2954 /*
2954 2955 * Free what was prepended above. Return b_cont.
2955 2956 */
2956 2957 mblk_t *
2957 2958 ipsec_free_crypto_data(mblk_t *crypto_mp)
2958 2959 {
2959 2960 mblk_t *mp;
2960 2961
2961 2962 ASSERT(crypto_mp->b_datap->db_type == M_EVENT);
2962 2963 ASSERT(MBLKL(crypto_mp) == sizeof (ipsec_crypto_t));
2963 2964
2964 2965 mp = crypto_mp->b_cont;
2965 2966 freeb(crypto_mp);
2966 2967 return (mp);
2967 2968 }
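/*
 * Round-trip usage sketch for the three helpers above (hypothetical
 * caller):
 *
 *	ipsec_crypto_t *ic;
 *	mblk_t *mp = ipsec_add_crypto_data(data_mp, &ic);
 *	if (mp == NULL)
 *		return;			... data_mp was already freed ...
 *	... fill in *ic and submit mp ...
 *	data_mp = ipsec_remove_crypto_data(mp, &ic);
 *	... consume *ic ...
 *	data_mp = ipsec_free_crypto_data(mp);	... frees the prepended mblk ...
 */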
2968 2969
2969 2970 /*
2970 2971 * Create an ipsec_action_t based on the way an inbound packet was protected.
2971 2972 * Used to reflect traffic back to a sender.
2972 2973 *
2973 2974 * We don't bother interning the action into the hash table.
2974 2975 */
2975 2976 ipsec_action_t *
2976 2977 ipsec_in_to_out_action(ip_recv_attr_t *ira)
2977 2978 {
2978 2979 ipsa_t *ah_assoc, *esp_assoc;
2979 2980 uint_t auth_alg = 0, encr_alg = 0, espa_alg = 0;
2980 2981 ipsec_action_t *ap;
2981 2982 boolean_t unique;
2982 2983
2983 2984 ap = kmem_cache_alloc(ipsec_action_cache, KM_NOSLEEP);
2984 2985
2985 2986 if (ap == NULL)
2986 2987 return (NULL);
2987 2988
2988 2989 bzero(ap, sizeof (*ap));
2989 2990 HASH_NULL(ap, ipa_hash);
2990 2991 ap->ipa_next = NULL;
2991 2992 ap->ipa_refs = 1;
2992 2993
2993 2994 /*
2994 2995 * Get the algorithms that were used for this packet.
2995 2996 */
2996 2997 ap->ipa_act.ipa_type = IPSEC_ACT_APPLY;
2997 2998 ap->ipa_act.ipa_log = 0;
2998 2999 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
2999 3000
3000 3001 ah_assoc = ira->ira_ipsec_ah_sa;
3001 3002 ap->ipa_act.ipa_apply.ipp_use_ah = (ah_assoc != NULL);
3002 3003
3003 3004 esp_assoc = ira->ira_ipsec_esp_sa;
3004 3005 ap->ipa_act.ipa_apply.ipp_use_esp = (esp_assoc != NULL);
3005 3006
3006 3007 if (esp_assoc != NULL) {
3007 3008 encr_alg = esp_assoc->ipsa_encr_alg;
3008 3009 espa_alg = esp_assoc->ipsa_auth_alg;
3009 3010 ap->ipa_act.ipa_apply.ipp_use_espa = (espa_alg != 0);
3010 3011 }
3011 3012 if (ah_assoc != NULL)
3012 3013 auth_alg = ah_assoc->ipsa_auth_alg;
3013 3014
3014 3015 ap->ipa_act.ipa_apply.ipp_encr_alg = (uint8_t)encr_alg;
3015 3016 ap->ipa_act.ipa_apply.ipp_auth_alg = (uint8_t)auth_alg;
3016 3017 ap->ipa_act.ipa_apply.ipp_esp_auth_alg = (uint8_t)espa_alg;
3017 3018 ap->ipa_act.ipa_apply.ipp_use_se =
3018 3019 !!(ira->ira_flags & IRAF_IPSEC_DECAPS);
3019 3020 unique = B_FALSE;
3020 3021
3021 3022 if (esp_assoc != NULL) {
3022 3023 ap->ipa_act.ipa_apply.ipp_espa_minbits =
3023 3024 esp_assoc->ipsa_authkeybits;
3024 3025 ap->ipa_act.ipa_apply.ipp_espa_maxbits =
3025 3026 esp_assoc->ipsa_authkeybits;
3026 3027 ap->ipa_act.ipa_apply.ipp_espe_minbits =
3027 3028 esp_assoc->ipsa_encrkeybits;
3028 3029 ap->ipa_act.ipa_apply.ipp_espe_maxbits =
3029 3030 esp_assoc->ipsa_encrkeybits;
3030 3031 ap->ipa_act.ipa_apply.ipp_km_proto = esp_assoc->ipsa_kmp;
3031 3032 ap->ipa_act.ipa_apply.ipp_km_cookie = esp_assoc->ipsa_kmc;
3032 3033 if (esp_assoc->ipsa_flags & IPSA_F_UNIQUE)
3033 3034 unique = B_TRUE;
3034 3035 }
3035 3036 if (ah_assoc != NULL) {
3036 3037 ap->ipa_act.ipa_apply.ipp_ah_minbits =
3037 3038 ah_assoc->ipsa_authkeybits;
3038 3039 ap->ipa_act.ipa_apply.ipp_ah_maxbits =
3039 3040 ah_assoc->ipsa_authkeybits;
3040 3041 ap->ipa_act.ipa_apply.ipp_km_proto = ah_assoc->ipsa_kmp;
3041 3042 ap->ipa_act.ipa_apply.ipp_km_cookie = ah_assoc->ipsa_kmc;
3042 3043 if (ah_assoc->ipsa_flags & IPSA_F_UNIQUE)
3043 3044 unique = B_TRUE;
3044 3045 }
3045 3046 ap->ipa_act.ipa_apply.ipp_use_unique = unique;
3046 3047 ap->ipa_want_unique = unique;
3047 3048 ap->ipa_allow_clear = B_FALSE;
3048 3049 ap->ipa_want_se = !!(ira->ira_flags & IRAF_IPSEC_DECAPS);
3049 3050 ap->ipa_want_ah = (ah_assoc != NULL);
3050 3051 ap->ipa_want_esp = (esp_assoc != NULL);
3051 3052
3052 3053 ap->ipa_ovhd = ipsec_act_ovhd(&ap->ipa_act);
3053 3054
3054 3055 ap->ipa_act.ipa_apply.ipp_replay_depth = 0; /* don't care */
3055 3056
3056 3057 return (ap);
3057 3058 }
3058 3059
3059 3060
3060 3061 /*
3061 3062 * Compute the worst-case amount of extra space required by an action.
3062 3063 * Note that, because of the ESP considerations listed below, this is
3063 3064 * actually not the same as the best-case reduction in the MTU; in the
3064 3065 * future, we should pass additional information to this function to
3065 3066 * allow the actual MTU impact to be computed.
3066 3067 *
3067 3068 * AH: Revisit this if we implement algorithms with
3068 3069 * a verifier size of more than 12 bytes.
3069 3070 *
3070 3071 * ESP: A more exact but more messy computation would take into
3071 3072 * account the interaction between the cipher block size and the
3072 3073 * effective MTU, yielding the inner payload size which reflects a
3073 3074 * packet with *minimum* ESP padding..
3074 3075 */
3075 3076 int32_t
3076 3077 ipsec_act_ovhd(const ipsec_act_t *act)
3077 3078 {
3078 3079 int32_t overhead = 0;
3079 3080
3080 3081 if (act->ipa_type == IPSEC_ACT_APPLY) {
3081 3082 const ipsec_prot_t *ipp = &act->ipa_apply;
3082 3083
3083 3084 if (ipp->ipp_use_ah)
3084 3085 overhead += IPSEC_MAX_AH_HDR_SIZE;
3085 3086 if (ipp->ipp_use_esp) {
3086 3087 overhead += IPSEC_MAX_ESP_HDR_SIZE;
3087 3088 overhead += sizeof (struct udphdr);
3088 3089 }
3089 3090 if (ipp->ipp_use_se)
3090 3091 overhead += IP_SIMPLE_HDR_LENGTH;
3091 3092 }
3092 3093 return (overhead);
3093 3094 }
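/*
 * For example, an IPSEC_ACT_APPLY action requesting AH, ESP and
 * self-encapsulation reports a worst case of IPSEC_MAX_AH_HDR_SIZE +
 * IPSEC_MAX_ESP_HDR_SIZE + sizeof (struct udphdr) + IP_SIMPLE_HDR_LENGTH;
 * bypass/clear actions report zero.
 */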
3094 3095
3095 3096 /*
3096 3097 * This hash function is used only when creating policies and thus is not
3097 3098 * performance-critical for packet flows.
3098 3099 *
3099 3100 * Future work: canonicalize the structures hashed with this (i.e.,
3100 3101 * zeroize padding) so the hash works correctly.
3101 3102 */
3102 3103 /* ARGSUSED */
3103 3104 static uint32_t
3104 3105 policy_hash(int size, const void *start, const void *end)
3105 3106 {
3106 3107 return (0);
3107 3108 }
3108 3109
3109 3110
3110 3111 /*
3111 3112 * Hash function macros for each address type.
3112 3113 *
3113 3114 * The IPV6 hash function assumes that the low order 32-bits of the
3114 3115 * address (typically containing the low order 24 bits of the mac
3115 3116 * address) are reasonably well-distributed. Revisit this if we run
3116 3117 * into trouble from lots of collisions on ::1 addresses and the like
3117 3118 * (seems unlikely).
3118 3119 */
3119 3120 #define IPSEC_IPV4_HASH(a, n) ((a) % (n))
3120 3121 #define IPSEC_IPV6_HASH(a, n) (((a).s6_addr32[3]) % (n))
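/*
 * E.g. with ipr_nchains == 256, IPSEC_IPV6_HASH() reduces to
 * (a.s6_addr32[3]) % 256, so only the low-order eight bits of the last
 * 32-bit word of the address select the chain; IPSEC_IPV4_HASH() takes
 * the whole 32-bit address modulo the chain count.
 */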
3121 3122
3122 3123 /*
3123 3124 * These two hash functions should produce coordinated values
3124 3125 * but have slightly different roles.
3125 3126 */
3126 3127 static uint32_t
3127 3128 selkey_hash(const ipsec_selkey_t *selkey, netstack_t *ns)
3128 3129 {
3129 3130 uint32_t valid = selkey->ipsl_valid;
3130 3131 ipsec_stack_t *ipss = ns->netstack_ipsec;
3131 3132
3132 3133 if (!(valid & IPSL_REMOTE_ADDR))
3133 3134 return (IPSEC_SEL_NOHASH);
3134 3135
3135 3136 if (valid & IPSL_IPV4) {
3136 3137 if (selkey->ipsl_remote_pfxlen == 32) {
3137 3138 return (IPSEC_IPV4_HASH(selkey->ipsl_remote.ipsad_v4,
3138 3139 ipss->ipsec_spd_hashsize));
3139 3140 }
3140 3141 }
3141 3142 if (valid & IPSL_IPV6) {
3142 3143 if (selkey->ipsl_remote_pfxlen == 128) {
3143 3144 return (IPSEC_IPV6_HASH(selkey->ipsl_remote.ipsad_v6,
3144 3145 ipss->ipsec_spd_hashsize));
3145 3146 }
3146 3147 }
3147 3148 return (IPSEC_SEL_NOHASH);
3148 3149 }
3149 3150
3150 3151 static uint32_t
3151 3152 selector_hash(ipsec_selector_t *sel, ipsec_policy_root_t *root)
3152 3153 {
3153 3154 if (sel->ips_isv4) {
3154 3155 return (IPSEC_IPV4_HASH(sel->ips_remote_addr_v4,
3155 3156 root->ipr_nchains));
3156 3157 }
3157 3158 return (IPSEC_IPV6_HASH(sel->ips_remote_addr_v6, root->ipr_nchains));
3158 3159 }
3159 3160
3160 3161 /*
3161 3162 * Intern actions into the action hash table.
3162 3163 */
3163 3164 ipsec_action_t *
3164 3165 ipsec_act_find(const ipsec_act_t *a, int n, netstack_t *ns)
3165 3166 {
3166 3167 int i;
3167 3168 uint32_t hval;
3168 3169 ipsec_action_t *ap;
3169 3170 ipsec_action_t *prev = NULL;
3170 3171 int32_t overhead, maxovhd = 0;
3171 3172 boolean_t allow_clear = B_FALSE;
3172 3173 boolean_t want_ah = B_FALSE;
3173 3174 boolean_t want_esp = B_FALSE;
3174 3175 boolean_t want_se = B_FALSE;
3175 3176 boolean_t want_unique = B_FALSE;
3176 3177 ipsec_stack_t *ipss = ns->netstack_ipsec;
3177 3178
3178 3179 /*
3179 3180 * TODO: should canonicalize a[] (i.e., zeroize any padding)
3180 3181 * so we can use a non-trivial policy_hash function.
3181 3182 */
3182 3183 for (i = n-1; i >= 0; i--) {
3183 3184 hval = policy_hash(IPSEC_ACTION_HASH_SIZE, &a[i], &a[n]);
3184 3185
3185 3186 HASH_LOCK(ipss->ipsec_action_hash, hval);
3186 3187
3187 3188 for (HASH_ITERATE(ap, ipa_hash,
3188 3189 ipss->ipsec_action_hash, hval)) {
3189 3190 if (bcmp(&ap->ipa_act, &a[i], sizeof (*a)) != 0)
3190 3191 continue;
3191 3192 if (ap->ipa_next != prev)
3192 3193 continue;
3193 3194 break;
3194 3195 }
3195 3196 if (ap != NULL) {
3196 3197 HASH_UNLOCK(ipss->ipsec_action_hash, hval);
3197 3198 prev = ap;
3198 3199 continue;
3199 3200 }
3200 3201 /*
3201 3202 * need to allocate a new one..
3202 3203 */
3203 3204 ap = kmem_cache_alloc(ipsec_action_cache, KM_NOSLEEP);
3204 3205 if (ap == NULL) {
3205 3206 HASH_UNLOCK(ipss->ipsec_action_hash, hval);
3206 3207 if (prev != NULL)
3207 3208 ipsec_action_free(prev);
3208 3209 return (NULL);
3209 3210 }
3210 3211 HASH_INSERT(ap, ipa_hash, ipss->ipsec_action_hash, hval);
3211 3212
3212 3213 ap->ipa_next = prev;
3213 3214 ap->ipa_act = a[i];
3214 3215
3215 3216 overhead = ipsec_act_ovhd(&a[i]);
3216 3217 if (maxovhd < overhead)
3217 3218 maxovhd = overhead;
3218 3219
3219 3220 if ((a[i].ipa_type == IPSEC_ACT_BYPASS) ||
3220 3221 (a[i].ipa_type == IPSEC_ACT_CLEAR))
3221 3222 allow_clear = B_TRUE;
3222 3223 if (a[i].ipa_type == IPSEC_ACT_APPLY) {
3223 3224 const ipsec_prot_t *ipp = &a[i].ipa_apply;
3224 3225
3225 3226 ASSERT(ipp->ipp_use_ah || ipp->ipp_use_esp);
3226 3227 want_ah |= ipp->ipp_use_ah;
3227 3228 want_esp |= ipp->ipp_use_esp;
3228 3229 want_se |= ipp->ipp_use_se;
3229 3230 want_unique |= ipp->ipp_use_unique;
3230 3231 }
3231 3232 ap->ipa_allow_clear = allow_clear;
3232 3233 ap->ipa_want_ah = want_ah;
3233 3234 ap->ipa_want_esp = want_esp;
3234 3235 ap->ipa_want_se = want_se;
3235 3236 ap->ipa_want_unique = want_unique;
3236 3237 ap->ipa_refs = 1; /* from the hash table */
3237 3238 ap->ipa_ovhd = maxovhd;
3238 3239 if (prev)
3239 3240 prev->ipa_refs++;
3240 3241 prev = ap;
3241 3242 HASH_UNLOCK(ipss->ipsec_action_hash, hval);
3242 3243 }
3243 3244
3244 3245 ap->ipa_refs++; /* caller's reference */
3245 3246
3246 3247 return (ap);
3247 3248 }
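/*
 * Illustrative use, modeled on ipsec_policy_create() below (a sketch, not
 * a new caller):
 *
 *	ap = ipsec_act_find(acts, nact, ns);
 *	if (ap == NULL)
 *		return (NULL);
 *	...
 *	IPACT_REFRELE(ap);	-- drop the caller's reference when done
 *
 * The chain is interned back-to-front (a[n-1] first), so the pointer
 * returned corresponds to a[0], with ipa_next leading towards a[n-1].
 */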
3248 3249
3249 3250 /*
3250 3251 * Called when refcount goes to 0, indicating that all references to this
3251 3252 * node are gone.
3252 3253 *
3253 3254 * This does not unchain the action from the hash table.
3254 3255 */
3255 3256 void
3256 3257 ipsec_action_free(ipsec_action_t *ap)
3257 3258 {
3258 3259 for (;;) {
3259 3260 ipsec_action_t *np = ap->ipa_next;
3260 3261 ASSERT(ap->ipa_refs == 0);
3261 3262 ASSERT(ap->ipa_hash.hash_pp == NULL);
3262 3263 kmem_cache_free(ipsec_action_cache, ap);
3263 3264 ap = np;
3264 3265 /* Inlined IPACT_REFRELE -- avoid recursion */
3265 3266 if (ap == NULL)
3266 3267 break;
3267 3268 membar_exit();
3268 3269 if (atomic_dec_32_nv(&(ap)->ipa_refs) != 0)
3269 3270 break;
3270 3271 /* End inlined IPACT_REFRELE */
3271 3272 }
3272 3273 }
3273 3274
3274 3275 /*
3275 3276 * Called when the action hash table goes away.
3276 3277 *
3277 3278 * The actions can be queued on an mblk with ipsec_in or
3278 3279 * ipsec_out, hence the actions might still be around.
3279 3280 * But we decrement ipa_refs here since we no longer have
3280 3281 * a reference to the action from the hash table.
3281 3282 */
3282 3283 static void
3283 3284 ipsec_action_free_table(ipsec_action_t *ap)
3284 3285 {
3285 3286 while (ap != NULL) {
3286 3287 ipsec_action_t *np = ap->ipa_next;
3287 3288
3288 3289 /* FIXME: remove? */
3289 3290 (void) printf("ipsec_action_free_table(%p) ref %d\n",
3290 3291 (void *)ap, ap->ipa_refs);
3291 3292 ASSERT(ap->ipa_refs > 0);
3292 3293 IPACT_REFRELE(ap);
3293 3294 ap = np;
3294 3295 }
3295 3296 }
3296 3297
3297 3298 /*
3298 3299 * Need to walk all stack instances since the reclaim function
3299 3300 * is global for all instances
3300 3301 */
3301 3302 /* ARGSUSED */
3302 3303 static void
3303 3304 ipsec_action_reclaim(void *arg)
3304 3305 {
3305 3306 netstack_handle_t nh;
3306 3307 netstack_t *ns;
3307 3308 ipsec_stack_t *ipss;
3308 3309
3309 3310 netstack_next_init(&nh);
3310 3311 while ((ns = netstack_next(&nh)) != NULL) {
3311 3312 /*
3312 3313 * netstack_next() can return a netstack_t with a NULL
3313 3314 * netstack_ipsec at boot time.
3314 3315 */
3315 3316 if ((ipss = ns->netstack_ipsec) == NULL) {
3316 3317 netstack_rele(ns);
3317 3318 continue;
3318 3319 }
3319 3320 ipsec_action_reclaim_stack(ipss);
3320 3321 netstack_rele(ns);
3321 3322 }
3322 3323 netstack_next_fini(&nh);
3323 3324 }
3324 3325
3325 3326 /*
3326 3327 * Periodically sweep action hash table for actions with refcount==1, and
3327 3328 * nuke them. We cannot do this "on demand" (i.e., from IPACT_REFRELE)
3328 3329  * because we can't close the race against another thread finding the action
3329 3330  * in the hash table unless we held the bucket lock across IPACT_REFRELE.
3330 3331 * Instead, we run this function sporadically to clean up after ourselves;
3331 3332 * we also set it as the "reclaim" function for the action kmem_cache.
3332 3333 *
3333 3334  * Note that it may take several passes of ipsec_action_reclaim() to
3334 3335  * free all "stale" actions.
3335 3336 */
3336 3337 static void
3337 3338 ipsec_action_reclaim_stack(ipsec_stack_t *ipss)
3338 3339 {
3339 3340 int i;
3340 3341
3341 3342 for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++) {
3342 3343 ipsec_action_t *ap, *np;
3343 3344
3344 3345 /* skip the lock if nobody home */
3345 3346 if (ipss->ipsec_action_hash[i].hash_head == NULL)
3346 3347 continue;
3347 3348
3348 3349 HASH_LOCK(ipss->ipsec_action_hash, i);
3349 3350 for (ap = ipss->ipsec_action_hash[i].hash_head;
3350 3351 ap != NULL; ap = np) {
3351 3352 ASSERT(ap->ipa_refs > 0);
3352 3353 np = ap->ipa_hash.hash_next;
3353 3354 if (ap->ipa_refs > 1)
3354 3355 continue;
3355 3356 HASH_UNCHAIN(ap, ipa_hash,
3356 3357 ipss->ipsec_action_hash, i);
3357 3358 IPACT_REFRELE(ap);
3358 3359 }
3359 3360 HASH_UNLOCK(ipss->ipsec_action_hash, i);
3360 3361 }
3361 3362 }
3362 3363
3363 3364 /*
3364 3365 * Intern a selector set into the selector set hash table.
3365 3366 * This is simpler than the actions case..
3366 3367 */
3367 3368 static ipsec_sel_t *
3368 3369 ipsec_find_sel(ipsec_selkey_t *selkey, netstack_t *ns)
3369 3370 {
3370 3371 ipsec_sel_t *sp;
3371 3372 uint32_t hval, bucket;
3372 3373 ipsec_stack_t *ipss = ns->netstack_ipsec;
3373 3374
3374 3375 /*
3375 3376 * Exactly one AF bit should be set in selkey.
3376 3377 */
3377 3378 ASSERT(!(selkey->ipsl_valid & IPSL_IPV4) ^
3378 3379 !(selkey->ipsl_valid & IPSL_IPV6));
3379 3380
3380 3381 hval = selkey_hash(selkey, ns);
3381 3382 	/* Only set sel_hval here; pol_hval is set when put in a polhead. */
3382 3383 selkey->ipsl_sel_hval = hval;
3383 3384
3384 3385 bucket = (hval == IPSEC_SEL_NOHASH) ? 0 : hval;
3385 3386
3386 3387 ASSERT(!HASH_LOCKED(ipss->ipsec_sel_hash, bucket));
3387 3388 HASH_LOCK(ipss->ipsec_sel_hash, bucket);
3388 3389
3389 3390 for (HASH_ITERATE(sp, ipsl_hash, ipss->ipsec_sel_hash, bucket)) {
3390 3391 if (bcmp(&sp->ipsl_key, selkey,
3391 3392 offsetof(ipsec_selkey_t, ipsl_pol_hval)) == 0)
3392 3393 break;
3393 3394 }
3394 3395 if (sp != NULL) {
3395 3396 sp->ipsl_refs++;
3396 3397
3397 3398 HASH_UNLOCK(ipss->ipsec_sel_hash, bucket);
3398 3399 return (sp);
3399 3400 }
3400 3401
3401 3402 sp = kmem_cache_alloc(ipsec_sel_cache, KM_NOSLEEP);
3402 3403 if (sp == NULL) {
3403 3404 HASH_UNLOCK(ipss->ipsec_sel_hash, bucket);
3404 3405 return (NULL);
3405 3406 }
3406 3407
3407 3408 HASH_INSERT(sp, ipsl_hash, ipss->ipsec_sel_hash, bucket);
3408 3409 sp->ipsl_refs = 2; /* one for hash table, one for caller */
3409 3410 sp->ipsl_key = *selkey;
3410 3411 	/* Set to uninitialized and have insertion into polhead fix things. */
3411 3412 if (selkey->ipsl_sel_hval != IPSEC_SEL_NOHASH)
3412 3413 sp->ipsl_key.ipsl_pol_hval = 0;
3413 3414 else
3414 3415 sp->ipsl_key.ipsl_pol_hval = IPSEC_SEL_NOHASH;
3415 3416
3416 3417 HASH_UNLOCK(ipss->ipsec_sel_hash, bucket);
3417 3418
3418 3419 return (sp);
3419 3420 }
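/*
 * Because every selector key is interned through this table, two policies
 * with identical selectors end up sharing one ipsec_sel_t; ipsec_check_policy()
 * below relies on that and detects duplicates by comparing ipsp_sel pointers
 * rather than re-comparing the keys.
 */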
3420 3421
3421 3422 static void
3422 3423 ipsec_sel_rel(ipsec_sel_t **spp, netstack_t *ns)
3423 3424 {
3424 3425 ipsec_sel_t *sp = *spp;
3425 3426 int hval = sp->ipsl_key.ipsl_sel_hval;
3426 3427 ipsec_stack_t *ipss = ns->netstack_ipsec;
3427 3428
3428 3429 *spp = NULL;
3429 3430
3430 3431 if (hval == IPSEC_SEL_NOHASH)
3431 3432 hval = 0;
3432 3433
3433 3434 ASSERT(!HASH_LOCKED(ipss->ipsec_sel_hash, hval));
3434 3435 HASH_LOCK(ipss->ipsec_sel_hash, hval);
3435 3436 if (--sp->ipsl_refs == 1) {
3436 3437 HASH_UNCHAIN(sp, ipsl_hash, ipss->ipsec_sel_hash, hval);
3437 3438 sp->ipsl_refs--;
3438 3439 HASH_UNLOCK(ipss->ipsec_sel_hash, hval);
3439 3440 ASSERT(sp->ipsl_refs == 0);
3440 3441 kmem_cache_free(ipsec_sel_cache, sp);
3441 3442 /* Caller unlocks */
3442 3443 return;
3443 3444 }
3444 3445
3445 3446 HASH_UNLOCK(ipss->ipsec_sel_hash, hval);
3446 3447 }
3447 3448
3448 3449 /*
3449 3450 * Free a policy rule which we know is no longer being referenced.
3450 3451 */
3451 3452 void
3452 3453 ipsec_policy_free(ipsec_policy_t *ipp)
3453 3454 {
3454 3455 ASSERT(ipp->ipsp_refs == 0);
3455 3456 ASSERT(ipp->ipsp_sel != NULL);
3456 3457 ASSERT(ipp->ipsp_act != NULL);
3457 3458 ASSERT(ipp->ipsp_netstack != NULL);
3458 3459
3459 3460 ipsec_sel_rel(&ipp->ipsp_sel, ipp->ipsp_netstack);
3460 3461 IPACT_REFRELE(ipp->ipsp_act);
3461 3462 kmem_cache_free(ipsec_pol_cache, ipp);
3462 3463 }
3463 3464
3464 3465 /*
3465 3466 * Construction of new policy rules; construct a policy, and add it to
3466 3467 * the appropriate tables.
3467 3468 */
3468 3469 ipsec_policy_t *
3469 3470 ipsec_policy_create(ipsec_selkey_t *keys, const ipsec_act_t *a,
3470 3471 int nacts, int prio, uint64_t *index_ptr, netstack_t *ns)
3471 3472 {
3472 3473 ipsec_action_t *ap;
3473 3474 ipsec_sel_t *sp;
3474 3475 ipsec_policy_t *ipp;
3475 3476 ipsec_stack_t *ipss = ns->netstack_ipsec;
3476 3477
3477 3478 if (index_ptr == NULL)
3478 3479 index_ptr = &ipss->ipsec_next_policy_index;
3479 3480
3480 3481 ipp = kmem_cache_alloc(ipsec_pol_cache, KM_NOSLEEP);
3481 3482 ap = ipsec_act_find(a, nacts, ns);
3482 3483 sp = ipsec_find_sel(keys, ns);
3483 3484
3484 3485 if ((ap == NULL) || (sp == NULL) || (ipp == NULL)) {
3485 3486 if (ap != NULL) {
3486 3487 IPACT_REFRELE(ap);
3487 3488 }
3488 3489 if (sp != NULL)
3489 3490 ipsec_sel_rel(&sp, ns);
3490 3491 if (ipp != NULL)
3491 3492 kmem_cache_free(ipsec_pol_cache, ipp);
3492 3493 return (NULL);
3493 3494 }
3494 3495
3495 3496 HASH_NULL(ipp, ipsp_hash);
3496 3497
3497 3498 ipp->ipsp_netstack = ns; /* Needed for ipsec_policy_free */
3498 3499 ipp->ipsp_refs = 1; /* caller's reference */
3499 3500 ipp->ipsp_sel = sp;
3500 3501 ipp->ipsp_act = ap;
3501 3502 ipp->ipsp_prio = prio; /* rule priority */
3502 3503 ipp->ipsp_index = *index_ptr;
3503 3504 (*index_ptr)++;
3504 3505
3505 3506 return (ipp);
3506 3507 }
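/*
 * An illustrative call sequence (a sketch only; the real callers live
 * elsewhere in the IPsec code): a new rule is created, then validated and
 * entered while holding the policy head's write lock, since both
 * ipsec_check_policy() and ipsec_enter_policy() assert
 * RW_WRITE_HELD(&php->iph_lock):
 *
 *	ipp = ipsec_policy_create(&sel, acts, nact, prio, NULL, ns);
 *	if (ipp == NULL)
 *		return (ENOMEM);
 *	rw_enter(&php->iph_lock, RW_WRITER);
 *	if (ipsec_check_policy(php, ipp, dir))
 *		ipsec_enter_policy(php, ipp, dir, ns);
 *	else
 *		IPPOL_REFRELE(ipp);
 *	rw_exit(&php->iph_lock);
 */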
3507 3508
3508 3509 static void
3509 3510 ipsec_update_present_flags(ipsec_stack_t *ipss)
3510 3511 {
3511 3512 boolean_t hashpol;
3512 3513
3513 3514 hashpol = (avl_numnodes(&ipss->ipsec_system_policy.iph_rulebyid) > 0);
3514 3515
3515 3516 if (hashpol) {
3516 3517 ipss->ipsec_outbound_v4_policy_present = B_TRUE;
3517 3518 ipss->ipsec_outbound_v6_policy_present = B_TRUE;
3518 3519 ipss->ipsec_inbound_v4_policy_present = B_TRUE;
3519 3520 ipss->ipsec_inbound_v6_policy_present = B_TRUE;
3520 3521 return;
3521 3522 }
3522 3523
3523 3524 ipss->ipsec_outbound_v4_policy_present = (NULL !=
3524 3525 ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_OUTBOUND].
3525 3526 ipr_nonhash[IPSEC_AF_V4]);
3526 3527 ipss->ipsec_outbound_v6_policy_present = (NULL !=
3527 3528 ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_OUTBOUND].
3528 3529 ipr_nonhash[IPSEC_AF_V6]);
3529 3530 ipss->ipsec_inbound_v4_policy_present = (NULL !=
3530 3531 ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_INBOUND].
3531 3532 ipr_nonhash[IPSEC_AF_V4]);
3532 3533 ipss->ipsec_inbound_v6_policy_present = (NULL !=
3533 3534 ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_INBOUND].
3534 3535 ipr_nonhash[IPSEC_AF_V6]);
3535 3536 }
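/*
 * In effect: since every rule entered into the system polhead is also
 * inserted into iph_rulebyid, the presence of any rule at all switches
 * all four "policy present" hints on conservatively; only with an empty
 * ruleset are the hints derived from the per-direction, per-AF
 * non-hashed chains.
 */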
3536 3537
3537 3538 boolean_t
3538 3539 ipsec_policy_delete(ipsec_policy_head_t *php, ipsec_selkey_t *keys, int dir,
3539 3540 netstack_t *ns)
3540 3541 {
3541 3542 ipsec_sel_t *sp;
3542 3543 ipsec_policy_t *ip, *nip, *head;
3543 3544 int af;
3544 3545 ipsec_policy_root_t *pr = &php->iph_root[dir];
3545 3546
3546 3547 sp = ipsec_find_sel(keys, ns);
3547 3548
3548 3549 if (sp == NULL)
3549 3550 return (B_FALSE);
3550 3551
3551 3552 af = (sp->ipsl_key.ipsl_valid & IPSL_IPV4) ? IPSEC_AF_V4 : IPSEC_AF_V6;
3552 3553
3553 3554 rw_enter(&php->iph_lock, RW_WRITER);
3554 3555
3555 3556 if (sp->ipsl_key.ipsl_pol_hval == IPSEC_SEL_NOHASH) {
3556 3557 head = pr->ipr_nonhash[af];
3557 3558 } else {
3558 3559 head = pr->ipr_hash[sp->ipsl_key.ipsl_pol_hval].hash_head;
3559 3560 }
3560 3561
3561 3562 for (ip = head; ip != NULL; ip = nip) {
3562 3563 nip = ip->ipsp_hash.hash_next;
3563 3564 if (ip->ipsp_sel != sp) {
3564 3565 continue;
3565 3566 }
3566 3567
3567 3568 IPPOL_UNCHAIN(php, ip);
3568 3569
3569 3570 php->iph_gen++;
3570 3571 ipsec_update_present_flags(ns->netstack_ipsec);
3571 3572
3572 3573 rw_exit(&php->iph_lock);
3573 3574
3574 3575 ipsec_sel_rel(&sp, ns);
3575 3576
3576 3577 return (B_TRUE);
3577 3578 }
3578 3579
3579 3580 rw_exit(&php->iph_lock);
3580 3581 ipsec_sel_rel(&sp, ns);
3581 3582 return (B_FALSE);
3582 3583 }
3583 3584
3584 3585 int
3585 3586 ipsec_policy_delete_index(ipsec_policy_head_t *php, uint64_t policy_index,
3586 3587 netstack_t *ns)
3587 3588 {
3588 3589 boolean_t found = B_FALSE;
3589 3590 ipsec_policy_t ipkey;
3590 3591 ipsec_policy_t *ip;
3591 3592 avl_index_t where;
3592 3593
3593 3594 bzero(&ipkey, sizeof (ipkey));
3594 3595 ipkey.ipsp_index = policy_index;
3595 3596
3596 3597 rw_enter(&php->iph_lock, RW_WRITER);
3597 3598
3598 3599 /*
3599 3600 	 * We could be cleverer here about the walk, but (k+1)*log(N) will
3600 3601 	 * do for now (k == number of matches, N == number of table
3601 3602 	 * entries).
3602 3603 	 */
3603 3604 for (;;) {
3604 3605 ip = (ipsec_policy_t *)avl_find(&php->iph_rulebyid,
3605 3606 (void *)&ipkey, &where);
3606 3607 ASSERT(ip == NULL);
3607 3608
3608 3609 ip = avl_nearest(&php->iph_rulebyid, where, AVL_AFTER);
3609 3610
3610 3611 if (ip == NULL)
3611 3612 break;
3612 3613
3613 3614 if (ip->ipsp_index != policy_index) {
3614 3615 ASSERT(ip->ipsp_index > policy_index);
3615 3616 break;
3616 3617 }
3617 3618
3618 3619 IPPOL_UNCHAIN(php, ip);
3619 3620 found = B_TRUE;
3620 3621 }
3621 3622
3622 3623 if (found) {
3623 3624 php->iph_gen++;
3624 3625 ipsec_update_present_flags(ns->netstack_ipsec);
3625 3626 }
3626 3627
3627 3628 rw_exit(&php->iph_lock);
3628 3629
3629 3630 return (found ? 0 : ENOENT);
3630 3631 }
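/*
 * The avl_find() above is expected to fail (hence the ASSERT): the
 * zero-filled ipkey is presumably ordered by ipsec_policy_cmpbyid() so
 * that it never compares equal to a real rule, and is used only to obtain
 * "where", after which avl_nearest(..., AVL_AFTER) yields the first rule
 * at or after policy_index.  Matching rules are unchained one by one
 * until the index changes.
 */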
3631 3632
3632 3633 /*
3633 3634 * Given a constructed ipsec_policy_t policy rule, see if it can be entered
3634 3635 * into the correct policy ruleset. As a side-effect, it sets the hash
3635 3636  * chain value on "ipp"'s selector key (ipsl_pol_hval).
3636 3637 *
3637 3638  * Returns B_TRUE if it can be entered, B_FALSE if it can't be (because
3638 3639  * a duplicate policy with the same selectors exists, or because an ICMP
3639 3640  * rule exists with a different encryption/authentication action).
3640 3641 */
3641 3642 boolean_t
3642 3643 ipsec_check_policy(ipsec_policy_head_t *php, ipsec_policy_t *ipp, int direction)
3643 3644 {
3644 3645 ipsec_policy_root_t *pr = &php->iph_root[direction];
3645 3646 int af = -1;
3646 3647 ipsec_policy_t *p2, *head;
3647 3648 uint8_t check_proto;
3648 3649 ipsec_selkey_t *selkey = &ipp->ipsp_sel->ipsl_key;
3649 3650 uint32_t valid = selkey->ipsl_valid;
3650 3651
3651 3652 if (valid & IPSL_IPV6) {
3652 3653 ASSERT(!(valid & IPSL_IPV4));
3653 3654 af = IPSEC_AF_V6;
3654 3655 check_proto = IPPROTO_ICMPV6;
3655 3656 } else {
3656 3657 ASSERT(valid & IPSL_IPV4);
3657 3658 af = IPSEC_AF_V4;
3658 3659 check_proto = IPPROTO_ICMP;
3659 3660 }
3660 3661
3661 3662 ASSERT(RW_WRITE_HELD(&php->iph_lock));
3662 3663
3663 3664 /*
3664 3665 * Double-check that we don't have any duplicate selectors here.
3665 3666 * Because selectors are interned below, we need only compare pointers
3666 3667 * for equality.
3667 3668 */
3668 3669 if (selkey->ipsl_sel_hval == IPSEC_SEL_NOHASH) {
3669 3670 head = pr->ipr_nonhash[af];
3670 3671 } else {
3671 3672 selkey->ipsl_pol_hval =
3672 3673 (selkey->ipsl_valid & IPSL_IPV4) ?
3673 3674 IPSEC_IPV4_HASH(selkey->ipsl_remote.ipsad_v4,
3674 3675 pr->ipr_nchains) :
3675 3676 IPSEC_IPV6_HASH(selkey->ipsl_remote.ipsad_v6,
3676 3677 pr->ipr_nchains);
3677 3678
3678 3679 head = pr->ipr_hash[selkey->ipsl_pol_hval].hash_head;
3679 3680 }
3680 3681
3681 3682 for (p2 = head; p2 != NULL; p2 = p2->ipsp_hash.hash_next) {
3682 3683 if (p2->ipsp_sel == ipp->ipsp_sel)
3683 3684 return (B_FALSE);
3684 3685 }
3685 3686
3686 3687 /*
3687 3688 * If it's ICMP and not a drop or pass rule, run through the ICMP
3688 3689 * rules and make sure the action is either new or the same as any
3689 3690 * other actions. We don't have to check the full chain because
3690 3691 	 * discard and bypass will override all other actions.
3691 3692 */
3692 3693
3693 3694 if (valid & IPSL_PROTOCOL &&
3694 3695 selkey->ipsl_proto == check_proto &&
3695 3696 (ipp->ipsp_act->ipa_act.ipa_type == IPSEC_ACT_APPLY)) {
3696 3697
3697 3698 for (p2 = head; p2 != NULL; p2 = p2->ipsp_hash.hash_next) {
3698 3699
3699 3700 if (p2->ipsp_sel->ipsl_key.ipsl_valid & IPSL_PROTOCOL &&
3700 3701 p2->ipsp_sel->ipsl_key.ipsl_proto == check_proto &&
3701 3702 (p2->ipsp_act->ipa_act.ipa_type ==
3702 3703 IPSEC_ACT_APPLY)) {
3703 3704 return (ipsec_compare_action(p2, ipp));
3704 3705 }
3705 3706 }
3706 3707 }
3707 3708
3708 3709 return (B_TRUE);
3709 3710 }
3710 3711
3711 3712 /*
3712 3713 * compare the action chains of two policies for equality
3713 3714 * B_TRUE -> effective equality
3714 3715 */
3715 3716
3716 3717 static boolean_t
3717 3718 ipsec_compare_action(ipsec_policy_t *p1, ipsec_policy_t *p2)
3718 3719 {
3719 3720
3720 3721 ipsec_action_t *act1, *act2;
3721 3722
3722 3723 /* We have a valid rule. Let's compare the actions */
3723 3724 if (p1->ipsp_act == p2->ipsp_act) {
3724 3725 /* same action. We are good */
3725 3726 return (B_TRUE);
3726 3727 }
3727 3728
3728 3729 /* we have to walk the chain */
3729 3730
3730 3731 act1 = p1->ipsp_act;
3731 3732 act2 = p2->ipsp_act;
3732 3733
3733 3734 while (act1 != NULL && act2 != NULL) {
3734 3735
3735 3736 		/* Otherwise, are we close enough? */
3736 3737 if (act1->ipa_allow_clear != act2->ipa_allow_clear ||
3737 3738 act1->ipa_want_ah != act2->ipa_want_ah ||
3738 3739 act1->ipa_want_esp != act2->ipa_want_esp ||
3739 3740 act1->ipa_want_se != act2->ipa_want_se) {
3740 3741 /* Nope, we aren't */
3741 3742 return (B_FALSE);
3742 3743 }
3743 3744
3744 3745 if (act1->ipa_want_ah) {
3745 3746 if (act1->ipa_act.ipa_apply.ipp_auth_alg !=
3746 3747 act2->ipa_act.ipa_apply.ipp_auth_alg) {
3747 3748 return (B_FALSE);
3748 3749 }
3749 3750
3750 3751 if (act1->ipa_act.ipa_apply.ipp_ah_minbits !=
3751 3752 act2->ipa_act.ipa_apply.ipp_ah_minbits ||
3752 3753 act1->ipa_act.ipa_apply.ipp_ah_maxbits !=
3753 3754 act2->ipa_act.ipa_apply.ipp_ah_maxbits) {
3754 3755 return (B_FALSE);
3755 3756 }
3756 3757 }
3757 3758
3758 3759 if (act1->ipa_want_esp) {
3759 3760 if (act1->ipa_act.ipa_apply.ipp_use_esp !=
3760 3761 act2->ipa_act.ipa_apply.ipp_use_esp ||
3761 3762 act1->ipa_act.ipa_apply.ipp_use_espa !=
3762 3763 act2->ipa_act.ipa_apply.ipp_use_espa) {
3763 3764 return (B_FALSE);
3764 3765 }
3765 3766
3766 3767 if (act1->ipa_act.ipa_apply.ipp_use_esp) {
3767 3768 if (act1->ipa_act.ipa_apply.ipp_encr_alg !=
3768 3769 act2->ipa_act.ipa_apply.ipp_encr_alg) {
3769 3770 return (B_FALSE);
3770 3771 }
3771 3772
3772 3773 if (act1->ipa_act.ipa_apply.ipp_espe_minbits !=
3773 3774 act2->ipa_act.ipa_apply.ipp_espe_minbits ||
3774 3775 act1->ipa_act.ipa_apply.ipp_espe_maxbits !=
3775 3776 act2->ipa_act.ipa_apply.ipp_espe_maxbits) {
3776 3777 return (B_FALSE);
3777 3778 }
3778 3779 }
3779 3780
3780 3781 if (act1->ipa_act.ipa_apply.ipp_use_espa) {
3781 3782 if (act1->ipa_act.ipa_apply.ipp_esp_auth_alg !=
3782 3783 act2->ipa_act.ipa_apply.ipp_esp_auth_alg) {
3783 3784 return (B_FALSE);
3784 3785 }
3785 3786
3786 3787 if (act1->ipa_act.ipa_apply.ipp_espa_minbits !=
3787 3788 act2->ipa_act.ipa_apply.ipp_espa_minbits ||
3788 3789 act1->ipa_act.ipa_apply.ipp_espa_maxbits !=
3789 3790 act2->ipa_act.ipa_apply.ipp_espa_maxbits) {
3790 3791 return (B_FALSE);
3791 3792 }
3792 3793 }
3793 3794
3794 3795 }
3795 3796
3796 3797 act1 = act1->ipa_next;
3797 3798 act2 = act2->ipa_next;
3798 3799 }
3799 3800
3800 3801 if (act1 != NULL || act2 != NULL) {
3801 3802 return (B_FALSE);
3802 3803 }
3803 3804
3804 3805 return (B_TRUE);
3805 3806 }
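/*
 * "Effective" equality here means that every link of both chains agrees
 * on clear/AH/ESP/self-encap usage and, where AH or ESP is requested, on
 * the algorithms and min/max key sizes; other ipsec_act_t fields are not
 * compared.
 */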
3806 3807
3807 3808
3808 3809 /*
3809 3810 * Given a constructed ipsec_policy_t policy rule, enter it into
3810 3811 * the correct policy ruleset.
3811 3812 *
3812 3813 * ipsec_check_policy() is assumed to have succeeded first (to check for
3813 3814 * duplicates).
3814 3815 */
3815 3816 void
3816 3817 ipsec_enter_policy(ipsec_policy_head_t *php, ipsec_policy_t *ipp, int direction,
3817 3818 netstack_t *ns)
3818 3819 {
3819 3820 ipsec_policy_root_t *pr = &php->iph_root[direction];
3820 3821 ipsec_selkey_t *selkey = &ipp->ipsp_sel->ipsl_key;
3821 3822 uint32_t valid = selkey->ipsl_valid;
3822 3823 uint32_t hval = selkey->ipsl_pol_hval;
3823 3824 int af = -1;
3824 3825
3825 3826 ASSERT(RW_WRITE_HELD(&php->iph_lock));
3826 3827
3827 3828 if (valid & IPSL_IPV6) {
3828 3829 ASSERT(!(valid & IPSL_IPV4));
3829 3830 af = IPSEC_AF_V6;
3830 3831 } else {
3831 3832 ASSERT(valid & IPSL_IPV4);
3832 3833 af = IPSEC_AF_V4;
3833 3834 }
3834 3835
3835 3836 php->iph_gen++;
3836 3837
3837 3838 if (hval == IPSEC_SEL_NOHASH) {
3838 3839 HASHLIST_INSERT(ipp, ipsp_hash, pr->ipr_nonhash[af]);
3839 3840 } else {
3840 3841 HASH_LOCK(pr->ipr_hash, hval);
3841 3842 HASH_INSERT(ipp, ipsp_hash, pr->ipr_hash, hval);
3842 3843 HASH_UNLOCK(pr->ipr_hash, hval);
3843 3844 }
3844 3845
3845 3846 ipsec_insert_always(&php->iph_rulebyid, ipp);
3846 3847
3847 3848 ipsec_update_present_flags(ns->netstack_ipsec);
3848 3849 }
3849 3850
3850 3851 static void
3851 3852 ipsec_ipr_flush(ipsec_policy_head_t *php, ipsec_policy_root_t *ipr)
3852 3853 {
3853 3854 ipsec_policy_t *ip, *nip;
3854 3855 int af, chain, nchain;
3855 3856
3856 3857 for (af = 0; af < IPSEC_NAF; af++) {
3857 3858 for (ip = ipr->ipr_nonhash[af]; ip != NULL; ip = nip) {
3858 3859 nip = ip->ipsp_hash.hash_next;
3859 3860 IPPOL_UNCHAIN(php, ip);
3860 3861 }
3861 3862 ipr->ipr_nonhash[af] = NULL;
3862 3863 }
3863 3864 nchain = ipr->ipr_nchains;
3864 3865
3865 3866 for (chain = 0; chain < nchain; chain++) {
3866 3867 for (ip = ipr->ipr_hash[chain].hash_head; ip != NULL;
3867 3868 ip = nip) {
3868 3869 nip = ip->ipsp_hash.hash_next;
3869 3870 IPPOL_UNCHAIN(php, ip);
3870 3871 }
3871 3872 ipr->ipr_hash[chain].hash_head = NULL;
3872 3873 }
3873 3874 }
3874 3875
3875 3876 /*
3876 3877 * Create and insert inbound or outbound policy associated with actp for the
3877 3878 * address family fam into the policy head ph. Returns B_TRUE if policy was
3878 3879 * inserted, and B_FALSE otherwise.
3879 3880 */
3880 3881 boolean_t
3881 3882 ipsec_polhead_insert(ipsec_policy_head_t *ph, ipsec_act_t *actp, uint_t nact,
3882 3883 int fam, int ptype, netstack_t *ns)
3883 3884 {
3884 3885 ipsec_selkey_t sel;
3885 3886 ipsec_policy_t *pol;
3886 3887 ipsec_policy_root_t *pr;
3887 3888
3888 3889 bzero(&sel, sizeof (sel));
3889 3890 sel.ipsl_valid = (fam == IPSEC_AF_V4 ? IPSL_IPV4 : IPSL_IPV6);
3890 3891 if ((pol = ipsec_policy_create(&sel, actp, nact, IPSEC_PRIO_SOCKET,
3891 3892 NULL, ns)) != NULL) {
3892 3893 pr = &ph->iph_root[ptype];
3893 3894 HASHLIST_INSERT(pol, ipsp_hash, pr->ipr_nonhash[fam]);
3894 3895 ipsec_insert_always(&ph->iph_rulebyid, pol);
3895 3896 }
3896 3897 return (pol != NULL);
3897 3898 }
3898 3899
3899 3900 void
3900 3901 ipsec_polhead_flush(ipsec_policy_head_t *php, netstack_t *ns)
3901 3902 {
3902 3903 int dir;
3903 3904
3904 3905 ASSERT(RW_WRITE_HELD(&php->iph_lock));
3905 3906
3906 3907 for (dir = 0; dir < IPSEC_NTYPES; dir++)
3907 3908 ipsec_ipr_flush(php, &php->iph_root[dir]);
3908 3909
3909 3910 php->iph_gen++;
3910 3911 ipsec_update_present_flags(ns->netstack_ipsec);
3911 3912 }
3912 3913
3913 3914 void
3914 3915 ipsec_polhead_free(ipsec_policy_head_t *php, netstack_t *ns)
3915 3916 {
3916 3917 int dir;
3917 3918
3918 3919 ASSERT(php->iph_refs == 0);
3919 3920
3920 3921 rw_enter(&php->iph_lock, RW_WRITER);
3921 3922 ipsec_polhead_flush(php, ns);
3922 3923 rw_exit(&php->iph_lock);
3923 3924 rw_destroy(&php->iph_lock);
3924 3925 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
3925 3926 ipsec_policy_root_t *ipr = &php->iph_root[dir];
3926 3927 int chain;
3927 3928
3928 3929 for (chain = 0; chain < ipr->ipr_nchains; chain++)
3929 3930 mutex_destroy(&(ipr->ipr_hash[chain].hash_lock));
3930 3931
3931 3932 }
3932 3933 ipsec_polhead_free_table(php);
3933 3934 kmem_free(php, sizeof (*php));
3934 3935 }
3935 3936
3936 3937 static void
3937 3938 ipsec_ipr_init(ipsec_policy_root_t *ipr)
3938 3939 {
3939 3940 int af;
3940 3941
3941 3942 ipr->ipr_nchains = 0;
3942 3943 ipr->ipr_hash = NULL;
3943 3944
3944 3945 for (af = 0; af < IPSEC_NAF; af++) {
3945 3946 ipr->ipr_nonhash[af] = NULL;
3946 3947 }
3947 3948 }
3948 3949
3949 3950 ipsec_policy_head_t *
3950 3951 ipsec_polhead_create(void)
3951 3952 {
3952 3953 ipsec_policy_head_t *php;
3953 3954
3954 3955 php = kmem_alloc(sizeof (*php), KM_NOSLEEP);
3955 3956 if (php == NULL)
3956 3957 return (php);
3957 3958
3958 3959 rw_init(&php->iph_lock, NULL, RW_DEFAULT, NULL);
3959 3960 php->iph_refs = 1;
3960 3961 php->iph_gen = 0;
3961 3962
3962 3963 ipsec_ipr_init(&php->iph_root[IPSEC_TYPE_INBOUND]);
3963 3964 ipsec_ipr_init(&php->iph_root[IPSEC_TYPE_OUTBOUND]);
3964 3965
3965 3966 avl_create(&php->iph_rulebyid, ipsec_policy_cmpbyid,
3966 3967 sizeof (ipsec_policy_t), offsetof(ipsec_policy_t, ipsp_byid));
3967 3968
3968 3969 return (php);
3969 3970 }
3970 3971
3971 3972 /*
3972 3973 * Clone the policy head into a new polhead; release one reference to the
3973 3974 * old one and return the only reference to the new one.
3974 3975 * If the old one had a refcount of 1, just return it.
3975 3976 */
3976 3977 ipsec_policy_head_t *
3977 3978 ipsec_polhead_split(ipsec_policy_head_t *php, netstack_t *ns)
3978 3979 {
3979 3980 ipsec_policy_head_t *nphp;
3980 3981
3981 3982 if (php == NULL)
3982 3983 return (ipsec_polhead_create());
3983 3984 else if (php->iph_refs == 1)
3984 3985 return (php);
3985 3986
3986 3987 nphp = ipsec_polhead_create();
3987 3988 if (nphp == NULL)
3988 3989 return (NULL);
3989 3990
3990 3991 if (ipsec_copy_polhead(php, nphp, ns) != 0) {
3991 3992 ipsec_polhead_free(nphp, ns);
3992 3993 return (NULL);
3993 3994 }
3994 3995 IPPH_REFRELE(php, ns);
3995 3996 return (nphp);
3996 3997 }
3997 3998
3998 3999 /*
3999 4000  * When sending a response to an ICMP request or generating a RST
4000 4001  * in the TCP case, the outbound packets need to go out at the same
4001 4002  * level of protection as the incoming ones, i.e. we associate our
4002 4003  * outbound policy with how the packet came in.  We call this after we
4003 4004  * have accepted the incoming packet, which may or may not have been
4004 4005  * in the clear, and hence we send the reply back with the policy
4005 4006  * matching the incoming datagram's policy.
4006 4007 *
4007 4008  * NOTE: This technique serves two purposes:
4008 4009 *
4009 4010 * 1) If we have multiple outbound policies, we send out a reply
4010 4011 * matching with how it came in rather than matching the outbound
4011 4012 * policy.
4012 4013 *
4013 4014  * 2) For asymmetric policies, we want to make sure that incoming
4014 4015  * and outgoing have the same level of protection.  Asymmetric
4015 4016 * policies exist only with global policy where we may not have
4016 4017 * both outbound and inbound at the same time.
4017 4018 *
4018 4019 * NOTE2: This function is called by cleartext cases, so it needs to be
4019 4020 * in IP proper.
4020 4021 *
4021 4022 * Note: the caller has moved other parts of ira into ixa already.
4022 4023 */
4023 4024 boolean_t
4024 4025 ipsec_in_to_out(ip_recv_attr_t *ira, ip_xmit_attr_t *ixa, mblk_t *data_mp,
4025 4026 ipha_t *ipha, ip6_t *ip6h)
4026 4027 {
4027 4028 ipsec_selector_t sel;
4028 4029 ipsec_action_t *reflect_action = NULL;
4029 4030 netstack_t *ns = ixa->ixa_ipst->ips_netstack;
4030 4031
4031 4032 bzero((void*)&sel, sizeof (sel));
4032 4033
4033 4034 if (ira->ira_ipsec_action != NULL) {
4034 4035 /* transfer reference.. */
4035 4036 reflect_action = ira->ira_ipsec_action;
4036 4037 ira->ira_ipsec_action = NULL;
4037 4038 } else if (!(ira->ira_flags & IRAF_LOOPBACK))
4038 4039 reflect_action = ipsec_in_to_out_action(ira);
4039 4040
4040 4041 /*
4041 4042 * The caller is going to send the datagram out which might
4042 4043 * go on the wire or delivered locally through ire_send_local.
4043 4044 *
4044 4045 * 1) If it goes out on the wire, new associations will be
4045 4046 * obtained.
4046 4047 * 2) If it is delivered locally, ire_send_local will convert
4047 4048 * this ip_xmit_attr_t back to a ip_recv_attr_t looking at the
4048 4049 * requests.
4049 4050 */
4050 4051 ixa->ixa_ipsec_action = reflect_action;
4051 4052
4052 4053 if (!ipsec_init_outbound_ports(&sel, data_mp, ipha, ip6h, 0,
4053 4054 ns->netstack_ipsec)) {
4054 4055 /* Note: data_mp already consumed and ip_drop_packet done */
4055 4056 return (B_FALSE);
4056 4057 }
4057 4058 ixa->ixa_ipsec_src_port = sel.ips_local_port;
4058 4059 ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4059 4060 ixa->ixa_ipsec_proto = sel.ips_protocol;
4060 4061 ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4061 4062 ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4062 4063
4063 4064 /*
4064 4065 * Don't use global policy for this, as we want
4065 4066 * to use the same protection that was applied to the inbound packet.
4066 4067 	 * Thus we set IXAF_NO_IPSEC if it arrived in the clear, to make
4067 4068 * it be sent in the clear.
4068 4069 */
4069 4070 if (ira->ira_flags & IRAF_IPSEC_SECURE)
4070 4071 ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4071 4072 else
4072 4073 ixa->ixa_flags |= IXAF_NO_IPSEC;
4073 4074
4074 4075 return (B_TRUE);
4075 4076 }
4076 4077
4077 4078 void
4078 4079 ipsec_out_release_refs(ip_xmit_attr_t *ixa)
4079 4080 {
4080 4081 if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE))
4081 4082 return;
4082 4083
4083 4084 if (ixa->ixa_ipsec_ah_sa != NULL) {
4084 4085 IPSA_REFRELE(ixa->ixa_ipsec_ah_sa);
4085 4086 ixa->ixa_ipsec_ah_sa = NULL;
4086 4087 }
4087 4088 if (ixa->ixa_ipsec_esp_sa != NULL) {
4088 4089 IPSA_REFRELE(ixa->ixa_ipsec_esp_sa);
4089 4090 ixa->ixa_ipsec_esp_sa = NULL;
4090 4091 }
4091 4092 if (ixa->ixa_ipsec_policy != NULL) {
4092 4093 IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4093 4094 ixa->ixa_ipsec_policy = NULL;
4094 4095 }
4095 4096 if (ixa->ixa_ipsec_action != NULL) {
4096 4097 IPACT_REFRELE(ixa->ixa_ipsec_action);
4097 4098 ixa->ixa_ipsec_action = NULL;
4098 4099 }
4099 4100 if (ixa->ixa_ipsec_latch) {
4100 4101 IPLATCH_REFRELE(ixa->ixa_ipsec_latch);
4101 4102 ixa->ixa_ipsec_latch = NULL;
4102 4103 }
4103 4104 /* Clear the soft references to the SAs */
4104 4105 ixa->ixa_ipsec_ref[0].ipsr_sa = NULL;
4105 4106 ixa->ixa_ipsec_ref[0].ipsr_bucket = NULL;
4106 4107 ixa->ixa_ipsec_ref[0].ipsr_gen = 0;
4107 4108 ixa->ixa_ipsec_ref[1].ipsr_sa = NULL;
4108 4109 ixa->ixa_ipsec_ref[1].ipsr_bucket = NULL;
4109 4110 ixa->ixa_ipsec_ref[1].ipsr_gen = 0;
4110 4111 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4111 4112 }
4112 4113
4113 4114 void
4114 4115 ipsec_in_release_refs(ip_recv_attr_t *ira)
4115 4116 {
4116 4117 if (!(ira->ira_flags & IRAF_IPSEC_SECURE))
4117 4118 return;
4118 4119
4119 4120 if (ira->ira_ipsec_ah_sa != NULL) {
4120 4121 IPSA_REFRELE(ira->ira_ipsec_ah_sa);
4121 4122 ira->ira_ipsec_ah_sa = NULL;
4122 4123 }
4123 4124 if (ira->ira_ipsec_esp_sa != NULL) {
4124 4125 IPSA_REFRELE(ira->ira_ipsec_esp_sa);
4125 4126 ira->ira_ipsec_esp_sa = NULL;
4126 4127 }
4127 4128 ira->ira_flags &= ~IRAF_IPSEC_SECURE;
4128 4129 }
4129 4130
4130 4131 /*
4131 4132 * This is called from ire_send_local when a packet
4132 4133  * is looped back.  We set up the ip_recv_attr_t "borrowing" the references
4133 4134 * held by the callers.
4134 4135 * Note that we don't do any IPsec but we carry the actions and IPSEC flags
4135 4136 * across so that the fanout policy checks see that IPsec was applied.
4136 4137 *
4137 4138 * The caller should do ipsec_in_release_refs() on the ira by calling
4138 4139 * ira_cleanup().
4139 4140 */
4140 4141 void
4141 4142 ipsec_out_to_in(ip_xmit_attr_t *ixa, ill_t *ill, ip_recv_attr_t *ira)
4142 4143 {
4143 4144 ipsec_policy_t *pol;
4144 4145 ipsec_action_t *act;
4145 4146
4146 4147 /* Non-IPsec operations */
4147 4148 ira->ira_free_flags = 0;
4148 4149 ira->ira_zoneid = ixa->ixa_zoneid;
4149 4150 ira->ira_cred = ixa->ixa_cred;
4150 4151 ira->ira_cpid = ixa->ixa_cpid;
4151 4152 ira->ira_tsl = ixa->ixa_tsl;
4152 4153 ira->ira_ill = ira->ira_rill = ill;
4153 4154 ira->ira_flags = ixa->ixa_flags & IAF_MASK;
4154 4155 ira->ira_no_loop_zoneid = ixa->ixa_no_loop_zoneid;
4155 4156 ira->ira_pktlen = ixa->ixa_pktlen;
4156 4157 ira->ira_ip_hdr_length = ixa->ixa_ip_hdr_length;
4157 4158 ira->ira_protocol = ixa->ixa_protocol;
4158 4159 ira->ira_mhip = NULL;
4159 4160
4160 4161 ira->ira_flags |= IRAF_LOOPBACK | IRAF_L2SRC_LOOPBACK;
4161 4162
4162 4163 ira->ira_sqp = ixa->ixa_sqp;
4163 4164 ira->ira_ring = NULL;
4164 4165
4165 4166 ira->ira_ruifindex = ill->ill_phyint->phyint_ifindex;
4166 4167 ira->ira_rifindex = ira->ira_ruifindex;
4167 4168
4168 4169 if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE))
4169 4170 return;
4170 4171
4171 4172 ira->ira_flags |= IRAF_IPSEC_SECURE;
4172 4173
4173 4174 ira->ira_ipsec_ah_sa = NULL;
4174 4175 ira->ira_ipsec_esp_sa = NULL;
4175 4176
4176 4177 act = ixa->ixa_ipsec_action;
4177 4178 if (act == NULL) {
4178 4179 pol = ixa->ixa_ipsec_policy;
4179 4180 if (pol != NULL) {
4180 4181 act = pol->ipsp_act;
4181 4182 IPACT_REFHOLD(act);
4182 4183 }
4183 4184 }
4184 4185 ixa->ixa_ipsec_action = NULL;
4185 4186 ira->ira_ipsec_action = act;
4186 4187 }
4187 4188
4188 4189 /*
4189 4190 * Consults global policy and per-socket policy to see whether this datagram
4190 4191 * should go out secure. If so it updates the ip_xmit_attr_t
4191 4192 * Should not be used when connecting, since then we want to latch the policy.
4192 4193 *
4193 4194 * If connp is NULL we just look at the global policy.
4194 4195 *
4195 4196 * Returns NULL if the packet was dropped, in which case the MIB has
4196 4197 * been incremented and ip_drop_packet done.
4197 4198 */
4198 4199 mblk_t *
4199 4200 ip_output_attach_policy(mblk_t *mp, ipha_t *ipha, ip6_t *ip6h,
4200 4201 const conn_t *connp, ip_xmit_attr_t *ixa)
4201 4202 {
4202 4203 ipsec_selector_t sel;
4203 4204 boolean_t policy_present;
4204 4205 ip_stack_t *ipst = ixa->ixa_ipst;
4205 4206 netstack_t *ns = ipst->ips_netstack;
4206 4207 ipsec_stack_t *ipss = ns->netstack_ipsec;
4207 4208 ipsec_policy_t *p;
4208 4209
4209 4210 ixa->ixa_ipsec_policy_gen = ipss->ipsec_system_policy.iph_gen;
4210 4211 ASSERT((ipha != NULL && ip6h == NULL) ||
4211 4212 (ip6h != NULL && ipha == NULL));
4212 4213
4213 4214 if (ipha != NULL)
4214 4215 policy_present = ipss->ipsec_outbound_v4_policy_present;
4215 4216 else
4216 4217 policy_present = ipss->ipsec_outbound_v6_policy_present;
4217 4218
4218 4219 if (!policy_present && (connp == NULL || connp->conn_policy == NULL))
4219 4220 return (mp);
4220 4221
4221 4222 bzero((void*)&sel, sizeof (sel));
4222 4223
4223 4224 if (ipha != NULL) {
4224 4225 sel.ips_local_addr_v4 = ipha->ipha_src;
4225 4226 sel.ips_remote_addr_v4 = ip_get_dst(ipha);
4226 4227 sel.ips_isv4 = B_TRUE;
4227 4228 } else {
4228 4229 sel.ips_isv4 = B_FALSE;
4229 4230 sel.ips_local_addr_v6 = ip6h->ip6_src;
4230 4231 sel.ips_remote_addr_v6 = ip_get_dst_v6(ip6h, mp, NULL);
4231 4232 }
4232 4233 sel.ips_protocol = ixa->ixa_protocol;
4233 4234
4234 4235 if (!ipsec_init_outbound_ports(&sel, mp, ipha, ip6h, 0, ipss)) {
4235 4236 if (ipha != NULL) {
4236 4237 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
4237 4238 } else {
4238 4239 BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsOutDiscards);
4239 4240 }
4240 4241 /* Note: mp already consumed and ip_drop_packet done */
4241 4242 return (NULL);
4242 4243 }
4243 4244
4244 4245 ASSERT(ixa->ixa_ipsec_policy == NULL);
4245 4246 p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, &sel, ns);
4246 4247 ixa->ixa_ipsec_policy = p;
4247 4248 if (p != NULL) {
4248 4249 ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4249 4250 if (connp == NULL || connp->conn_policy == NULL)
4250 4251 ixa->ixa_flags |= IXAF_IPSEC_GLOBAL_POLICY;
4251 4252 } else {
4252 4253 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4253 4254 }
4254 4255
4255 4256 /*
4256 4257 * Copy the right port information.
4257 4258 */
4258 4259 ixa->ixa_ipsec_src_port = sel.ips_local_port;
4259 4260 ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4260 4261 ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4261 4262 ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4262 4263 ixa->ixa_ipsec_proto = sel.ips_protocol;
4263 4264 return (mp);
4264 4265 }
4265 4266
4266 4267 /*
4267 4268 * When appropriate, this function caches inbound and outbound policy
4268 4269 * for this connection. The outbound policy is stored in conn_ixa.
4269 4270 * Note that it can not be used for SCTP since conn_faddr isn't set for SCTP.
4270 4271 *
4271 4272 * XXX need to work out more details about per-interface policy and
4272 4273 * caching here!
4273 4274 *
4274 4275 * XXX may want to split inbound and outbound caching for ill..
4275 4276 */
4276 4277 int
4277 4278 ipsec_conn_cache_policy(conn_t *connp, boolean_t isv4)
4278 4279 {
4279 4280 boolean_t global_policy_present;
4280 4281 netstack_t *ns = connp->conn_netstack;
4281 4282 ipsec_stack_t *ipss = ns->netstack_ipsec;
4282 4283
4283 4284 connp->conn_ixa->ixa_ipsec_policy_gen =
4284 4285 ipss->ipsec_system_policy.iph_gen;
4285 4286 /*
4286 4287 * There is no policy latching for ICMP sockets because we can't
4287 4288 * decide on which policy to use until we see the packet and get
4288 4289 * type/code selectors.
4289 4290 */
4290 4291 if (connp->conn_proto == IPPROTO_ICMP ||
4291 4292 connp->conn_proto == IPPROTO_ICMPV6) {
4292 4293 connp->conn_in_enforce_policy =
4293 4294 connp->conn_out_enforce_policy = B_TRUE;
4294 4295 if (connp->conn_latch != NULL) {
4295 4296 IPLATCH_REFRELE(connp->conn_latch);
4296 4297 connp->conn_latch = NULL;
4297 4298 }
4298 4299 if (connp->conn_latch_in_policy != NULL) {
4299 4300 IPPOL_REFRELE(connp->conn_latch_in_policy);
4300 4301 connp->conn_latch_in_policy = NULL;
4301 4302 }
4302 4303 if (connp->conn_latch_in_action != NULL) {
4303 4304 IPACT_REFRELE(connp->conn_latch_in_action);
4304 4305 connp->conn_latch_in_action = NULL;
4305 4306 }
4306 4307 if (connp->conn_ixa->ixa_ipsec_policy != NULL) {
4307 4308 IPPOL_REFRELE(connp->conn_ixa->ixa_ipsec_policy);
4308 4309 connp->conn_ixa->ixa_ipsec_policy = NULL;
4309 4310 }
4310 4311 if (connp->conn_ixa->ixa_ipsec_action != NULL) {
4311 4312 IPACT_REFRELE(connp->conn_ixa->ixa_ipsec_action);
4312 4313 connp->conn_ixa->ixa_ipsec_action = NULL;
4313 4314 }
4314 4315 connp->conn_ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4315 4316 return (0);
4316 4317 }
4317 4318
4318 4319 global_policy_present = isv4 ?
4319 4320 (ipss->ipsec_outbound_v4_policy_present ||
4320 4321 ipss->ipsec_inbound_v4_policy_present) :
4321 4322 (ipss->ipsec_outbound_v6_policy_present ||
4322 4323 ipss->ipsec_inbound_v6_policy_present);
4323 4324
4324 4325 if ((connp->conn_policy != NULL) || global_policy_present) {
4325 4326 ipsec_selector_t sel;
4326 4327 ipsec_policy_t *p;
4327 4328
4328 4329 if (connp->conn_latch == NULL &&
4329 4330 (connp->conn_latch = iplatch_create()) == NULL) {
4330 4331 return (ENOMEM);
4331 4332 }
4332 4333
4333 4334 bzero((void*)&sel, sizeof (sel));
4334 4335
4335 4336 sel.ips_protocol = connp->conn_proto;
4336 4337 sel.ips_local_port = connp->conn_lport;
4337 4338 sel.ips_remote_port = connp->conn_fport;
4338 4339 sel.ips_is_icmp_inv_acq = 0;
4339 4340 sel.ips_isv4 = isv4;
4340 4341 if (isv4) {
4341 4342 sel.ips_local_addr_v4 = connp->conn_laddr_v4;
4342 4343 sel.ips_remote_addr_v4 = connp->conn_faddr_v4;
4343 4344 } else {
4344 4345 sel.ips_local_addr_v6 = connp->conn_laddr_v6;
4345 4346 sel.ips_remote_addr_v6 = connp->conn_faddr_v6;
4346 4347 }
4347 4348
4348 4349 p = ipsec_find_policy(IPSEC_TYPE_INBOUND, connp, &sel, ns);
4349 4350 if (connp->conn_latch_in_policy != NULL)
4350 4351 IPPOL_REFRELE(connp->conn_latch_in_policy);
4351 4352 connp->conn_latch_in_policy = p;
4352 4353 connp->conn_in_enforce_policy = (p != NULL);
4353 4354
4354 4355 p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, &sel, ns);
4355 4356 if (connp->conn_ixa->ixa_ipsec_policy != NULL)
4356 4357 IPPOL_REFRELE(connp->conn_ixa->ixa_ipsec_policy);
4357 4358 connp->conn_ixa->ixa_ipsec_policy = p;
4358 4359 connp->conn_out_enforce_policy = (p != NULL);
4359 4360 if (p != NULL) {
4360 4361 connp->conn_ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4361 4362 if (connp->conn_policy == NULL) {
4362 4363 connp->conn_ixa->ixa_flags |=
4363 4364 IXAF_IPSEC_GLOBAL_POLICY;
4364 4365 }
4365 4366 } else {
4366 4367 connp->conn_ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4367 4368 }
4368 4369 /* Clear the latched actions too, in case we're recaching. */
4369 4370 if (connp->conn_ixa->ixa_ipsec_action != NULL) {
4370 4371 IPACT_REFRELE(connp->conn_ixa->ixa_ipsec_action);
4371 4372 connp->conn_ixa->ixa_ipsec_action = NULL;
4372 4373 }
4373 4374 if (connp->conn_latch_in_action != NULL) {
4374 4375 IPACT_REFRELE(connp->conn_latch_in_action);
4375 4376 connp->conn_latch_in_action = NULL;
4376 4377 }
4377 4378 connp->conn_ixa->ixa_ipsec_src_port = sel.ips_local_port;
4378 4379 connp->conn_ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4379 4380 connp->conn_ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4380 4381 connp->conn_ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4381 4382 connp->conn_ixa->ixa_ipsec_proto = sel.ips_protocol;
4382 4383 } else {
4383 4384 connp->conn_ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4384 4385 }
4385 4386
4386 4387 /*
4387 4388 * We may or may not have policy for this endpoint. We still set
4388 4389 * conn_policy_cached so that inbound datagrams don't have to look
4389 4390 * at global policy as policy is considered latched for these
4390 4391 * endpoints. We should not set conn_policy_cached until the conn
4391 4392 * reflects the actual policy. If we *set* this before inheriting
4392 4393 * the policy there is a window where the check
4393 4394  * CONN_INBOUND_POLICY_PRESENT will neither check with the policy
4394 4395 * on the conn (because we have not yet copied the policy on to
4395 4396 * conn and hence not set conn_in_enforce_policy) nor with the
4396 4397 * global policy (because conn_policy_cached is already set).
4397 4398 */
4398 4399 connp->conn_policy_cached = B_TRUE;
4399 4400 return (0);
4400 4401 }
4401 4402
4402 4403 /*
4403 4404 * When appropriate, this function caches outbound policy for faddr/fport.
4404 4405  * It is used when we are not connected, i.e., when we cannot latch the
4405 4406 * policy.
4406 4407 */
4407 4408 void
4408 4409 ipsec_cache_outbound_policy(const conn_t *connp, const in6_addr_t *v6src,
4409 4410 const in6_addr_t *v6dst, in_port_t dstport, ip_xmit_attr_t *ixa)
4410 4411 {
4411 4412 boolean_t isv4 = (ixa->ixa_flags & IXAF_IS_IPV4) != 0;
4412 4413 boolean_t global_policy_present;
4413 4414 netstack_t *ns = connp->conn_netstack;
4414 4415 ipsec_stack_t *ipss = ns->netstack_ipsec;
4415 4416
4416 4417 ixa->ixa_ipsec_policy_gen = ipss->ipsec_system_policy.iph_gen;
4417 4418
4418 4419 /*
4419 4420 * There is no policy caching for ICMP sockets because we can't
4420 4421 * decide on which policy to use until we see the packet and get
4421 4422 * type/code selectors.
4422 4423 */
4423 4424 if (connp->conn_proto == IPPROTO_ICMP ||
4424 4425 connp->conn_proto == IPPROTO_ICMPV6) {
4425 4426 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4426 4427 if (ixa->ixa_ipsec_policy != NULL) {
4427 4428 IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4428 4429 ixa->ixa_ipsec_policy = NULL;
4429 4430 }
4430 4431 if (ixa->ixa_ipsec_action != NULL) {
4431 4432 IPACT_REFRELE(ixa->ixa_ipsec_action);
4432 4433 ixa->ixa_ipsec_action = NULL;
4433 4434 }
4434 4435 return;
4435 4436 }
4436 4437
4437 4438 global_policy_present = isv4 ?
4438 4439 (ipss->ipsec_outbound_v4_policy_present ||
4439 4440 ipss->ipsec_inbound_v4_policy_present) :
4440 4441 (ipss->ipsec_outbound_v6_policy_present ||
4441 4442 ipss->ipsec_inbound_v6_policy_present);
4442 4443
4443 4444 if ((connp->conn_policy != NULL) || global_policy_present) {
4444 4445 ipsec_selector_t sel;
4445 4446 ipsec_policy_t *p;
4446 4447
4447 4448 bzero((void*)&sel, sizeof (sel));
4448 4449
4449 4450 sel.ips_protocol = connp->conn_proto;
4450 4451 sel.ips_local_port = connp->conn_lport;
4451 4452 sel.ips_remote_port = dstport;
4452 4453 sel.ips_is_icmp_inv_acq = 0;
4453 4454 sel.ips_isv4 = isv4;
4454 4455 if (isv4) {
4455 4456 IN6_V4MAPPED_TO_IPADDR(v6src, sel.ips_local_addr_v4);
4456 4457 IN6_V4MAPPED_TO_IPADDR(v6dst, sel.ips_remote_addr_v4);
4457 4458 } else {
4458 4459 sel.ips_local_addr_v6 = *v6src;
4459 4460 sel.ips_remote_addr_v6 = *v6dst;
4460 4461 }
4461 4462
4462 4463 p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, &sel, ns);
4463 4464 if (ixa->ixa_ipsec_policy != NULL)
4464 4465 IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4465 4466 ixa->ixa_ipsec_policy = p;
4466 4467 if (p != NULL) {
4467 4468 ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4468 4469 if (connp->conn_policy == NULL)
4469 4470 ixa->ixa_flags |= IXAF_IPSEC_GLOBAL_POLICY;
4470 4471 } else {
4471 4472 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4472 4473 }
4473 4474 /* Clear the latched actions too, in case we're recaching. */
4474 4475 if (ixa->ixa_ipsec_action != NULL) {
4475 4476 IPACT_REFRELE(ixa->ixa_ipsec_action);
4476 4477 ixa->ixa_ipsec_action = NULL;
4477 4478 }
4478 4479
4479 4480 ixa->ixa_ipsec_src_port = sel.ips_local_port;
4480 4481 ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4481 4482 ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4482 4483 ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4483 4484 ixa->ixa_ipsec_proto = sel.ips_protocol;
4484 4485 } else {
4485 4486 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4486 4487 if (ixa->ixa_ipsec_policy != NULL) {
4487 4488 IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4488 4489 ixa->ixa_ipsec_policy = NULL;
4489 4490 }
4490 4491 if (ixa->ixa_ipsec_action != NULL) {
4491 4492 IPACT_REFRELE(ixa->ixa_ipsec_action);
4492 4493 ixa->ixa_ipsec_action = NULL;
4493 4494 }
4494 4495 }
4495 4496 }
4496 4497
4497 4498 /*
4498 4499 * Returns B_FALSE if the policy has gone stale.
4499 4500 */
4500 4501 boolean_t
4501 4502 ipsec_outbound_policy_current(ip_xmit_attr_t *ixa)
4502 4503 {
4503 4504 ipsec_stack_t *ipss = ixa->ixa_ipst->ips_netstack->netstack_ipsec;
4504 4505
4505 4506 if (!(ixa->ixa_flags & IXAF_IPSEC_GLOBAL_POLICY))
4506 4507 return (B_TRUE);
4507 4508
4508 4509 return (ixa->ixa_ipsec_policy_gen == ipss->ipsec_system_policy.iph_gen);
4509 4510 }
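/*
 * "Stale" here means the global SPD has changed since the policy decision
 * was cached on the ixa: every rule add, delete, or flush bumps iph_gen
 * (see ipsec_enter_policy() and the delete/flush paths above), so a
 * generation mismatch tells the caller the cached decision can no longer
 * be trusted and the lookup must be redone.
 */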
4510 4511
4511 4512 void
4512 4513 iplatch_free(ipsec_latch_t *ipl)
4513 4514 {
4514 4515 if (ipl->ipl_local_cid != NULL)
4515 4516 IPSID_REFRELE(ipl->ipl_local_cid);
4516 4517 if (ipl->ipl_remote_cid != NULL)
4517 4518 IPSID_REFRELE(ipl->ipl_remote_cid);
4518 4519 mutex_destroy(&ipl->ipl_lock);
4519 4520 kmem_free(ipl, sizeof (*ipl));
4520 4521 }
4521 4522
4522 4523 ipsec_latch_t *
4523 4524 iplatch_create()
4524 4525 {
4525 4526 ipsec_latch_t *ipl = kmem_zalloc(sizeof (*ipl), KM_NOSLEEP);
4526 4527 if (ipl == NULL)
4527 4528 return (ipl);
4528 4529 mutex_init(&ipl->ipl_lock, NULL, MUTEX_DEFAULT, NULL);
4529 4530 ipl->ipl_refcnt = 1;
4530 4531 return (ipl);
4531 4532 }
4532 4533
4533 4534 /*
4534 4535 * Hash function for ID hash table.
4535 4536 */
4536 4537 static uint32_t
4537 4538 ipsid_hash(int idtype, char *idstring)
4538 4539 {
4539 4540 uint32_t hval = idtype;
4540 4541 unsigned char c;
4541 4542
4542 4543 while ((c = *idstring++) != 0) {
4543 4544 hval = (hval << 4) | (hval >> 28);
4544 4545 hval ^= c;
4545 4546 }
4546 4547 hval = hval ^ (hval >> 16);
4547 4548 return (hval & (IPSID_HASHSIZE-1));
4548 4549 }
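/*
 * A simple rotate-and-xor string hash: each character rotates the 32-bit
 * accumulator left by four bits and is xored in, the two halves are then
 * folded together, and the result is masked down to IPSID_HASHSIZE
 * buckets (which works as intended when IPSID_HASHSIZE is a power of two).
 */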
4549 4550
4550 4551 /*
4551 4552 * Look up identity string in hash table. Return identity object
4552 4553 * corresponding to the name -- either preexisting, or newly allocated.
4553 4554 *
4554 4555 * Return NULL if we need to allocate a new one and can't get memory.
4555 4556 */
4556 4557 ipsid_t *
4557 4558 ipsid_lookup(int idtype, char *idstring, netstack_t *ns)
4558 4559 {
4559 4560 ipsid_t *retval;
4560 4561 char *nstr;
4561 4562 int idlen = strlen(idstring) + 1;
4562 4563 ipsec_stack_t *ipss = ns->netstack_ipsec;
4563 4564 ipsif_t *bucket;
4564 4565
4565 4566 bucket = &ipss->ipsec_ipsid_buckets[ipsid_hash(idtype, idstring)];
4566 4567
4567 4568 mutex_enter(&bucket->ipsif_lock);
4568 4569
4569 4570 for (retval = bucket->ipsif_head; retval != NULL;
4570 4571 retval = retval->ipsid_next) {
4571 4572 if (idtype != retval->ipsid_type)
4572 4573 continue;
4573 4574 if (bcmp(idstring, retval->ipsid_cid, idlen) != 0)
4574 4575 continue;
4575 4576
4576 4577 IPSID_REFHOLD(retval);
4577 4578 mutex_exit(&bucket->ipsif_lock);
4578 4579 return (retval);
4579 4580 }
4580 4581
4581 4582 retval = kmem_alloc(sizeof (*retval), KM_NOSLEEP);
4582 4583 if (!retval) {
4583 4584 mutex_exit(&bucket->ipsif_lock);
4584 4585 return (NULL);
4585 4586 }
4586 4587
4587 4588 nstr = kmem_alloc(idlen, KM_NOSLEEP);
4588 4589 if (!nstr) {
4589 4590 mutex_exit(&bucket->ipsif_lock);
4590 4591 kmem_free(retval, sizeof (*retval));
4591 4592 return (NULL);
4592 4593 }
4593 4594
4594 4595 retval->ipsid_refcnt = 1;
4595 4596 retval->ipsid_next = bucket->ipsif_head;
4596 4597 if (retval->ipsid_next != NULL)
4597 4598 retval->ipsid_next->ipsid_ptpn = &retval->ipsid_next;
4598 4599 retval->ipsid_ptpn = &bucket->ipsif_head;
4599 4600 retval->ipsid_type = idtype;
4600 4601 retval->ipsid_cid = nstr;
4601 4602 bucket->ipsif_head = retval;
4602 4603 bcopy(idstring, nstr, idlen);
4603 4604 mutex_exit(&bucket->ipsif_lock);
4604 4605
4605 4606 return (retval);
4606 4607 }
4607 4608
4608 4609 /*
4609 4610 * Garbage collect the identity hash table.
4610 4611 */
4611 4612 void
4612 4613 ipsid_gc(netstack_t *ns)
4613 4614 {
4614 4615 int i, len;
4615 4616 ipsid_t *id, *nid;
4616 4617 ipsif_t *bucket;
4617 4618 ipsec_stack_t *ipss = ns->netstack_ipsec;
4618 4619
4619 4620 for (i = 0; i < IPSID_HASHSIZE; i++) {
4620 4621 bucket = &ipss->ipsec_ipsid_buckets[i];
4621 4622 mutex_enter(&bucket->ipsif_lock);
4622 4623 for (id = bucket->ipsif_head; id != NULL; id = nid) {
4623 4624 nid = id->ipsid_next;
4624 4625 if (id->ipsid_refcnt == 0) {
4625 4626 *id->ipsid_ptpn = nid;
4626 4627 if (nid != NULL)
4627 4628 nid->ipsid_ptpn = id->ipsid_ptpn;
4628 4629 len = strlen(id->ipsid_cid) + 1;
4629 4630 kmem_free(id->ipsid_cid, len);
4630 4631 kmem_free(id, sizeof (*id));
4631 4632 }
4632 4633 }
4633 4634 mutex_exit(&bucket->ipsif_lock);
4634 4635 }
4635 4636 }
4636 4637
4637 4638 /*
4638 4639 * Return true if two identities are the same.
4639 4640 */
4640 4641 boolean_t
4641 4642 ipsid_equal(ipsid_t *id1, ipsid_t *id2)
4642 4643 {
4643 4644 if (id1 == id2)
4644 4645 return (B_TRUE);
4645 4646 #ifdef DEBUG
4646 4647 if ((id1 == NULL) || (id2 == NULL))
4647 4648 return (B_FALSE);
4648 4649 /*
4649 4650 	 * Test that we're interning IDs correctly.
4650 4651 */
4651 4652 ASSERT((strcmp(id1->ipsid_cid, id2->ipsid_cid) != 0) ||
4652 4653 (id1->ipsid_type != id2->ipsid_type));
4653 4654 #endif
4654 4655 return (B_FALSE);
4655 4656 }
4656 4657
4657 4658 /*
4658 4659 * Initialize identity table; called during module initialization.
4659 4660 */
4660 4661 static void
4661 4662 ipsid_init(netstack_t *ns)
4662 4663 {
4663 4664 ipsif_t *bucket;
4664 4665 int i;
4665 4666 ipsec_stack_t *ipss = ns->netstack_ipsec;
4666 4667
4667 4668 for (i = 0; i < IPSID_HASHSIZE; i++) {
4668 4669 bucket = &ipss->ipsec_ipsid_buckets[i];
4669 4670 mutex_init(&bucket->ipsif_lock, NULL, MUTEX_DEFAULT, NULL);
4670 4671 }
4671 4672 }
4672 4673
4673 4674 /*
4674 4675 * Free identity table (preparatory to module unload)
4675 4676 */
4676 4677 static void
4677 4678 ipsid_fini(netstack_t *ns)
4678 4679 {
4679 4680 ipsif_t *bucket;
4680 4681 int i;
4681 4682 ipsec_stack_t *ipss = ns->netstack_ipsec;
4682 4683
4683 4684 for (i = 0; i < IPSID_HASHSIZE; i++) {
4684 4685 bucket = &ipss->ipsec_ipsid_buckets[i];
4685 4686 ASSERT(bucket->ipsif_head == NULL);
4686 4687 mutex_destroy(&bucket->ipsif_lock);
4687 4688 }
4688 4689 }
4689 4690
4690 4691 /*
4691 - * Update the minimum and maximum supported key sizes for the
4692 - * specified algorithm. Must be called while holding the algorithms lock.
4692 + * Update the minimum and maximum supported key sizes for the specified
4693 + * algorithm, which is either a member of a netstack alg array or about to be,
4694 + * and therefore must be called holding ipsec_alg_lock for write.
4693 4695 */
4694 4696 void
4695 4697 ipsec_alg_fix_min_max(ipsec_alginfo_t *alg, ipsec_algtype_t alg_type,
4696 4698 netstack_t *ns)
4697 4699 {
4698 4700 size_t crypto_min = (size_t)-1, crypto_max = 0;
4699 4701 size_t cur_crypto_min, cur_crypto_max;
4700 4702 boolean_t is_valid;
4701 4703 crypto_mechanism_info_t *mech_infos;
4702 4704 uint_t nmech_infos;
4703 4705 int crypto_rc, i;
4704 4706 crypto_mech_usage_t mask;
4705 4707 ipsec_stack_t *ipss = ns->netstack_ipsec;
4706 4708
4707 4709 ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
4708 4710
4709 4711 /*
4710 4712 * Compute the min, max, and default key sizes (in number of
4711 4713 * increments to the default key size in bits) as defined
4712 4714 * by the algorithm mappings. This range of key sizes is used
4713 4715 * for policy related operations. The effective key sizes
4714 4716 * supported by the framework could be more limited than
4715 4717 * those defined for an algorithm.
4716 4718 */
4717 4719 alg->alg_default_bits = alg->alg_key_sizes[0];
4718 4720 alg->alg_default = 0;
4719 4721 if (alg->alg_increment != 0) {
4720 4722 /* key sizes are defined by range & increment */
4721 4723 alg->alg_minbits = alg->alg_key_sizes[1];
4722 4724 alg->alg_maxbits = alg->alg_key_sizes[2];
4723 4725 } else if (alg->alg_nkey_sizes == 0) {
4724 4726 /* no specified key size for algorithm */
4725 4727 alg->alg_minbits = alg->alg_maxbits = 0;
4726 4728 } else {
4727 4729 /* key sizes are defined by enumeration */
4728 4730 alg->alg_minbits = (uint16_t)-1;
4729 4731 alg->alg_maxbits = 0;
4730 4732
4731 4733 for (i = 0; i < alg->alg_nkey_sizes; i++) {
4732 4734 if (alg->alg_key_sizes[i] < alg->alg_minbits)
4733 4735 alg->alg_minbits = alg->alg_key_sizes[i];
4734 4736 if (alg->alg_key_sizes[i] > alg->alg_maxbits)
4735 4737 alg->alg_maxbits = alg->alg_key_sizes[i];
4736 4738 }
4737 4739 }
4738 4740
4739 4741 if (!(alg->alg_flags & ALG_FLAG_VALID))
4740 4742 return;
4741 4743
4742 4744 /*
4743 4745 * Mechanisms do not apply to the NULL encryption
4744 4746 * algorithm, so simply return for this case.
4745 4747 */
4746 4748 if (alg->alg_id == SADB_EALG_NULL)
4747 4749 return;
4748 4750
4749 4751 /*
4750 4752 * Find the min and max key sizes supported by the cryptographic
4751 4753 * framework providers.
4752 4754 */
4753 4755
4754 4756 /* get the key sizes supported by the framework */
4755 4757 crypto_rc = crypto_get_all_mech_info(alg->alg_mech_type,
4756 4758 &mech_infos, &nmech_infos, KM_SLEEP);
4757 4759 if (crypto_rc != CRYPTO_SUCCESS || nmech_infos == 0) {
4758 4760 alg->alg_flags &= ~ALG_FLAG_VALID;
4759 4761 return;
4760 4762 }
4761 4763
4762 4764 /* min and max key sizes supported by framework */
4763 4765 for (i = 0, is_valid = B_FALSE; i < nmech_infos; i++) {
4764 4766 int unit_bits;
4765 4767
4766 4768 /*
4767 4769 * Ignore entries that do not support the operations
4768 4770 * needed for the algorithm type.
4769 4771 */
4770 4772 if (alg_type == IPSEC_ALG_AUTH) {
4771 4773 mask = CRYPTO_MECH_USAGE_MAC;
4772 4774 } else {
4773 4775 mask = CRYPTO_MECH_USAGE_ENCRYPT |
4774 4776 CRYPTO_MECH_USAGE_DECRYPT;
4775 4777 }
4776 4778 if ((mech_infos[i].mi_usage & mask) != mask)
4777 4779 continue;
4778 4780
4779 4781 unit_bits = (mech_infos[i].mi_keysize_unit ==
4780 4782 CRYPTO_KEYSIZE_UNIT_IN_BYTES) ? 8 : 1;
4781 4783 /* adjust min/max supported by framework */
4782 4784 cur_crypto_min = mech_infos[i].mi_min_key_size * unit_bits;
4783 4785 cur_crypto_max = mech_infos[i].mi_max_key_size * unit_bits;
4784 4786
4785 4787 if (cur_crypto_min < crypto_min)
4786 4788 crypto_min = cur_crypto_min;
4787 4789
4788 4790 /*
4789 4791 * CRYPTO_EFFECTIVELY_INFINITE is a special value of
4790 4792 * the crypto framework which means "no upper limit".
4791 4793 */
4792 4794 if (mech_infos[i].mi_max_key_size ==
4793 4795 CRYPTO_EFFECTIVELY_INFINITE) {
4794 4796 crypto_max = (size_t)-1;
4795 4797 } else if (cur_crypto_max > crypto_max) {
4796 4798 crypto_max = cur_crypto_max;
4797 4799 }
4798 4800
4799 4801 is_valid = B_TRUE;
4800 4802 }
4801 4803
4802 4804 kmem_free(mech_infos, sizeof (crypto_mechanism_info_t) *
4803 4805 nmech_infos);
4804 4806
4805 4807 if (!is_valid) {
4806 4808 /* no key sizes supported by framework */
4807 4809 alg->alg_flags &= ~ALG_FLAG_VALID;
4808 4810 return;
4809 4811 }
4810 4812
4811 4813 /*
4812 4814 	 * Determine min and max key sizes from the alg_key_sizes[]
4813 4815 	 * defined for the algorithm entry.  Adjust key sizes based on
4814 4816 * those supported by the framework.
4815 4817 */
4816 4818 alg->alg_ef_default_bits = alg->alg_key_sizes[0];
4817 4819
4818 4820 /*
4819 4821 	 * For backwards compatibility, assume that the IV length
4820 4822 * is the same as the data length.
4821 4823 */
4822 4824 alg->alg_ivlen = alg->alg_datalen;
4823 4825
4824 4826 /*
4825 4827 * Copy any algorithm parameters (if provided) into dedicated
4826 4828 * elements in the ipsec_alginfo_t structure.
4827 4829 * There may be a better place to put this code.
4828 4830 */
4829 4831 for (i = 0; i < alg->alg_nparams; i++) {
4830 4832 switch (i) {
4831 4833 case 0:
4832 4834 /* Initialisation Vector length (bytes) */
4833 4835 alg->alg_ivlen = alg->alg_params[0];
4834 4836 break;
4835 4837 case 1:
4836 4838 /* Integrity Check Vector length (bytes) */
4837 4839 alg->alg_icvlen = alg->alg_params[1];
4838 4840 break;
4839 4841 case 2:
4840 4842 /* Salt length (bytes) */
4841 4843 alg->alg_saltlen = (uint8_t)alg->alg_params[2];
4842 4844 break;
4843 4845 default:
4844 4846 break;
4845 4847 }
4846 4848 }
4847 4849
4848 4850 /* Default if the IV length is not specified. */
4849 4851 if (alg_type == IPSEC_ALG_ENCR && alg->alg_ivlen == 0)
4850 4852 alg->alg_ivlen = alg->alg_datalen;
4851 4853
4852 4854 alg_flag_check(alg);
4853 4855
4854 4856 if (alg->alg_increment != 0) {
4855 4857 /* supported key sizes are defined by range & increment */
4856 4858 crypto_min = ALGBITS_ROUND_UP(crypto_min, alg->alg_increment);
4857 4859 crypto_max = ALGBITS_ROUND_DOWN(crypto_max, alg->alg_increment);
4858 4860
4859 4861 alg->alg_ef_minbits = MAX(alg->alg_minbits,
4860 4862 (uint16_t)crypto_min);
4861 4863 alg->alg_ef_maxbits = MIN(alg->alg_maxbits,
4862 4864 (uint16_t)crypto_max);
4863 4865
4864 4866 /*
4865 4867 * If the sizes supported by the framework are outside
4866 4868 * the range of sizes defined by the algorithm mappings,
4867 4869 * the algorithm cannot be used. Check for this
4868 4870 * condition here.
4869 4871 */
4870 4872 if (alg->alg_ef_minbits > alg->alg_ef_maxbits) {
4871 4873 alg->alg_flags &= ~ALG_FLAG_VALID;
4872 4874 return;
4873 4875 }
4874 4876 if (alg->alg_ef_default_bits < alg->alg_ef_minbits)
4875 4877 alg->alg_ef_default_bits = alg->alg_ef_minbits;
4876 4878 if (alg->alg_ef_default_bits > alg->alg_ef_maxbits)
4877 4879 alg->alg_ef_default_bits = alg->alg_ef_maxbits;
4878 4880 } else if (alg->alg_nkey_sizes == 0) {
4879 4881 /* no specified key size for algorithm */
4880 4882 alg->alg_ef_minbits = alg->alg_ef_maxbits = 0;
4881 4883 } else {
4882 4884 /* supported key sizes are defined by enumeration */
4883 4885 alg->alg_ef_minbits = (uint16_t)-1;
4884 4886 alg->alg_ef_maxbits = 0;
4885 4887
4886 4888 for (i = 0, is_valid = B_FALSE; i < alg->alg_nkey_sizes; i++) {
4887 4889 /*
4888 4890 * Ignore the current key size if it is not in the
4889 4891 * range of sizes supported by the framework.
4890 4892 */
4891 4893 if (alg->alg_key_sizes[i] < crypto_min ||
4892 4894 alg->alg_key_sizes[i] > crypto_max)
4893 4895 continue;
4894 4896 if (alg->alg_key_sizes[i] < alg->alg_ef_minbits)
4895 4897 alg->alg_ef_minbits = alg->alg_key_sizes[i];
4896 4898 if (alg->alg_key_sizes[i] > alg->alg_ef_maxbits)
4897 4899 alg->alg_ef_maxbits = alg->alg_key_sizes[i];
4898 4900 is_valid = B_TRUE;
4899 4901 }
4900 4902
4901 4903 if (!is_valid) {
4902 4904 alg->alg_flags &= ~ALG_FLAG_VALID;
4903 4905 return;
4904 4906 }
4905 4907 alg->alg_ef_default = 0;
4906 4908 }
4907 4909 }
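
When an algorithm's key sizes are defined by range and increment (the first branch above), the effective range is the intersection of the range configured through ipsecalgs(1m) and the range reported by the crypto framework, after snapping the framework limits onto the increment grid. A minimal standalone sketch of that clamping follows; ROUND_UP/ROUND_DOWN, alg_range_t and clamp_key_range() are hypothetical stand-ins for ALGBITS_ROUND_UP/ALGBITS_ROUND_DOWN and the ipsec_alginfo_t fields, not code from this file.

#include <stdint.h>

#define	ROUND_UP(x, align)	((((x) + (align) - 1) / (align)) * (align))
#define	ROUND_DOWN(x, align)	(((x) / (align)) * (align))

typedef struct {
	uint16_t minbits, maxbits, increment;	/* configured via ipsecalgs */
	uint16_t ef_minbits, ef_maxbits;	/* effective, after clamping */
} alg_range_t;

/* Returns 0 when the configured range and the framework range intersect. */
static int
clamp_key_range(alg_range_t *a, uint32_t crypto_min, uint32_t crypto_max)
{
	/* Snap the framework limits onto the algorithm's increment grid. */
	crypto_min = ROUND_UP(crypto_min, a->increment);
	crypto_max = ROUND_DOWN(crypto_max, a->increment);

	/* The effective range is the intersection of the two ranges. */
	a->ef_minbits = (a->minbits > crypto_min) ?
	    a->minbits : (uint16_t)crypto_min;
	a->ef_maxbits = (a->maxbits < crypto_max) ?
	    a->maxbits : (uint16_t)crypto_max;

	return ((a->ef_minbits <= a->ef_maxbits) ? 0 : -1);
}
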
4908 4910
4909 4911 /*
4910 4912 * Sanity check parameters provided by ipsecalgs(1m). Assume that
4911 4913  * the algorithm is marked as valid; there is a check at the top
4912 4914 * of this function. If any of the checks below fail, the algorithm
4913 4915 * entry is invalid.
4914 4916 */
4915 4917 void
4916 4918 alg_flag_check(ipsec_alginfo_t *alg)
4917 4919 {
4918 4920 alg->alg_flags &= ~ALG_FLAG_VALID;
4919 4921
4920 4922 /*
4921 4923 * Can't have the algorithm marked as CCM and GCM.
4922 4924 	 * Check that the ALG_FLAG_COMBINED and ALG_FLAG_COUNTERMODE
4923 4925 * flags are set for CCM & GCM.
4924 4926 */
4925 4927 if ((alg->alg_flags & (ALG_FLAG_CCM|ALG_FLAG_GCM)) ==
4926 4928 (ALG_FLAG_CCM|ALG_FLAG_GCM))
4927 4929 return;
4928 4930 if (alg->alg_flags & (ALG_FLAG_CCM|ALG_FLAG_GCM)) {
4929 4931 if (!(alg->alg_flags & ALG_FLAG_COUNTERMODE))
4930 4932 return;
4931 4933 if (!(alg->alg_flags & ALG_FLAG_COMBINED))
4932 4934 return;
4933 4935 }
4934 4936
4935 4937 /*
4936 4938 * For ALG_FLAG_COUNTERMODE, check the parameters
4937 4939 * fit in the ipsec_nonce_t structure.
4938 4940 */
4939 4941 if (alg->alg_flags & ALG_FLAG_COUNTERMODE) {
4940 4942 if (alg->alg_ivlen != sizeof (((ipsec_nonce_t *)NULL)->iv))
4941 4943 return;
4942 4944 if (alg->alg_saltlen > sizeof (((ipsec_nonce_t *)NULL)->salt))
4943 4945 return;
4944 4946 }
4945 4947 if ((alg->alg_flags & ALG_FLAG_COMBINED) &&
4946 4948 (alg->alg_icvlen == 0))
4947 4949 return;
4948 4950
4949 4951 /* all is well. */
4950 4952 alg->alg_flags |= ALG_FLAG_VALID;
4951 4953 }
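
The counter-mode checks above size the IV and salt against the ipsec_nonce_t fields by applying sizeof to a member reached through a NULL pointer; sizeof never evaluates its operand, so no dereference takes place. A standalone sketch of the idiom, using a made-up struct in place of the real ipsec_nonce_t layout:

#include <stdio.h>

/* Hypothetical stand-in for ipsec_nonce_t; field sizes are made up. */
struct nonce {
	unsigned char salt[4];
	unsigned char iv[8];
};

int
main(void)
{
	/* Same result as declaring a struct nonce n and taking sizeof (n.iv). */
	printf("iv bytes: %zu\n", sizeof (((struct nonce *)NULL)->iv));
	printf("salt bytes: %zu\n", sizeof (((struct nonce *)NULL)->salt));
	return (0);
}
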
4952 4954
4953 4955 /*
4954 4956 * Free the memory used by the specified algorithm.
4955 4957 */
4956 4958 void
4957 4959 ipsec_alg_free(ipsec_alginfo_t *alg)
4958 4960 {
4959 4961 if (alg == NULL)
4960 4962 return;
4961 4963
4962 4964 if (alg->alg_key_sizes != NULL) {
4963 4965 kmem_free(alg->alg_key_sizes,
4964 4966 (alg->alg_nkey_sizes + 1) * sizeof (uint16_t));
4965 4967 alg->alg_key_sizes = NULL;
4966 4968 }
4967 4969 if (alg->alg_block_sizes != NULL) {
4968 4970 kmem_free(alg->alg_block_sizes,
4969 4971 (alg->alg_nblock_sizes + 1) * sizeof (uint16_t));
4970 4972 alg->alg_block_sizes = NULL;
4971 4973 }
4972 4974 if (alg->alg_params != NULL) {
4973 4975 kmem_free(alg->alg_params,
4974 4976 (alg->alg_nparams + 1) * sizeof (uint16_t));
4975 4977 alg->alg_params = NULL;
4976 4978 }
4977 4979 kmem_free(alg, sizeof (*alg));
4978 4980 }
4979 4981
4980 4982 /*
4981 4983 * Check the validity of the specified key size for an algorithm.
4982 4984 * Returns B_TRUE if key size is valid, B_FALSE otherwise.
4983 4985 */
4984 4986 boolean_t
4985 4987 ipsec_valid_key_size(uint16_t key_size, ipsec_alginfo_t *alg)
4986 4988 {
4987 4989 if (key_size < alg->alg_ef_minbits || key_size > alg->alg_ef_maxbits)
4988 4990 return (B_FALSE);
4989 4991
4990 4992 if (alg->alg_increment == 0 && alg->alg_nkey_sizes != 0) {
4991 4993 /*
4992 4994 * If the key sizes are defined by enumeration, the new
4993 4995 * key size must be equal to one of the supported values.
4994 4996 */
4995 4997 int i;
4996 4998
4997 4999 for (i = 0; i < alg->alg_nkey_sizes; i++)
4998 5000 if (key_size == alg->alg_key_sizes[i])
4999 5001 break;
5000 5002 if (i == alg->alg_nkey_sizes)
5001 5003 return (B_FALSE);
5002 5004 }
5003 5005
5004 5006 return (B_TRUE);
5005 5007 }
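
Callers (typically the key-management paths) are expected to convert a proposed key length to bits and ask ipsec_valid_key_size() before installing an SA; the function rejects lengths outside the effective range and, for enumerated algorithms, lengths not on the list. A hypothetical caller, shown only to illustrate the convention:

/*
 * Illustration only: everything here except ipsec_valid_key_size()
 * is a made-up stand-in.
 */
static boolean_t
proposed_key_ok(ipsec_alginfo_t *alg, uint_t keybytes)
{
	uint16_t keybits = (uint16_t)(keybytes * 8);

	/*
	 * Rejects keys outside [alg_ef_minbits, alg_ef_maxbits] and,
	 * for enumerated algorithms, keys not on the enumerated list.
	 */
	return (ipsec_valid_key_size(keybits, alg));
}
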
5006 5008
5007 5009 /*
5008 5010 * Callback function invoked by the crypto framework when a provider
5009 5011 * registers or unregisters. This callback updates the algorithms
5010 5012 * tables when a crypto algorithm is no longer available or becomes
5011 5013 * available, and triggers the freeing/creation of context templates
5012 5014 * associated with existing SAs, if needed.
5013 5015 *
5014 5016 * Need to walk all stack instances since the callback is global
5015 5017 * for all instances
5016 5018 */
5017 5019 void
5018 5020 ipsec_prov_update_callback(uint32_t event, void *event_arg)
5019 5021 {
5020 5022 netstack_handle_t nh;
5021 5023 netstack_t *ns;
5022 5024
5023 5025 netstack_next_init(&nh);
5024 5026 while ((ns = netstack_next(&nh)) != NULL) {
5025 5027 ipsec_prov_update_callback_stack(event, event_arg, ns);
5026 5028 netstack_rele(ns);
5027 5029 }
5028 5030 netstack_next_fini(&nh);
5029 5031 }
5030 5032
5031 5033 static void
5032 5034 ipsec_prov_update_callback_stack(uint32_t event, void *event_arg,
5033 5035 netstack_t *ns)
5034 5036 {
5035 5037 crypto_notify_event_change_t *prov_change =
5036 5038 (crypto_notify_event_change_t *)event_arg;
5037 5039 uint_t algidx, algid, algtype, mech_count, mech_idx;
5038 5040 ipsec_alginfo_t *alg;
5039 5041 ipsec_alginfo_t oalg;
5040 5042 crypto_mech_name_t *mechs;
5041 5043 boolean_t alg_changed = B_FALSE;
5042 5044 ipsec_stack_t *ipss = ns->netstack_ipsec;
5043 5045
5044 5046 /* ignore events for which we didn't register */
5045 5047 if (event != CRYPTO_EVENT_MECHS_CHANGED) {
5046 5048 ip1dbg(("ipsec_prov_update_callback: unexpected event 0x%x "
5047 5049 		    "received from crypto framework\n", event));
5048 5050 return;
5049 5051 }
5050 5052
5051 5053 mechs = crypto_get_mech_list(&mech_count, KM_SLEEP);
5052 5054 if (mechs == NULL)
5053 5055 return;
5054 5056
5055 5057 /*
5056 5058 	 * Walk the list of currently defined IPsec algorithms. Update
5057 5059 * the algorithm valid flag and trigger an update of the
5058 5060 * SAs that depend on that algorithm.
5059 5061 */
5060 5062 rw_enter(&ipss->ipsec_alg_lock, RW_WRITER);
5061 5063 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
5062 5064 for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
5063 5065 algidx++) {
5064 5066
5065 5067 algid = ipss->ipsec_sortlist[algtype][algidx];
5066 5068 alg = ipss->ipsec_alglists[algtype][algid];
5067 5069 ASSERT(alg != NULL);
5068 5070
5069 5071 /*
5070 5072 * Skip the algorithms which do not map to the
5071 5073 * crypto framework provider being added or removed.
5072 5074 */
5073 5075 if (strncmp(alg->alg_mech_name,
5074 5076 prov_change->ec_mech_name,
5075 5077 CRYPTO_MAX_MECH_NAME) != 0)
5076 5078 continue;
5077 5079
5078 5080 /*
5079 5081 * Determine if the mechanism is valid. If it
5080 5082 * is not, mark the algorithm as being invalid. If
5081 5083 * it is, mark the algorithm as being valid.
5082 5084 */
5083 5085 for (mech_idx = 0; mech_idx < mech_count; mech_idx++)
5084 5086 if (strncmp(alg->alg_mech_name,
5085 5087 mechs[mech_idx], CRYPTO_MAX_MECH_NAME) == 0)
5086 5088 break;
5087 5089 if (mech_idx == mech_count &&
5088 5090 alg->alg_flags & ALG_FLAG_VALID) {
5089 5091 alg->alg_flags &= ~ALG_FLAG_VALID;
5090 5092 alg_changed = B_TRUE;
5091 5093 } else if (mech_idx < mech_count &&
5092 5094 !(alg->alg_flags & ALG_FLAG_VALID)) {
5093 5095 alg->alg_flags |= ALG_FLAG_VALID;
5094 5096 alg_changed = B_TRUE;
5095 5097 }
5096 5098
5097 5099 /*
5098 5100 * Update the supported key sizes, regardless
5099 5101 * of whether a crypto provider was added or
5100 5102 * removed.
5101 5103 */
5102 5104 oalg = *alg;
5103 5105 ipsec_alg_fix_min_max(alg, algtype, ns);
5104 5106 			if (!alg_changed &&
5105 5107 			    (alg->alg_ef_minbits != oalg.alg_ef_minbits ||
5106 5108 			    alg->alg_ef_maxbits != oalg.alg_ef_maxbits ||
5107 5109 			    alg->alg_ef_default != oalg.alg_ef_default ||
5108 5110 			    alg->alg_ef_default_bits !=
5109 5111 			    oalg.alg_ef_default_bits))
5110 5112 alg_changed = B_TRUE;
5111 5113
5112 5114 /*
5113 5115 * Update the affected SAs if a software provider is
5114 5116 * being added or removed.
5115 5117 */
5116 5118 if (prov_change->ec_provider_type ==
5117 5119 CRYPTO_SW_PROVIDER)
5118 5120 sadb_alg_update(algtype, alg->alg_id,
5119 5121 prov_change->ec_change ==
5120 5122 CRYPTO_MECH_ADDED, ns);
5121 5123 }
5122 5124 }
5123 5125 rw_exit(&ipss->ipsec_alg_lock);
5124 5126 crypto_free_mech_list(mechs, mech_count);
5125 5127
5126 5128 if (alg_changed) {
5127 5129 /*
5128 5130 * An algorithm has changed, i.e. it became valid or
5129 5131 		 * invalid, or its supported key sizes have changed.
5130 5132 * Notify ipsecah and ipsecesp of this change so
5131 5133 * that they can send a SADB_REGISTER to their consumers.
5132 5134 */
5133 5135 ipsecah_algs_changed(ns);
5134 5136 ipsecesp_algs_changed(ns);
5135 5137 }
5136 5138 }
5137 5139
5138 5140 /*
5139 5141 * Registers with the crypto framework to be notified of crypto
5140 5142  * provider changes. Used to update the algorithm tables and
5141 5143 * to free or create context templates if needed. Invoked after IPsec
5142 5144 * is loaded successfully.
5143 5145 *
5144 5146 * This is called separately for each IP instance, so we ensure we only
5145 5147 * register once.
5146 5148 */
5147 5149 void
5148 5150 ipsec_register_prov_update(void)
5149 5151 {
5150 5152 if (prov_update_handle != NULL)
5151 5153 return;
5152 5154
5153 5155 prov_update_handle = crypto_notify_events(
5154 5156 ipsec_prov_update_callback, CRYPTO_EVENT_MECHS_CHANGED);
5155 5157 }
5156 5158
5157 5159 /*
5158 5160  * Unregisters from the framework to be notified of crypto provider
5159 5161 * changes. Called from ipsec_policy_g_destroy().
5160 5162 */
5161 5163 static void
5162 5164 ipsec_unregister_prov_update(void)
5163 5165 {
5164 5166 if (prov_update_handle != NULL)
5165 5167 crypto_unnotify_events(prov_update_handle);
5166 5168 }
5167 5169
5168 5170 /*
5169 5171 * Tunnel-mode support routines.
5170 5172 */
5171 5173
5172 5174 /*
5173 5175 * Returns an mblk chain suitable for putnext() if policies match and IPsec
5174 5176  * SAs are available. If there's no per-tunnel policy, or the lookup comes
5175 5177  * back with no match, then still return the packet and have global policy take
5176 5178 * a crack at it in IP.
5177 5179 * This updates the ip_xmit_attr with the IPsec policy.
5178 5180 *
5179 5181 * Remember -> we can be forwarding packets. Keep that in mind w.r.t.
5180 5182 * inner-packet contents.
5181 5183 */
5182 5184 mblk_t *
5183 5185 ipsec_tun_outbound(mblk_t *mp, iptun_t *iptun, ipha_t *inner_ipv4,
5184 5186 ip6_t *inner_ipv6, ipha_t *outer_ipv4, ip6_t *outer_ipv6, int outer_hdr_len,
5185 5187 ip_xmit_attr_t *ixa)
5186 5188 {
5187 5189 ipsec_policy_head_t *polhead;
5188 5190 ipsec_selector_t sel;
5189 5191 mblk_t *nmp;
5190 5192 boolean_t is_fragment;
5191 5193 ipsec_policy_t *pol;
5192 5194 ipsec_tun_pol_t *itp = iptun->iptun_itp;
5193 5195 netstack_t *ns = iptun->iptun_ns;
5194 5196 ipsec_stack_t *ipss = ns->netstack_ipsec;
5195 5197
5196 5198 ASSERT(outer_ipv6 != NULL && outer_ipv4 == NULL ||
5197 5199 outer_ipv4 != NULL && outer_ipv6 == NULL);
5198 5200 /* We take care of inners in a bit. */
5199 5201
5200 5202 /* Are the IPsec fields initialized at all? */
5201 5203 if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE)) {
5202 5204 ASSERT(ixa->ixa_ipsec_policy == NULL);
5203 5205 ASSERT(ixa->ixa_ipsec_latch == NULL);
5204 5206 ASSERT(ixa->ixa_ipsec_action == NULL);
5205 5207 ASSERT(ixa->ixa_ipsec_ah_sa == NULL);
5206 5208 ASSERT(ixa->ixa_ipsec_esp_sa == NULL);
5207 5209 }
5208 5210
5209 5211 ASSERT(itp != NULL && (itp->itp_flags & ITPF_P_ACTIVE));
5210 5212 polhead = itp->itp_policy;
5211 5213
5212 5214 bzero(&sel, sizeof (sel));
5213 5215 if (inner_ipv4 != NULL) {
5214 5216 ASSERT(inner_ipv6 == NULL);
5215 5217 sel.ips_isv4 = B_TRUE;
5216 5218 sel.ips_local_addr_v4 = inner_ipv4->ipha_src;
5217 5219 sel.ips_remote_addr_v4 = inner_ipv4->ipha_dst;
5218 5220 sel.ips_protocol = (uint8_t)inner_ipv4->ipha_protocol;
5219 5221 } else {
5220 5222 ASSERT(inner_ipv6 != NULL);
5221 5223 sel.ips_isv4 = B_FALSE;
5222 5224 sel.ips_local_addr_v6 = inner_ipv6->ip6_src;
5223 5225 /*
5224 5226 * We don't care about routing-header dests in the
5225 5227 * forwarding/tunnel path, so just grab ip6_dst.
5226 5228 */
5227 5229 sel.ips_remote_addr_v6 = inner_ipv6->ip6_dst;
5228 5230 }
5229 5231
5230 5232 if (itp->itp_flags & ITPF_P_PER_PORT_SECURITY) {
5231 5233 /*
5232 5234 * Caller can prepend the outer header, which means
5233 5235 * inner_ipv[46] may be stuck in the middle. Pullup the whole
5234 5236 * mess now if need-be, for easier processing later. Don't
5235 5237 * forget to rewire the outer header too.
5236 5238 */
5237 5239 if (mp->b_cont != NULL) {
5238 5240 nmp = msgpullup(mp, -1);
5239 5241 if (nmp == NULL) {
5240 5242 ip_drop_packet(mp, B_FALSE, NULL,
5241 5243 DROPPER(ipss, ipds_spd_nomem),
5242 5244 &ipss->ipsec_spd_dropper);
5243 5245 return (NULL);
5244 5246 }
5245 5247 freemsg(mp);
5246 5248 mp = nmp;
5247 5249 if (outer_ipv4 != NULL)
5248 5250 outer_ipv4 = (ipha_t *)mp->b_rptr;
5249 5251 else
5250 5252 outer_ipv6 = (ip6_t *)mp->b_rptr;
5251 5253 if (inner_ipv4 != NULL) {
5252 5254 inner_ipv4 =
5253 5255 (ipha_t *)(mp->b_rptr + outer_hdr_len);
5254 5256 } else {
5255 5257 inner_ipv6 =
5256 5258 (ip6_t *)(mp->b_rptr + outer_hdr_len);
5257 5259 }
5258 5260 }
5259 5261 if (inner_ipv4 != NULL) {
5260 5262 is_fragment = IS_V4_FRAGMENT(
5261 5263 inner_ipv4->ipha_fragment_offset_and_flags);
5262 5264 } else {
5263 5265 sel.ips_remote_addr_v6 = ip_get_dst_v6(inner_ipv6, mp,
5264 5266 &is_fragment);
5265 5267 }
5266 5268
5267 5269 if (is_fragment) {
5268 5270 ipha_t *oiph;
5269 5271 ipha_t *iph = NULL;
5270 5272 ip6_t *ip6h = NULL;
5271 5273 int hdr_len;
5272 5274 uint16_t ip6_hdr_length;
5273 5275 uint8_t v6_proto;
5274 5276 uint8_t *v6_proto_p;
5275 5277
5276 5278 /*
5277 5279 * We have a fragment we need to track!
5278 5280 */
5279 5281 mp = ipsec_fragcache_add(&itp->itp_fragcache, NULL, mp,
5280 5282 outer_hdr_len, ipss);
5281 5283 if (mp == NULL)
5282 5284 return (NULL);
5283 5285 ASSERT(mp->b_cont == NULL);
5284 5286
5285 5287 /*
5286 5288 * If we get here, we have a full fragment chain
5287 5289 */
5288 5290
5289 5291 oiph = (ipha_t *)mp->b_rptr;
5290 5292 if (IPH_HDR_VERSION(oiph) == IPV4_VERSION) {
5291 5293 hdr_len = ((outer_hdr_len != 0) ?
5292 5294 IPH_HDR_LENGTH(oiph) : 0);
5293 5295 iph = (ipha_t *)(mp->b_rptr + hdr_len);
5294 5296 } else {
5295 5297 ASSERT(IPH_HDR_VERSION(oiph) == IPV6_VERSION);
5296 5298 ip6h = (ip6_t *)mp->b_rptr;
5297 5299 if (!ip_hdr_length_nexthdr_v6(mp, ip6h,
5298 5300 &ip6_hdr_length, &v6_proto_p)) {
5299 5301 ip_drop_packet_chain(mp, B_FALSE, NULL,
5300 5302 DROPPER(ipss,
5301 5303 ipds_spd_malformed_packet),
5302 5304 &ipss->ipsec_spd_dropper);
5303 5305 return (NULL);
5304 5306 }
5305 5307 hdr_len = ip6_hdr_length;
5306 5308 }
5307 5309 outer_hdr_len = hdr_len;
5308 5310
5309 5311 if (sel.ips_isv4) {
5310 5312 if (iph == NULL) {
5311 5313 /* Was v6 outer */
5312 5314 iph = (ipha_t *)(mp->b_rptr + hdr_len);
5313 5315 }
5314 5316 inner_ipv4 = iph;
5315 5317 sel.ips_local_addr_v4 = inner_ipv4->ipha_src;
5316 5318 sel.ips_remote_addr_v4 = inner_ipv4->ipha_dst;
5317 5319 sel.ips_protocol =
5318 5320 (uint8_t)inner_ipv4->ipha_protocol;
5319 5321 } else {
5320 5322 inner_ipv6 = (ip6_t *)(mp->b_rptr +
5321 5323 hdr_len);
5322 5324 sel.ips_local_addr_v6 = inner_ipv6->ip6_src;
5323 5325 sel.ips_remote_addr_v6 = inner_ipv6->ip6_dst;
5324 5326 if (!ip_hdr_length_nexthdr_v6(mp,
5325 5327 inner_ipv6, &ip6_hdr_length, &v6_proto_p)) {
5326 5328 ip_drop_packet_chain(mp, B_FALSE, NULL,
5327 5329 DROPPER(ipss,
5328 5330 ipds_spd_malformed_frag),
5329 5331 &ipss->ipsec_spd_dropper);
5330 5332 return (NULL);
5331 5333 }
5332 5334 v6_proto = *v6_proto_p;
5333 5335 sel.ips_protocol = v6_proto;
5334 5336 #ifdef FRAGCACHE_DEBUG
5335 5337 cmn_err(CE_WARN, "v6_sel.ips_protocol = %d\n",
5336 5338 sel.ips_protocol);
5337 5339 #endif
5338 5340 }
5339 5341 /* Ports are extracted below */
5340 5342 }
5341 5343
5342 5344 /* Get ports... */
5343 5345 if (!ipsec_init_outbound_ports(&sel, mp,
5344 5346 inner_ipv4, inner_ipv6, outer_hdr_len, ipss)) {
5345 5347 /* callee did ip_drop_packet_chain() on mp. */
5346 5348 return (NULL);
5347 5349 }
5348 5350 #ifdef FRAGCACHE_DEBUG
5349 5351 if (inner_ipv4 != NULL)
5350 5352 cmn_err(CE_WARN,
5351 5353 "(v4) sel.ips_protocol = %d, "
5352 5354 "sel.ips_local_port = %d, "
5353 5355 "sel.ips_remote_port = %d\n",
5354 5356 sel.ips_protocol, ntohs(sel.ips_local_port),
5355 5357 ntohs(sel.ips_remote_port));
5356 5358 if (inner_ipv6 != NULL)
5357 5359 cmn_err(CE_WARN,
5358 5360 "(v6) sel.ips_protocol = %d, "
5359 5361 "sel.ips_local_port = %d, "
5360 5362 "sel.ips_remote_port = %d\n",
5361 5363 sel.ips_protocol, ntohs(sel.ips_local_port),
5362 5364 ntohs(sel.ips_remote_port));
5363 5365 #endif
5364 5366 /* Success so far! */
5365 5367 }
5366 5368 rw_enter(&polhead->iph_lock, RW_READER);
5367 5369 pol = ipsec_find_policy_head(NULL, polhead, IPSEC_TYPE_OUTBOUND, &sel);
5368 5370 rw_exit(&polhead->iph_lock);
5369 5371 if (pol == NULL) {
5370 5372 /*
5371 5373 * No matching policy on this tunnel, drop the packet.
5372 5374 *
5373 5375 * NOTE: Tunnel-mode tunnels are different from the
5374 5376 * IP global transport mode policy head. For a tunnel-mode
5375 5377 		 * tunnel, we drop the packet instead of passing it
5376 5378 		 * along as accepted, the way a global-policy miss would.
5377 5379 *
5378 5380 * NOTE2: "negotiate transport" tunnels should match ALL
5379 5381 * inbound packets, but we do not uncomment the ASSERT()
5380 5382 * below because if/when we open PF_POLICY, a user can
5381 5383 * shoot themself in the foot with a 0 priority.
5382 5384 */
5383 5385
5384 5386 /* ASSERT(itp->itp_flags & ITPF_P_TUNNEL); */
5385 5387 #ifdef FRAGCACHE_DEBUG
5386 5388 cmn_err(CE_WARN, "ipsec_tun_outbound(): No matching tunnel "
5387 5389 "per-port policy\n");
5388 5390 #endif
5389 5391 ip_drop_packet_chain(mp, B_FALSE, NULL,
5390 5392 DROPPER(ipss, ipds_spd_explicit),
5391 5393 &ipss->ipsec_spd_dropper);
5392 5394 return (NULL);
5393 5395 }
5394 5396
5395 5397 #ifdef FRAGCACHE_DEBUG
5396 5398 	cmn_err(CE_WARN, "Found matching tunnel per-port policy\n");
5397 5399 #endif
5398 5400
5399 5401 /*
5400 5402 * NOTE: ixa_cleanup() function will release pol references.
5401 5403 */
5402 5404 ixa->ixa_ipsec_policy = pol;
5403 5405 /*
5404 5406 * NOTE: There is a subtle difference between iptun_zoneid and
5405 5407 * iptun_connp->conn_zoneid explained in iptun_conn_create(). When
5406 5408 * interacting with the ip module, we must use conn_zoneid.
5407 5409 */
5408 5410 ixa->ixa_zoneid = iptun->iptun_connp->conn_zoneid;
5409 5411
5410 5412 ASSERT((outer_ipv4 != NULL) ? (ixa->ixa_flags & IXAF_IS_IPV4) :
5411 5413 !(ixa->ixa_flags & IXAF_IS_IPV4));
5412 5414 ASSERT(ixa->ixa_ipsec_policy != NULL);
5413 5415 ixa->ixa_flags |= IXAF_IPSEC_SECURE;
5414 5416
5415 5417 if (!(itp->itp_flags & ITPF_P_TUNNEL)) {
5416 5418 /* Set up transport mode for tunnelled packets. */
5417 5419 ixa->ixa_ipsec_proto = (inner_ipv4 != NULL) ? IPPROTO_ENCAP :
5418 5420 IPPROTO_IPV6;
5419 5421 return (mp);
5420 5422 }
5421 5423
5422 5424 /* Fill in tunnel-mode goodies here. */
5423 5425 ixa->ixa_flags |= IXAF_IPSEC_TUNNEL;
5424 5426 /* XXX Do I need to fill in all of the goodies here? */
5425 5427 if (inner_ipv4) {
5426 5428 ixa->ixa_ipsec_inaf = AF_INET;
5427 5429 ixa->ixa_ipsec_insrc[0] =
5428 5430 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v4;
5429 5431 ixa->ixa_ipsec_indst[0] =
5430 5432 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v4;
5431 5433 } else {
5432 5434 ixa->ixa_ipsec_inaf = AF_INET6;
5433 5435 ixa->ixa_ipsec_insrc[0] =
5434 5436 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[0];
5435 5437 ixa->ixa_ipsec_insrc[1] =
5436 5438 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[1];
5437 5439 ixa->ixa_ipsec_insrc[2] =
5438 5440 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[2];
5439 5441 ixa->ixa_ipsec_insrc[3] =
5440 5442 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[3];
5441 5443 ixa->ixa_ipsec_indst[0] =
5442 5444 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[0];
5443 5445 ixa->ixa_ipsec_indst[1] =
5444 5446 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[1];
5445 5447 ixa->ixa_ipsec_indst[2] =
5446 5448 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[2];
5447 5449 ixa->ixa_ipsec_indst[3] =
5448 5450 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[3];
5449 5451 }
5450 5452 ixa->ixa_ipsec_insrcpfx = pol->ipsp_sel->ipsl_key.ipsl_local_pfxlen;
5451 5453 ixa->ixa_ipsec_indstpfx = pol->ipsp_sel->ipsl_key.ipsl_remote_pfxlen;
5452 5454 /* NOTE: These are used for transport mode too. */
5453 5455 ixa->ixa_ipsec_src_port = pol->ipsp_sel->ipsl_key.ipsl_lport;
5454 5456 ixa->ixa_ipsec_dst_port = pol->ipsp_sel->ipsl_key.ipsl_rport;
5455 5457 ixa->ixa_ipsec_proto = pol->ipsp_sel->ipsl_key.ipsl_proto;
5456 5458
5457 5459 return (mp);
5458 5460 }
5459 5461
5460 5462 /*
5461 5463 * NOTE: The following releases pol's reference and
5462 5464 * calls ip_drop_packet() for me on NULL returns.
5463 5465 */
5464 5466 mblk_t *
5465 5467 ipsec_check_ipsecin_policy_reasm(mblk_t *attr_mp, ipsec_policy_t *pol,
5466 5468 ipha_t *inner_ipv4, ip6_t *inner_ipv6, uint64_t pkt_unique, netstack_t *ns)
5467 5469 {
5468 5470 	/* Assume attr_mp is a chain of b_next-linked ip_recv_attr mblks. */
5469 5471 mblk_t *data_chain = NULL, *data_tail = NULL;
5470 5472 mblk_t *next;
5471 5473 mblk_t *data_mp;
5472 5474 ip_recv_attr_t iras;
5473 5475
5474 5476 while (attr_mp != NULL) {
5475 5477 ASSERT(ip_recv_attr_is_mblk(attr_mp));
5476 5478 next = attr_mp->b_next;
5477 5479 attr_mp->b_next = NULL; /* No tripping asserts. */
5478 5480
5479 5481 data_mp = attr_mp->b_cont;
5480 5482 attr_mp->b_cont = NULL;
5481 5483 if (!ip_recv_attr_from_mblk(attr_mp, &iras)) {
5482 5484 /* The ill or ip_stack_t disappeared on us */
5483 5485 freemsg(data_mp); /* ip_drop_packet?? */
5484 5486 ira_cleanup(&iras, B_TRUE);
5485 5487 goto fail;
5486 5488 }
5487 5489
5488 5490 /*
5489 5491 * Need IPPOL_REFHOLD(pol) for extras because
5490 5492 * ipsecin_policy does the refrele.
5491 5493 */
5492 5494 IPPOL_REFHOLD(pol);
5493 5495
5494 5496 data_mp = ipsec_check_ipsecin_policy(data_mp, pol, inner_ipv4,
5495 5497 inner_ipv6, pkt_unique, &iras, ns);
5496 5498 ira_cleanup(&iras, B_TRUE);
5497 5499
5498 5500 if (data_mp == NULL)
5499 5501 goto fail;
5500 5502
5501 5503 if (data_tail == NULL) {
5502 5504 /* First one */
5503 5505 data_chain = data_tail = data_mp;
5504 5506 } else {
5505 5507 data_tail->b_next = data_mp;
5506 5508 data_tail = data_mp;
5507 5509 }
5508 5510 attr_mp = next;
5509 5511 }
5510 5512 /*
5511 5513 * One last release because either the loop bumped it up, or we never
5512 5514 * called ipsec_check_ipsecin_policy().
5513 5515 */
5514 5516 IPPOL_REFRELE(pol);
5515 5517
5516 5518 /* data_chain is ready for return to tun module. */
5517 5519 return (data_chain);
5518 5520
5519 5521 fail:
5520 5522 /*
5521 5523 * Need to get rid of any extra pol
5522 5524 * references, and any remaining bits as well.
5523 5525 */
5524 5526 IPPOL_REFRELE(pol);
5525 5527 ipsec_freemsg_chain(data_chain);
5526 5528 ipsec_freemsg_chain(next); /* ipdrop stats? */
5527 5529 return (NULL);
5528 5530 }
5529 5531
5530 5532 /*
5531 5533 * Return a message if the inbound packet passed an IPsec policy check. Returns
5532 5534 * NULL if it failed or if it is a fragment needing its friends before a
5533 5535 * policy check can be performed.
5534 5536 *
5535 5537 * Expects a non-NULL data_mp, and a non-NULL polhead.
5536 5538 * The returned mblk may be a b_next chain of packets if fragments
5537 5539  * needed to be collected for a proper policy check.
5538 5540 *
5539 5541 * This function calls ip_drop_packet() on data_mp if need be.
5540 5542 *
5541 5543 * NOTE: outer_hdr_len is signed. If it's a negative value, the caller
5542 5544 * is inspecting an ICMP packet.
5543 5545 */
5544 5546 mblk_t *
5545 5547 ipsec_tun_inbound(ip_recv_attr_t *ira, mblk_t *data_mp, ipsec_tun_pol_t *itp,
5546 5548 ipha_t *inner_ipv4, ip6_t *inner_ipv6, ipha_t *outer_ipv4,
5547 5549 ip6_t *outer_ipv6, int outer_hdr_len, netstack_t *ns)
5548 5550 {
5549 5551 ipsec_policy_head_t *polhead;
5550 5552 ipsec_selector_t sel;
5551 5553 ipsec_policy_t *pol;
5552 5554 uint16_t tmpport;
5553 5555 selret_t rc;
5554 5556 boolean_t port_policy_present, is_icmp, global_present;
5555 5557 in6_addr_t tmpaddr;
5556 5558 ipaddr_t tmp4;
5557 5559 uint8_t flags, *inner_hdr;
5558 5560 ipsec_stack_t *ipss = ns->netstack_ipsec;
5559 5561
5560 5562 sel.ips_is_icmp_inv_acq = 0;
5561 5563
5562 5564 if (outer_ipv4 != NULL) {
5563 5565 ASSERT(outer_ipv6 == NULL);
5564 5566 global_present = ipss->ipsec_inbound_v4_policy_present;
5565 5567 } else {
5566 5568 ASSERT(outer_ipv6 != NULL);
5567 5569 global_present = ipss->ipsec_inbound_v6_policy_present;
5568 5570 }
5569 5571
5570 5572 ASSERT(inner_ipv4 != NULL && inner_ipv6 == NULL ||
5571 5573 inner_ipv4 == NULL && inner_ipv6 != NULL);
5572 5574
5573 5575 if (outer_hdr_len < 0) {
5574 5576 outer_hdr_len = (-outer_hdr_len);
5575 5577 is_icmp = B_TRUE;
5576 5578 } else {
5577 5579 is_icmp = B_FALSE;
5578 5580 }
5579 5581
5580 5582 if (itp != NULL && (itp->itp_flags & ITPF_P_ACTIVE)) {
5581 5583 mblk_t *mp = data_mp;
5582 5584
5583 5585 polhead = itp->itp_policy;
5584 5586 /*
5585 5587 * We need to perform full Tunnel-Mode enforcement,
5586 5588 * and we need to have inner-header data for such enforcement.
5587 5589 *
5588 5590 * See ipsec_init_inbound_sel() for the 0x80000000 on inbound
5589 5591 * and on return.
5590 5592 */
5591 5593
5592 5594 port_policy_present = ((itp->itp_flags &
5593 5595 ITPF_P_PER_PORT_SECURITY) ? B_TRUE : B_FALSE);
5594 5596 /*
5595 5597 * NOTE: Even if our policy is transport mode, set the
5596 5598 * SEL_TUNNEL_MODE flag so ipsec_init_inbound_sel() can
5597 5599 * do the right thing w.r.t. outer headers.
5598 5600 */
5599 5601 flags = ((port_policy_present ? SEL_PORT_POLICY : SEL_NONE) |
5600 5602 (is_icmp ? SEL_IS_ICMP : SEL_NONE) | SEL_TUNNEL_MODE);
5601 5603
5602 5604 rc = ipsec_init_inbound_sel(&sel, data_mp, inner_ipv4,
5603 5605 inner_ipv6, flags);
5604 5606
5605 5607 switch (rc) {
5606 5608 case SELRET_NOMEM:
5607 5609 ip_drop_packet(data_mp, B_TRUE, NULL,
5608 5610 DROPPER(ipss, ipds_spd_nomem),
5609 5611 &ipss->ipsec_spd_dropper);
5610 5612 return (NULL);
5611 5613 case SELRET_TUNFRAG:
5612 5614 /*
5613 5615 * At this point, if we're cleartext, we don't want
5614 5616 * to go there.
5615 5617 */
5616 5618 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
5617 5619 ip_drop_packet(data_mp, B_TRUE, NULL,
5618 5620 DROPPER(ipss, ipds_spd_got_clear),
5619 5621 &ipss->ipsec_spd_dropper);
5620 5622 return (NULL);
5621 5623 }
5622 5624
5623 5625 /*
5624 5626 * Inner and outer headers may not be contiguous.
5625 5627 * Pullup the data_mp now to satisfy assumptions of
5626 5628 * ipsec_fragcache_add()
5627 5629 */
5628 5630 if (data_mp->b_cont != NULL) {
5629 5631 mblk_t *nmp;
5630 5632
5631 5633 nmp = msgpullup(data_mp, -1);
5632 5634 if (nmp == NULL) {
5633 5635 ip_drop_packet(data_mp, B_TRUE, NULL,
5634 5636 DROPPER(ipss, ipds_spd_nomem),
5635 5637 &ipss->ipsec_spd_dropper);
5636 5638 return (NULL);
5637 5639 }
5638 5640 freemsg(data_mp);
5639 5641 data_mp = nmp;
5640 5642 if (outer_ipv4 != NULL)
5641 5643 outer_ipv4 =
5642 5644 (ipha_t *)data_mp->b_rptr;
5643 5645 else
5644 5646 outer_ipv6 =
5645 5647 (ip6_t *)data_mp->b_rptr;
5646 5648 if (inner_ipv4 != NULL) {
5647 5649 inner_ipv4 =
5648 5650 (ipha_t *)(data_mp->b_rptr +
5649 5651 outer_hdr_len);
5650 5652 } else {
5651 5653 inner_ipv6 =
5652 5654 (ip6_t *)(data_mp->b_rptr +
5653 5655 outer_hdr_len);
5654 5656 }
5655 5657 }
5656 5658
5657 5659 /*
5658 5660 			 * We may need to queue the packet. First we
5659 5661 * get an mblk with the attributes. ipsec_fragcache_add
5660 5662 * will prepend that to the queued data and return
5661 5663 * a list of b_next messages each of which starts with
5662 5664 * the attribute mblk.
5663 5665 */
5664 5666 mp = ip_recv_attr_to_mblk(ira);
5665 5667 if (mp == NULL) {
5666 5668 ip_drop_packet(data_mp, B_TRUE, NULL,
5667 5669 DROPPER(ipss, ipds_spd_nomem),
5668 5670 &ipss->ipsec_spd_dropper);
5669 5671 return (NULL);
5670 5672 }
5671 5673
5672 5674 mp = ipsec_fragcache_add(&itp->itp_fragcache,
5673 5675 mp, data_mp, outer_hdr_len, ipss);
5674 5676
5675 5677 if (mp == NULL) {
5676 5678 /*
5677 5679 * Data is cached, fragment chain is not
5678 5680 * complete.
5679 5681 */
5680 5682 return (NULL);
5681 5683 }
5682 5684
5683 5685 /*
5684 5686 * If we get here, we have a full fragment chain.
5685 5687 * Reacquire headers and selectors from first fragment.
5686 5688 */
5687 5689 ASSERT(ip_recv_attr_is_mblk(mp));
5688 5690 data_mp = mp->b_cont;
5689 5691 inner_hdr = data_mp->b_rptr;
5690 5692 if (outer_ipv4 != NULL) {
5691 5693 inner_hdr += IPH_HDR_LENGTH(
5692 5694 (ipha_t *)data_mp->b_rptr);
5693 5695 } else {
5694 5696 inner_hdr += ip_hdr_length_v6(data_mp,
5695 5697 (ip6_t *)data_mp->b_rptr);
5696 5698 }
5697 5699 ASSERT(inner_hdr <= data_mp->b_wptr);
5698 5700
5699 5701 if (inner_ipv4 != NULL) {
5700 5702 inner_ipv4 = (ipha_t *)inner_hdr;
5701 5703 inner_ipv6 = NULL;
5702 5704 } else {
5703 5705 inner_ipv6 = (ip6_t *)inner_hdr;
5704 5706 inner_ipv4 = NULL;
5705 5707 }
5706 5708
5707 5709 /*
5708 5710 * Use SEL_TUNNEL_MODE to take into account the outer
5709 5711 * header. Use SEL_POST_FRAG so we always get ports.
5710 5712 */
5711 5713 rc = ipsec_init_inbound_sel(&sel, data_mp,
5712 5714 inner_ipv4, inner_ipv6,
5713 5715 SEL_TUNNEL_MODE | SEL_POST_FRAG);
5714 5716 switch (rc) {
5715 5717 case SELRET_SUCCESS:
5716 5718 /*
5717 5719 * Get to same place as first caller's
5718 5720 * SELRET_SUCCESS case.
5719 5721 */
5720 5722 break;
5721 5723 case SELRET_NOMEM:
5722 5724 ip_drop_packet_chain(mp, B_TRUE, NULL,
5723 5725 DROPPER(ipss, ipds_spd_nomem),
5724 5726 &ipss->ipsec_spd_dropper);
5725 5727 return (NULL);
5726 5728 case SELRET_BADPKT:
5727 5729 ip_drop_packet_chain(mp, B_TRUE, NULL,
5728 5730 DROPPER(ipss, ipds_spd_malformed_frag),
5729 5731 &ipss->ipsec_spd_dropper);
5730 5732 return (NULL);
5731 5733 case SELRET_TUNFRAG:
5732 5734 cmn_err(CE_WARN, "(TUNFRAG on 2nd call...)");
5733 5735 /* FALLTHRU */
5734 5736 default:
5735 5737 cmn_err(CE_WARN, "ipsec_init_inbound_sel(mark2)"
5736 5738 " returns bizarro 0x%x", rc);
5737 5739 /* Guaranteed panic! */
5738 5740 ASSERT(rc == SELRET_NOMEM);
5739 5741 return (NULL);
5740 5742 }
5741 5743 /* FALLTHRU */
5742 5744 case SELRET_SUCCESS:
5743 5745 /*
5744 5746 * Common case:
5745 5747 * No per-port policy or a non-fragment. Keep going.
5746 5748 */
5747 5749 break;
5748 5750 case SELRET_BADPKT:
5749 5751 /*
5750 5752 * We may receive ICMP (with IPv6 inner) packets that
5751 5753 * trigger this return value. Send 'em in for
5752 5754 * enforcement checking.
5753 5755 */
5754 5756 cmn_err(CE_NOTE, "ipsec_tun_inbound(): "
5755 5757 "sending 'bad packet' in for enforcement");
5756 5758 break;
5757 5759 default:
5758 5760 cmn_err(CE_WARN,
5759 5761 "ipsec_init_inbound_sel() returns bizarro 0x%x",
5760 5762 rc);
5761 5763 ASSERT(rc == SELRET_NOMEM); /* Guaranteed panic! */
5762 5764 return (NULL);
5763 5765 }
5764 5766
5765 5767 if (is_icmp) {
5766 5768 /*
5767 5769 * Swap local/remote because this is an ICMP packet.
5768 5770 */
5769 5771 tmpaddr = sel.ips_local_addr_v6;
5770 5772 sel.ips_local_addr_v6 = sel.ips_remote_addr_v6;
5771 5773 sel.ips_remote_addr_v6 = tmpaddr;
5772 5774 tmpport = sel.ips_local_port;
5773 5775 sel.ips_local_port = sel.ips_remote_port;
5774 5776 sel.ips_remote_port = tmpport;
5775 5777 }
5776 5778
5777 5779 /* find_policy_head() */
5778 5780 rw_enter(&polhead->iph_lock, RW_READER);
5779 5781 pol = ipsec_find_policy_head(NULL, polhead, IPSEC_TYPE_INBOUND,
5780 5782 &sel);
5781 5783 rw_exit(&polhead->iph_lock);
5782 5784 if (pol != NULL) {
5783 5785 uint64_t pkt_unique;
5784 5786
5785 5787 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
5786 5788 if (!pol->ipsp_act->ipa_allow_clear) {
5787 5789 /*
5788 5790 * XXX should never get here with
5789 5791 * tunnel reassembled fragments?
5790 5792 */
5791 5793 ASSERT(mp == data_mp);
5792 5794 ip_drop_packet(data_mp, B_TRUE, NULL,
5793 5795 DROPPER(ipss, ipds_spd_got_clear),
5794 5796 &ipss->ipsec_spd_dropper);
5795 5797 IPPOL_REFRELE(pol);
5796 5798 return (NULL);
5797 5799 } else {
5798 5800 IPPOL_REFRELE(pol);
5799 5801 return (mp);
5800 5802 }
5801 5803 }
5802 5804 pkt_unique = SA_UNIQUE_ID(sel.ips_remote_port,
5803 5805 sel.ips_local_port,
5804 5806 (inner_ipv4 == NULL) ? IPPROTO_IPV6 :
5805 5807 IPPROTO_ENCAP, sel.ips_protocol);
5806 5808
5807 5809 /*
5808 5810 * NOTE: The following releases pol's reference and
5809 5811 * calls ip_drop_packet() for me on NULL returns.
5810 5812 *
5811 5813 * "sel" is still good here, so let's use it!
5812 5814 */
5813 5815 if (data_mp == mp) {
5814 5816 /* A single packet without attributes */
5815 5817 data_mp = ipsec_check_ipsecin_policy(data_mp,
5816 5818 pol, inner_ipv4, inner_ipv6, pkt_unique,
5817 5819 ira, ns);
5818 5820 } else {
5819 5821 /*
5820 5822 * We pass in the b_next chain of attr_mp's
5821 5823 * and get back a b_next chain of data_mp's.
5822 5824 */
5823 5825 data_mp = ipsec_check_ipsecin_policy_reasm(mp,
5824 5826 pol, inner_ipv4, inner_ipv6, pkt_unique,
5825 5827 ns);
5826 5828 }
5827 5829 return (data_mp);
5828 5830 }
5829 5831
5830 5832 /*
5831 5833 * Else fallthru and check the global policy on the outer
5832 5834 * header(s) if this tunnel is an old-style transport-mode
5833 5835 * one. Drop the packet explicitly (no policy entry) for
5834 5836 * a new-style tunnel-mode tunnel.
5835 5837 */
5836 5838 if ((itp->itp_flags & ITPF_P_TUNNEL) && !is_icmp) {
5837 5839 ip_drop_packet_chain(data_mp, B_TRUE, NULL,
5838 5840 DROPPER(ipss, ipds_spd_explicit),
5839 5841 &ipss->ipsec_spd_dropper);
5840 5842 return (NULL);
5841 5843 }
5842 5844 }
5843 5845
5844 5846 /*
5845 5847 * NOTE: If we reach here, we will not have packet chains from
5846 5848 * fragcache_add(), because the only way I get chains is on a
5847 5849 * tunnel-mode tunnel, which either returns with a pass, or gets
5848 5850 * hit by the ip_drop_packet_chain() call right above here.
5849 5851 */
5850 5852 ASSERT(data_mp->b_next == NULL);
5851 5853
5852 5854 /* If no per-tunnel security, check global policy now. */
5853 5855 if ((ira->ira_flags & IRAF_IPSEC_SECURE) && !global_present) {
5854 5856 if (ira->ira_flags & IRAF_TRUSTED_ICMP) {
5855 5857 /*
5856 5858 			 * This is an ICMP message that was generated locally.
5857 5859 * We should accept it.
5858 5860 */
5859 5861 return (data_mp);
5860 5862 }
5861 5863
5862 5864 ip_drop_packet(data_mp, B_TRUE, NULL,
5863 5865 DROPPER(ipss, ipds_spd_got_secure),
5864 5866 &ipss->ipsec_spd_dropper);
5865 5867 return (NULL);
5866 5868 }
5867 5869
5868 5870 if (is_icmp) {
5869 5871 /*
5870 5872 * For ICMP packets, "outer_ipvN" is set to the outer header
5871 5873 * that is *INSIDE* the ICMP payload. For global policy
5872 5874 * checking, we need to reverse src/dst on the payload in
5873 5875 * order to construct selectors appropriately. See "ripha"
5874 5876 * constructions in ip.c. To avoid a bug like 6478464 (see
5875 5877 * earlier in this file), we will actually exchange src/dst
5876 5878 		 * in the packet, and reverse it after the call to
5877 5879 * ipsec_check_global_policy().
5878 5880 */
5879 5881 if (outer_ipv4 != NULL) {
5880 5882 tmp4 = outer_ipv4->ipha_src;
5881 5883 outer_ipv4->ipha_src = outer_ipv4->ipha_dst;
5882 5884 outer_ipv4->ipha_dst = tmp4;
5883 5885 } else {
5884 5886 ASSERT(outer_ipv6 != NULL);
5885 5887 tmpaddr = outer_ipv6->ip6_src;
5886 5888 outer_ipv6->ip6_src = outer_ipv6->ip6_dst;
5887 5889 outer_ipv6->ip6_dst = tmpaddr;
5888 5890 }
5889 5891 }
5890 5892
5891 5893 data_mp = ipsec_check_global_policy(data_mp, NULL, outer_ipv4,
5892 5894 outer_ipv6, ira, ns);
5893 5895 if (data_mp == NULL)
5894 5896 return (NULL);
5895 5897
5896 5898 if (is_icmp) {
5897 5899 /* Set things back to normal. */
5898 5900 if (outer_ipv4 != NULL) {
5899 5901 tmp4 = outer_ipv4->ipha_src;
5900 5902 outer_ipv4->ipha_src = outer_ipv4->ipha_dst;
5901 5903 outer_ipv4->ipha_dst = tmp4;
5902 5904 } else {
5903 5905 /* No need for ASSERT()s now. */
5904 5906 tmpaddr = outer_ipv6->ip6_src;
5905 5907 outer_ipv6->ip6_src = outer_ipv6->ip6_dst;
5906 5908 outer_ipv6->ip6_dst = tmpaddr;
5907 5909 }
5908 5910 }
5909 5911
5910 5912 /*
5911 5913 * At this point, we pretend it's a cleartext accepted
5912 5914 * packet.
5913 5915 */
5914 5916 return (data_mp);
5915 5917 }
5916 5918
5917 5919 /*
5918 5920 * AVL comparison routine for our list of tunnel polheads.
5919 5921 */
5920 5922 static int
5921 5923 tunnel_compare(const void *arg1, const void *arg2)
5922 5924 {
5923 5925 ipsec_tun_pol_t *left, *right;
5924 5926 int rc;
5925 5927
5926 5928 left = (ipsec_tun_pol_t *)arg1;
5927 5929 right = (ipsec_tun_pol_t *)arg2;
5928 5930
5929 5931 rc = strncmp(left->itp_name, right->itp_name, LIFNAMSIZ);
5930 5932 return (rc == 0 ? rc : (rc > 0 ? 1 : -1));
5931 5933 }
5932 5934
5933 5935 /*
5934 5936 * Free a tunnel policy node.
5935 5937 */
5936 5938 void
5937 5939 itp_free(ipsec_tun_pol_t *node, netstack_t *ns)
5938 5940 {
5939 5941 if (node->itp_policy != NULL) {
5940 5942 IPPH_REFRELE(node->itp_policy, ns);
5941 5943 node->itp_policy = NULL;
5942 5944 }
5943 5945 if (node->itp_inactive != NULL) {
5944 5946 IPPH_REFRELE(node->itp_inactive, ns);
5945 5947 node->itp_inactive = NULL;
5946 5948 }
5947 5949 mutex_destroy(&node->itp_lock);
5948 5950 kmem_free(node, sizeof (*node));
5949 5951 }
5950 5952
5951 5953 void
5952 5954 itp_unlink(ipsec_tun_pol_t *node, netstack_t *ns)
5953 5955 {
5954 5956 ipsec_stack_t *ipss = ns->netstack_ipsec;
5955 5957
5956 5958 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_WRITER);
5957 5959 ipss->ipsec_tunnel_policy_gen++;
5958 5960 ipsec_fragcache_uninit(&node->itp_fragcache, ipss);
5959 5961 avl_remove(&ipss->ipsec_tunnel_policies, node);
5960 5962 rw_exit(&ipss->ipsec_tunnel_policy_lock);
5961 5963 ITP_REFRELE(node, ns);
5962 5964 }
5963 5965
5964 5966 /*
5965 5967 * Public interface to look up a tunnel security policy by name. Used by
5966 5968 * spdsock mostly. Returns "node" with a bumped refcnt.
5967 5969 */
5968 5970 ipsec_tun_pol_t *
5969 5971 get_tunnel_policy(char *name, netstack_t *ns)
5970 5972 {
5971 5973 ipsec_tun_pol_t *node, lookup;
5972 5974 ipsec_stack_t *ipss = ns->netstack_ipsec;
5973 5975
5974 5976 (void) strncpy(lookup.itp_name, name, LIFNAMSIZ);
5975 5977
5976 5978 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
5977 5979 node = (ipsec_tun_pol_t *)avl_find(&ipss->ipsec_tunnel_policies,
5978 5980 &lookup, NULL);
5979 5981 if (node != NULL) {
5980 5982 ITP_REFHOLD(node);
5981 5983 }
5982 5984 rw_exit(&ipss->ipsec_tunnel_policy_lock);
5983 5985
5984 5986 return (node);
5985 5987 }
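
Because the lookup above hands back the node with its reference count bumped, every successful caller owes a matching ITP_REFRELE(). A sketch of the expected calling pattern; the tunnel name and the work done on the node are purely illustrative:

/* Illustration only: look up, use, and release a tunnel policy node. */
static void
example_use_tunnel_policy(netstack_t *ns)
{
	ipsec_tun_pol_t *itp;

	itp = get_tunnel_policy("ip.tun0", ns);	/* hypothetical tunnel name */
	if (itp == NULL)
		return;				/* no such tunnel policy */

	/* ... use the node's policy heads here ... */

	ITP_REFRELE(itp, ns);			/* balance the lookup's hold */
}
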
5986 5988
5987 5989 /*
5988 5990  * Public interface to walk all tunnel security policies. Useful for spdsock
5989 5991 * DUMP operations. iterator() will not consume a reference.
5990 5992 */
5991 5993 void
5992 5994 itp_walk(void (*iterator)(ipsec_tun_pol_t *, void *, netstack_t *),
5993 5995 void *arg, netstack_t *ns)
5994 5996 {
5995 5997 ipsec_tun_pol_t *node;
5996 5998 ipsec_stack_t *ipss = ns->netstack_ipsec;
5997 5999
5998 6000 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
5999 6001 for (node = avl_first(&ipss->ipsec_tunnel_policies); node != NULL;
6000 6002 node = AVL_NEXT(&ipss->ipsec_tunnel_policies, node)) {
6001 6003 iterator(node, arg, ns);
6002 6004 }
6003 6005 rw_exit(&ipss->ipsec_tunnel_policy_lock);
6004 6006 }
6005 6007
6006 6008 /*
6007 6009 * Initialize policy head. This can only fail if there's a memory problem.
6008 6010 */
6009 6011 static boolean_t
6010 6012 tunnel_polhead_init(ipsec_policy_head_t *iph, netstack_t *ns)
6011 6013 {
6012 6014 ipsec_stack_t *ipss = ns->netstack_ipsec;
6013 6015
6014 6016 rw_init(&iph->iph_lock, NULL, RW_DEFAULT, NULL);
6015 6017 iph->iph_refs = 1;
6016 6018 iph->iph_gen = 0;
6017 6019 if (ipsec_alloc_table(iph, ipss->ipsec_tun_spd_hashsize,
6018 6020 KM_SLEEP, B_FALSE, ns) != 0) {
6019 6021 ipsec_polhead_free_table(iph);
6020 6022 return (B_FALSE);
6021 6023 }
6022 6024 ipsec_polhead_init(iph, ipss->ipsec_tun_spd_hashsize);
6023 6025 return (B_TRUE);
6024 6026 }
6025 6027
6026 6028 /*
6027 6029  * Create a tunnel policy node with "name". Set *errno to
6028 6030  * ENOMEM if there's a memory problem, and to EEXIST if there's an existing
6029 6031 * node.
6030 6032 */
6031 6033 ipsec_tun_pol_t *
6032 6034 create_tunnel_policy(char *name, int *errno, uint64_t *gen, netstack_t *ns)
6033 6035 {
6034 6036 ipsec_tun_pol_t *newbie, *existing;
6035 6037 avl_index_t where;
6036 6038 ipsec_stack_t *ipss = ns->netstack_ipsec;
6037 6039
6038 6040 newbie = kmem_zalloc(sizeof (*newbie), KM_NOSLEEP);
6039 6041 if (newbie == NULL) {
6040 6042 *errno = ENOMEM;
6041 6043 return (NULL);
6042 6044 }
6043 6045 if (!ipsec_fragcache_init(&newbie->itp_fragcache)) {
6044 6046 kmem_free(newbie, sizeof (*newbie));
6045 6047 *errno = ENOMEM;
6046 6048 return (NULL);
6047 6049 }
6048 6050
6049 6051 (void) strncpy(newbie->itp_name, name, LIFNAMSIZ);
6050 6052
6051 6053 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_WRITER);
6052 6054 existing = (ipsec_tun_pol_t *)avl_find(&ipss->ipsec_tunnel_policies,
6053 6055 newbie, &where);
6054 6056 if (existing != NULL) {
6055 6057 itp_free(newbie, ns);
6056 6058 *errno = EEXIST;
6057 6059 rw_exit(&ipss->ipsec_tunnel_policy_lock);
6058 6060 return (NULL);
6059 6061 }
6060 6062 ipss->ipsec_tunnel_policy_gen++;
6061 6063 *gen = ipss->ipsec_tunnel_policy_gen;
6062 6064 newbie->itp_refcnt = 2; /* One for the caller, one for the tree. */
6063 6065 newbie->itp_next_policy_index = 1;
6064 6066 avl_insert(&ipss->ipsec_tunnel_policies, newbie, where);
6065 6067 mutex_init(&newbie->itp_lock, NULL, MUTEX_DEFAULT, NULL);
6066 6068 newbie->itp_policy = kmem_zalloc(sizeof (ipsec_policy_head_t),
6067 6069 KM_NOSLEEP);
6068 6070 if (newbie->itp_policy == NULL)
6069 6071 goto nomem;
6070 6072 newbie->itp_inactive = kmem_zalloc(sizeof (ipsec_policy_head_t),
6071 6073 KM_NOSLEEP);
6072 6074 if (newbie->itp_inactive == NULL) {
6073 6075 kmem_free(newbie->itp_policy, sizeof (ipsec_policy_head_t));
6074 6076 goto nomem;
6075 6077 }
6076 6078
6077 6079 if (!tunnel_polhead_init(newbie->itp_policy, ns)) {
6078 6080 kmem_free(newbie->itp_policy, sizeof (ipsec_policy_head_t));
6079 6081 kmem_free(newbie->itp_inactive, sizeof (ipsec_policy_head_t));
6080 6082 goto nomem;
6081 6083 } else if (!tunnel_polhead_init(newbie->itp_inactive, ns)) {
6082 6084 IPPH_REFRELE(newbie->itp_policy, ns);
6083 6085 kmem_free(newbie->itp_inactive, sizeof (ipsec_policy_head_t));
6084 6086 goto nomem;
6085 6087 }
6086 6088 rw_exit(&ipss->ipsec_tunnel_policy_lock);
6087 6089
6088 6090 return (newbie);
6089 6091 nomem:
6090 6092 *errno = ENOMEM;
6091 6093 kmem_free(newbie, sizeof (*newbie));
6092 6094 return (NULL);
6093 6095 }
6094 6096
6095 6097 /*
6096 6098 * Given two addresses, find a tunnel instance's IPsec policy heads.
6097 6099 * Returns NULL on failure.
6098 6100 */
6099 6101 ipsec_tun_pol_t *
6100 6102 itp_get_byaddr(uint32_t *laddr, uint32_t *faddr, int af, ip_stack_t *ipst)
6101 6103 {
6102 6104 conn_t *connp;
6103 6105 iptun_t *iptun;
6104 6106 ipsec_tun_pol_t *itp = NULL;
6105 6107
6106 6108 	/* Classifiers are used with "src" being foreign. */
6107 6109 if (af == AF_INET) {
6108 6110 connp = ipcl_iptun_classify_v4((ipaddr_t *)faddr,
6109 6111 (ipaddr_t *)laddr, ipst);
6110 6112 } else {
6111 6113 ASSERT(af == AF_INET6);
6112 6114 ASSERT(!IN6_IS_ADDR_V4MAPPED((in6_addr_t *)laddr));
6113 6115 ASSERT(!IN6_IS_ADDR_V4MAPPED((in6_addr_t *)faddr));
6114 6116 connp = ipcl_iptun_classify_v6((in6_addr_t *)faddr,
6115 6117 (in6_addr_t *)laddr, ipst);
6116 6118 }
6117 6119
6118 6120 if (connp == NULL)
6119 6121 return (NULL);
6120 6122
6121 6123 if (IPCL_IS_IPTUN(connp)) {
6122 6124 iptun = connp->conn_iptun;
6123 6125 if (iptun != NULL) {
6124 6126 itp = iptun->iptun_itp;
6125 6127 if (itp != NULL) {
6126 6128 /* Braces due to the macro's nature... */
6127 6129 ITP_REFHOLD(itp);
6128 6130 }
6129 6131 } /* Else itp is already NULL. */
6130 6132 }
6131 6133
6132 6134 CONN_DEC_REF(connp);
6133 6135 return (itp);
6134 6136 }
6135 6137
6136 6138 /*
6137 6139 * Frag cache code, based on SunScreen 3.2 source
6138 6140 * screen/kernel/common/screen_fragcache.c
6139 6141 */
6140 6142
6141 6143 #define IPSEC_FRAG_TTL_MAX 5
6142 6144 /*
6143 6145 * Note that the following parameters create 256 hash buckets
6144 6146  * with 1024 free entries to be distributed. Entries are cleaned
6145 6147  * periodically, and a cleanup is also attempted when there is no
6146 6148  * free space, but this system errs on the side of dropping packets
6147 6149  * rather than exhausting memory. We may decide to make the hash
6148 6150 * factor a tunable if this proves to be a bad decision.
6149 6151 */
6150 6152 #define IPSEC_FRAG_HASH_SLOTS (1<<8)
6151 6153 #define IPSEC_FRAG_HASH_FACTOR 4
6152 6154 #define IPSEC_FRAG_HASH_SIZE (IPSEC_FRAG_HASH_SLOTS * IPSEC_FRAG_HASH_FACTOR)
6153 6155
6154 6156 #define IPSEC_FRAG_HASH_MASK (IPSEC_FRAG_HASH_SLOTS - 1)
6155 6157 #define IPSEC_FRAG_HASH_FUNC(id) (((id) & IPSEC_FRAG_HASH_MASK) ^ \
6156 6158 (((id) / \
6157 6159 (ushort_t)IPSEC_FRAG_HASH_SLOTS) & \
6158 6160 IPSEC_FRAG_HASH_MASK))
6159 6161
6160 6162 /* Maximum fragments per packet. 48-byte payloads x 1366 fragments > 64KB */
6161 6163 #define IPSEC_MAX_FRAGS 1366
6162 6164
6163 6165 #define V4_FRAG_OFFSET(ipha) ((ntohs(ipha->ipha_fragment_offset_and_flags) & \
6164 6166 IPH_OFFSET) << 3)
6165 6167 #define V4_MORE_FRAGS(ipha) (ntohs(ipha->ipha_fragment_offset_and_flags) & \
6166 6168 IPH_MF)
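
The hash macros above spread fragment chains over 256 buckets keyed on the IP identification field, with the table provisioned for four entries per bucket on average. A small sketch, with a made-up identification value, shows how an ident maps onto a slot and how the IPSEC_MAX_FRAGS bound relates to the 64KB datagram limit.

/*
 * Illustration only: the constants mirror the macros above and the
 * sample ident is made up.
 */
#define	SLOTS	(1 << 8)		/* IPSEC_FRAG_HASH_SLOTS */
#define	MASK	(SLOTS - 1)		/* IPSEC_FRAG_HASH_MASK */

static unsigned int
frag_hash_slot(unsigned int id)
{
	/* Low byte XORed with the next byte up, as IPSEC_FRAG_HASH_FUNC does. */
	return ((id & MASK) ^ ((id / (unsigned short)SLOTS) & MASK));
}

/*
 * Example: id = 0x1234 hashes to (0x34 ^ 0x12) = 0x26, i.e. bucket 38.
 * With IPSEC_FRAG_HASH_FACTOR == 4 there are 4 * 256 = 1024 entries shared
 * by all buckets, and IPSEC_MAX_FRAGS == 1366 because 1366 fragments of
 * 48 bytes already carry 65568 bytes, more than a 64KB IP datagram.
 */
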
6167 6169
6168 6170 /*
6169 6171 * Initialize an ipsec fragcache instance.
6170 6172 * Returns B_FALSE if memory allocation fails.
6171 6173 */
6172 6174 boolean_t
6173 6175 ipsec_fragcache_init(ipsec_fragcache_t *frag)
6174 6176 {
6175 6177 ipsec_fragcache_entry_t *ftemp;
6176 6178 int i;
6177 6179
6178 6180 mutex_init(&frag->itpf_lock, NULL, MUTEX_DEFAULT, NULL);
6179 6181 frag->itpf_ptr = (ipsec_fragcache_entry_t **)
6180 6182 kmem_zalloc(sizeof (ipsec_fragcache_entry_t *) *
6181 6183 IPSEC_FRAG_HASH_SLOTS, KM_NOSLEEP);
6182 6184 if (frag->itpf_ptr == NULL)
6183 6185 return (B_FALSE);
6184 6186
6185 6187 ftemp = (ipsec_fragcache_entry_t *)
6186 6188 kmem_zalloc(sizeof (ipsec_fragcache_entry_t) *
6187 6189 IPSEC_FRAG_HASH_SIZE, KM_NOSLEEP);
6188 6190 if (ftemp == NULL) {
6189 6191 kmem_free(frag->itpf_ptr, sizeof (ipsec_fragcache_entry_t *) *
6190 6192 IPSEC_FRAG_HASH_SLOTS);
6191 6193 return (B_FALSE);
6192 6194 }
6193 6195
6194 6196 frag->itpf_freelist = NULL;
6195 6197
6196 6198 for (i = 0; i < IPSEC_FRAG_HASH_SIZE; i++) {
6197 6199 ftemp->itpfe_next = frag->itpf_freelist;
6198 6200 frag->itpf_freelist = ftemp;
6199 6201 ftemp++;
6200 6202 }
6201 6203
6202 6204 frag->itpf_expire_hint = 0;
6203 6205
6204 6206 return (B_TRUE);
6205 6207 }
6206 6208
6207 6209 void
6208 6210 ipsec_fragcache_uninit(ipsec_fragcache_t *frag, ipsec_stack_t *ipss)
6209 6211 {
6210 6212 ipsec_fragcache_entry_t *fep;
6211 6213 int i;
6212 6214
6213 6215 mutex_enter(&frag->itpf_lock);
6214 6216 if (frag->itpf_ptr) {
6215 6217 /* Delete any existing fragcache entry chains */
6216 6218 for (i = 0; i < IPSEC_FRAG_HASH_SLOTS; i++) {
6217 6219 fep = (frag->itpf_ptr)[i];
6218 6220 while (fep != NULL) {
6219 6221 /* Returned fep is next in chain or NULL */
6220 6222 fep = fragcache_delentry(i, fep, frag, ipss);
6221 6223 }
6222 6224 }
6223 6225 /*
6224 6226 * Chase the pointers back to the beginning
6225 6227 * of the memory allocation and then
6226 6228 * get rid of the allocated freelist
6227 6229 */
6228 6230 while (frag->itpf_freelist->itpfe_next != NULL)
6229 6231 frag->itpf_freelist = frag->itpf_freelist->itpfe_next;
6230 6232 /*
6231 6233 * XXX - If we ever dynamically grow the freelist
6232 6234 * then we'll have to free entries individually
6233 6235 * or determine how many entries or chunks we have
6234 6236 * grown since the initial allocation.
6235 6237 */
6236 6238 kmem_free(frag->itpf_freelist,
6237 6239 sizeof (ipsec_fragcache_entry_t) *
6238 6240 IPSEC_FRAG_HASH_SIZE);
6239 6241 /* Free the fragcache structure */
6240 6242 kmem_free(frag->itpf_ptr,
6241 6243 sizeof (ipsec_fragcache_entry_t *) *
6242 6244 IPSEC_FRAG_HASH_SLOTS);
6243 6245 }
6244 6246 mutex_exit(&frag->itpf_lock);
6245 6247 mutex_destroy(&frag->itpf_lock);
6246 6248 }
6247 6249
6248 6250 /*
6249 6251 * Add a fragment to the fragment cache. Consumes mp if NULL is returned.
6250 6252 * Returns mp if a whole fragment has been assembled, NULL otherwise
6251 6253 * The returned mp could be a b_next chain of fragments.
6252 6254 *
6253 6255 * The iramp argument is set on inbound; NULL if outbound.
6254 6256 */
6255 6257 mblk_t *
6256 6258 ipsec_fragcache_add(ipsec_fragcache_t *frag, mblk_t *iramp, mblk_t *mp,
6257 6259 int outer_hdr_len, ipsec_stack_t *ipss)
6258 6260 {
6259 6261 boolean_t is_v4;
6260 6262 time_t itpf_time;
6261 6263 ipha_t *iph;
6262 6264 ipha_t *oiph;
6263 6265 ip6_t *ip6h = NULL;
6264 6266 uint8_t v6_proto;
6265 6267 uint8_t *v6_proto_p;
6266 6268 uint16_t ip6_hdr_length;
6267 6269 ip_pkt_t ipp;
6268 6270 ip6_frag_t *fraghdr;
6269 6271 ipsec_fragcache_entry_t *fep;
6270 6272 int i;
6271 6273 mblk_t *nmp, *prevmp;
6272 6274 int firstbyte, lastbyte;
6273 6275 int offset;
6274 6276 int last;
6275 6277 boolean_t inbound = (iramp != NULL);
6276 6278
6277 6279 #ifdef FRAGCACHE_DEBUG
6278 6280 cmn_err(CE_WARN, "Fragcache: %s\n", inbound ? "INBOUND" : "OUTBOUND");
6279 6281 #endif
6280 6282 /*
6281 6283 	 * You're on the slow path, so ensure that every packet in the
6282 6284 * cache is a single-mblk one.
6283 6285 */
6284 6286 if (mp->b_cont != NULL) {
6285 6287 nmp = msgpullup(mp, -1);
6286 6288 if (nmp == NULL) {
6287 6289 ip_drop_packet(mp, inbound, NULL,
6288 6290 DROPPER(ipss, ipds_spd_nomem),
6289 6291 &ipss->ipsec_spd_dropper);
6290 6292 if (inbound)
6291 6293 (void) ip_recv_attr_free_mblk(iramp);
6292 6294 return (NULL);
6293 6295 }
6294 6296 freemsg(mp);
6295 6297 mp = nmp;
6296 6298 }
6297 6299
6298 6300 mutex_enter(&frag->itpf_lock);
6299 6301
6300 6302 oiph = (ipha_t *)mp->b_rptr;
6301 6303 iph = (ipha_t *)(mp->b_rptr + outer_hdr_len);
6302 6304
6303 6305 if (IPH_HDR_VERSION(iph) == IPV4_VERSION) {
6304 6306 is_v4 = B_TRUE;
6305 6307 } else {
6306 6308 ASSERT(IPH_HDR_VERSION(iph) == IPV6_VERSION);
6307 6309 ip6h = (ip6_t *)(mp->b_rptr + outer_hdr_len);
6308 6310
6309 6311 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &ip6_hdr_length,
6310 6312 &v6_proto_p)) {
6311 6313 /*
6312 6314 * Find upper layer protocol.
6313 6315 * If it fails we have a malformed packet
6314 6316 */
6315 6317 mutex_exit(&frag->itpf_lock);
6316 6318 ip_drop_packet(mp, inbound, NULL,
6317 6319 DROPPER(ipss, ipds_spd_malformed_packet),
6318 6320 &ipss->ipsec_spd_dropper);
6319 6321 if (inbound)
6320 6322 (void) ip_recv_attr_free_mblk(iramp);
6321 6323 return (NULL);
6322 6324 } else {
6323 6325 v6_proto = *v6_proto_p;
6324 6326 }
6325 6327
6326 6328
6327 6329 bzero(&ipp, sizeof (ipp));
6328 6330 (void) ip_find_hdr_v6(mp, ip6h, B_FALSE, &ipp, NULL);
6329 6331 if (!(ipp.ipp_fields & IPPF_FRAGHDR)) {
6330 6332 /*
6331 6333 * We think this is a fragment, but didn't find
6332 6334 * a fragment header. Something is wrong.
6333 6335 */
6334 6336 mutex_exit(&frag->itpf_lock);
6335 6337 ip_drop_packet(mp, inbound, NULL,
6336 6338 DROPPER(ipss, ipds_spd_malformed_frag),
6337 6339 &ipss->ipsec_spd_dropper);
6338 6340 if (inbound)
6339 6341 (void) ip_recv_attr_free_mblk(iramp);
6340 6342 return (NULL);
6341 6343 }
6342 6344 fraghdr = ipp.ipp_fraghdr;
6343 6345 is_v4 = B_FALSE;
6344 6346 }
6345 6347
6346 6348 /* Anything to cleanup? */
6347 6349
6348 6350 /*
6349 6351 * This cleanup call could be put in a timer loop
6350 6352 * but it may actually be just as reasonable a decision to
6351 6353 * leave it here. The disadvantage is this only gets called when
6352 6354 * frags are added. The advantage is that it is not
6353 6355 * susceptible to race conditions like a time-based cleanup
6354 6356 * may be.
6355 6357 */
6356 6358 itpf_time = gethrestime_sec();
6357 6359 if (itpf_time >= frag->itpf_expire_hint)
6358 6360 ipsec_fragcache_clean(frag, ipss);
6359 6361
6360 6362 /* Lookup to see if there is an existing entry */
6361 6363
6362 6364 if (is_v4)
6363 6365 i = IPSEC_FRAG_HASH_FUNC(iph->ipha_ident);
6364 6366 else
6365 6367 i = IPSEC_FRAG_HASH_FUNC(fraghdr->ip6f_ident);
6366 6368
6367 6369 for (fep = (frag->itpf_ptr)[i]; fep; fep = fep->itpfe_next) {
6368 6370 if (is_v4) {
6369 6371 ASSERT(iph != NULL);
6370 6372 if ((fep->itpfe_id == iph->ipha_ident) &&
6371 6373 (fep->itpfe_src == iph->ipha_src) &&
6372 6374 (fep->itpfe_dst == iph->ipha_dst) &&
6373 6375 (fep->itpfe_proto == iph->ipha_protocol))
6374 6376 break;
6375 6377 } else {
6376 6378 ASSERT(fraghdr != NULL);
6377 6379 ASSERT(fep != NULL);
6378 6380 if ((fep->itpfe_id == fraghdr->ip6f_ident) &&
6379 6381 IN6_ARE_ADDR_EQUAL(&fep->itpfe_src6,
6380 6382 &ip6h->ip6_src) &&
6381 6383 IN6_ARE_ADDR_EQUAL(&fep->itpfe_dst6,
6382 6384 &ip6h->ip6_dst) && (fep->itpfe_proto == v6_proto))
6383 6385 break;
6384 6386 }
6385 6387 }
6386 6388
6387 6389 if (is_v4) {
6388 6390 firstbyte = V4_FRAG_OFFSET(iph);
6389 6391 lastbyte = firstbyte + ntohs(iph->ipha_length) -
6390 6392 IPH_HDR_LENGTH(iph);
6391 6393 last = (V4_MORE_FRAGS(iph) == 0);
6392 6394 #ifdef FRAGCACHE_DEBUG
6393 6395 cmn_err(CE_WARN, "V4 fragcache: firstbyte = %d, lastbyte = %d, "
6394 6396 "is_last_frag = %d, id = %d, mp = %p\n", firstbyte,
6395 6397 lastbyte, last, iph->ipha_ident, mp);
6396 6398 #endif
6397 6399 } else {
6398 6400 firstbyte = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
6399 6401 lastbyte = firstbyte + ntohs(ip6h->ip6_plen) +
6400 6402 sizeof (ip6_t) - ip6_hdr_length;
6401 6403 last = (fraghdr->ip6f_offlg & IP6F_MORE_FRAG) == 0;
6402 6404 #ifdef FRAGCACHE_DEBUG
6403 6405 cmn_err(CE_WARN, "V6 fragcache: firstbyte = %d, lastbyte = %d, "
6404 6406 "is_last_frag = %d, id = %d, fraghdr = %p, mp = %p\n",
6405 6407 firstbyte, lastbyte, last, fraghdr->ip6f_ident, fraghdr,
6406 6408 mp);
6407 6409 #endif
6408 6410 }
6409 6411
6410 6412 /* check for bogus fragments and delete the entry */
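	/*
	 * Fragment offsets are multiples of eight in both IPv4 and IPv6,
	 * so a non-zero offset of eight or less is either impossible
	 * (1-7) or would place fragment data inside the transport
	 * header (8), the classic tiny/overlapping-fragment evasion.
	 */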
6411 6413 if (firstbyte > 0 && firstbyte <= 8) {
6412 6414 if (fep != NULL)
6413 6415 (void) fragcache_delentry(i, fep, frag, ipss);
6414 6416 mutex_exit(&frag->itpf_lock);
6415 6417 ip_drop_packet(mp, inbound, NULL,
6416 6418 DROPPER(ipss, ipds_spd_malformed_frag),
6417 6419 &ipss->ipsec_spd_dropper);
6418 6420 if (inbound)
6419 6421 (void) ip_recv_attr_free_mblk(iramp);
6420 6422 return (NULL);
6421 6423 }
6422 6424
6423 6425 /* Not found, allocate a new entry */
6424 6426 if (fep == NULL) {
6425 6427 if (frag->itpf_freelist == NULL) {
6426 6428 /* see if there is some space */
6427 6429 ipsec_fragcache_clean(frag, ipss);
6428 6430 if (frag->itpf_freelist == NULL) {
6429 6431 mutex_exit(&frag->itpf_lock);
6430 6432 ip_drop_packet(mp, inbound, NULL,
6431 6433 DROPPER(ipss, ipds_spd_nomem),
6432 6434 &ipss->ipsec_spd_dropper);
6433 6435 if (inbound)
6434 6436 (void) ip_recv_attr_free_mblk(iramp);
6435 6437 return (NULL);
6436 6438 }
6437 6439 }
6438 6440
6439 6441 fep = frag->itpf_freelist;
6440 6442 frag->itpf_freelist = fep->itpfe_next;
6441 6443
6442 6444 if (is_v4) {
6443 6445 bcopy((caddr_t)&iph->ipha_src, (caddr_t)&fep->itpfe_src,
6444 6446 sizeof (struct in_addr));
6445 6447 bcopy((caddr_t)&iph->ipha_dst, (caddr_t)&fep->itpfe_dst,
6446 6448 sizeof (struct in_addr));
6447 6449 fep->itpfe_id = iph->ipha_ident;
6448 6450 fep->itpfe_proto = iph->ipha_protocol;
6449 6451 i = IPSEC_FRAG_HASH_FUNC(fep->itpfe_id);
6450 6452 } else {
6451 6453 bcopy((in6_addr_t *)&ip6h->ip6_src,
6452 6454 (in6_addr_t *)&fep->itpfe_src6,
6453 6455 sizeof (struct in6_addr));
6454 6456 bcopy((in6_addr_t *)&ip6h->ip6_dst,
6455 6457 (in6_addr_t *)&fep->itpfe_dst6,
6456 6458 sizeof (struct in6_addr));
6457 6459 fep->itpfe_id = fraghdr->ip6f_ident;
6458 6460 fep->itpfe_proto = v6_proto;
6459 6461 i = IPSEC_FRAG_HASH_FUNC(fep->itpfe_id);
6460 6462 }
6461 6463 itpf_time = gethrestime_sec();
6462 6464 fep->itpfe_exp = itpf_time + IPSEC_FRAG_TTL_MAX + 1;
6463 6465 fep->itpfe_last = 0;
6464 6466 fep->itpfe_fraglist = NULL;
6465 6467 fep->itpfe_depth = 0;
6466 6468 fep->itpfe_next = (frag->itpf_ptr)[i];
6467 6469 (frag->itpf_ptr)[i] = fep;
6468 6470
6469 6471 if (frag->itpf_expire_hint > fep->itpfe_exp)
6470 6472 frag->itpf_expire_hint = fep->itpfe_exp;
6471 6473
6472 6474 }
6473 6475
6474 6476 /* Insert it in the frag list */
6475 6477 /* List is in order by starting offset of fragments */
6476 6478
6477 6479 prevmp = NULL;
6478 6480 for (nmp = fep->itpfe_fraglist; nmp; nmp = nmp->b_next) {
6479 6481 ipha_t *niph;
6480 6482 ipha_t *oniph;
6481 6483 ip6_t *nip6h;
6482 6484 ip_pkt_t nipp;
6483 6485 ip6_frag_t *nfraghdr;
6484 6486 uint16_t nip6_hdr_length;
6485 6487 uint8_t *nv6_proto_p;
6486 6488 int nfirstbyte, nlastbyte;
6487 6489 char *data, *ndata;
6488 6490 mblk_t *ndata_mp = (inbound ? nmp->b_cont : nmp);
6489 6491 int hdr_len;
6490 6492
6491 6493 oniph = (ipha_t *)mp->b_rptr;
6492 6494 nip6h = NULL;
6493 6495 niph = NULL;
6494 6496
6495 6497 /*
6496 6498 * Determine outer header type and length and set
6497 6499 * pointers appropriately
6498 6500 */
6499 6501
6500 6502 if (IPH_HDR_VERSION(oniph) == IPV4_VERSION) {
6501 6503 hdr_len = ((outer_hdr_len != 0) ?
6502 6504 IPH_HDR_LENGTH(oiph) : 0);
6503 6505 niph = (ipha_t *)(ndata_mp->b_rptr + hdr_len);
6504 6506 } else {
6505 6507 ASSERT(IPH_HDR_VERSION(oniph) == IPV6_VERSION);
6506 6508 ASSERT(ndata_mp->b_cont == NULL);
6507 6509 nip6h = (ip6_t *)ndata_mp->b_rptr;
6508 6510 (void) ip_hdr_length_nexthdr_v6(ndata_mp, nip6h,
6509 6511 &nip6_hdr_length, &v6_proto_p);
6510 6512 hdr_len = ((outer_hdr_len != 0) ? nip6_hdr_length : 0);
6511 6513 }
6512 6514
6513 6515 /*
6514 6516 * Determine inner header type and length and set
6515 6517 * pointers appropriately
6516 6518 */
6517 6519
6518 6520 if (is_v4) {
6519 6521 if (niph == NULL) {
6520 6522 /* Was v6 outer */
6521 6523 niph = (ipha_t *)(ndata_mp->b_rptr + hdr_len);
6522 6524 }
6523 6525 nfirstbyte = V4_FRAG_OFFSET(niph);
6524 6526 nlastbyte = nfirstbyte + ntohs(niph->ipha_length) -
6525 6527 IPH_HDR_LENGTH(niph);
6526 6528 } else {
6527 6529 ASSERT(ndata_mp->b_cont == NULL);
6528 6530 nip6h = (ip6_t *)(ndata_mp->b_rptr + hdr_len);
6529 6531 if (!ip_hdr_length_nexthdr_v6(ndata_mp, nip6h,
6530 6532 &nip6_hdr_length, &nv6_proto_p)) {
6531 6533 mutex_exit(&frag->itpf_lock);
6532 6534 ip_drop_packet_chain(nmp, inbound, NULL,
6533 6535 DROPPER(ipss, ipds_spd_malformed_frag),
6534 6536 &ipss->ipsec_spd_dropper);
6535 6537 ipsec_freemsg_chain(ndata_mp);
6536 6538 if (inbound)
6537 6539 (void) ip_recv_attr_free_mblk(iramp);
6538 6540 return (NULL);
6539 6541 }
6540 6542 bzero(&nipp, sizeof (nipp));
6541 6543 (void) ip_find_hdr_v6(ndata_mp, nip6h, B_FALSE, &nipp,
6542 6544 NULL);
6543 6545 nfraghdr = nipp.ipp_fraghdr;
6544 6546 nfirstbyte = ntohs(nfraghdr->ip6f_offlg &
6545 6547 IP6F_OFF_MASK);
6546 6548 nlastbyte = nfirstbyte + ntohs(nip6h->ip6_plen) +
6547 6549 sizeof (ip6_t) - nip6_hdr_length;
6548 6550 }
6549 6551
6550 6552 /* Check for overlapping fragments */
6551 6553 if (firstbyte >= nfirstbyte && firstbyte < nlastbyte) {
6552 6554 /*
6553 6555 * Overlap Check:
6554 6556 * ~~~~--------- # Check if the newly
6555 6557 * ~ ndata_mp| # received fragment
6556 6558 * ~~~~--------- # overlaps with the
6557 6559 * ---------~~~~~~ # current fragment.
6558 6560 * | mp ~
6559 6561 * ---------~~~~~~
6560 6562 */
6561 6563 if (is_v4) {
6562 6564 data = (char *)iph + IPH_HDR_LENGTH(iph) +
6563 6565 firstbyte - nfirstbyte;
6564 6566 ndata = (char *)niph + IPH_HDR_LENGTH(niph);
6565 6567 } else {
6566 6568 data = (char *)ip6h +
6567 6569 nip6_hdr_length + firstbyte -
6568 6570 nfirstbyte;
6569 6571 ndata = (char *)nip6h + nip6_hdr_length;
6570 6572 }
6571 6573 if (bcmp(data, ndata, MIN(lastbyte, nlastbyte) -
6572 6574 firstbyte)) {
6573 6575 /* Overlapping data does not match */
6574 6576 (void) fragcache_delentry(i, fep, frag, ipss);
6575 6577 mutex_exit(&frag->itpf_lock);
6576 6578 ip_drop_packet(mp, inbound, NULL,
6577 6579 DROPPER(ipss, ipds_spd_overlap_frag),
6578 6580 &ipss->ipsec_spd_dropper);
6579 6581 if (inbound)
6580 6582 (void) ip_recv_attr_free_mblk(iramp);
6581 6583 return (NULL);
6582 6584 }
6583 6585 /* Part of defense for jolt2.c fragmentation attack */
6584 6586 if (firstbyte >= nfirstbyte && lastbyte <= nlastbyte) {
6585 6587 /*
6586 6588 * Check for identical or subset fragments:
6587 6589 * ---------- ~~~~--------~~~~~
6588 6590 * | nmp | or ~ nmp ~
6589 6591 * ---------- ~~~~--------~~~~~
6590 6592 * ---------- ------
6591 6593 * | mp | | mp |
6592 6594 * ---------- ------
6593 6595 */
6594 6596 mutex_exit(&frag->itpf_lock);
6595 6597 ip_drop_packet(mp, inbound, NULL,
6596 6598 DROPPER(ipss, ipds_spd_evil_frag),
6597 6599 &ipss->ipsec_spd_dropper);
6598 6600 if (inbound)
6599 6601 (void) ip_recv_attr_free_mblk(iramp);
6600 6602 return (NULL);
6601 6603 }
6602 6604
6603 6605 }
6604 6606
6605 6607 /* Correct location for this fragment? */
6606 6608 if (firstbyte <= nfirstbyte) {
6607 6609 /*
6608 6610 * Check if the tail end of the new fragment overlaps
6609 6611 * with the head of the current fragment.
6610 6612 * --------~~~~~~~
6611 6613 * | nmp ~
6612 6614 * --------~~~~~~~
6613 6615 * ~~~~~--------
6614 6616 * ~ mp |
6615 6617 * ~~~~~--------
6616 6618 */
6617 6619 if (lastbyte > nfirstbyte) {
6618 6620 /* Fragments overlap */
6619 6621 data = (char *)iph + IPH_HDR_LENGTH(iph) +
6620 6622 firstbyte - nfirstbyte;
6621 6623 ndata = (char *)niph + IPH_HDR_LENGTH(niph);
6622 6624 if (is_v4) {
6623 6625 data = (char *)iph +
6624 6626 IPH_HDR_LENGTH(iph) + firstbyte -
6625 6627 nfirstbyte;
6626 6628 ndata = (char *)niph +
6627 6629 IPH_HDR_LENGTH(niph);
6628 6630 } else {
6629 6631 data = (char *)ip6h +
6630 6632 nip6_hdr_length + firstbyte -
6631 6633 nfirstbyte;
6632 6634 ndata = (char *)nip6h + nip6_hdr_length;
6633 6635 }
6634 6636 if (bcmp(data, ndata, MIN(lastbyte, nlastbyte)
6635 6637 - nfirstbyte)) {
6636 6638 /* Overlap mismatch */
6637 6639 (void) fragcache_delentry(i, fep, frag,
6638 6640 ipss);
6639 6641 mutex_exit(&frag->itpf_lock);
6640 6642 ip_drop_packet(mp, inbound, NULL,
6641 6643 DROPPER(ipss,
6642 6644 ipds_spd_overlap_frag),
6643 6645 &ipss->ipsec_spd_dropper);
6644 6646 if (inbound) {
6645 6647 (void) ip_recv_attr_free_mblk(
6646 6648 iramp);
6647 6649 }
6648 6650 return (NULL);
6649 6651 }
6650 6652 }
6651 6653
6652 6654 /*
6653 6655 * Fragment does not illegally overlap and can now
6654 6656 * be inserted into the chain
6655 6657 */
6656 6658 break;
6657 6659 }
6658 6660
6659 6661 prevmp = nmp;
6660 6662 }
6661 6663 /* Prepend the attributes before we link it in */
6662 6664 if (iramp != NULL) {
6663 6665 ASSERT(iramp->b_cont == NULL);
6664 6666 iramp->b_cont = mp;
6665 6667 mp = iramp;
6666 6668 iramp = NULL;
6667 6669 }
6668 6670 mp->b_next = nmp;
6669 6671
6670 6672 if (prevmp == NULL) {
6671 6673 fep->itpfe_fraglist = mp;
6672 6674 } else {
6673 6675 prevmp->b_next = mp;
6674 6676 }
6675 6677 if (last)
6676 6678 fep->itpfe_last = 1;
6677 6679
6678 6680 /* Part of defense for jolt2.c fragmentation attack */
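	/*
	 * Capping itpfe_depth bounds how many mblks a single cache entry
	 * can pin, so a flood of fragments that never completes cannot
	 * exhaust memory.
	 */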
6679 6681 if (++(fep->itpfe_depth) > IPSEC_MAX_FRAGS) {
6680 6682 (void) fragcache_delentry(i, fep, frag, ipss);
6681 6683 mutex_exit(&frag->itpf_lock);
6682 6684 if (inbound)
6683 6685 mp = ip_recv_attr_free_mblk(mp);
6684 6686
6685 6687 ip_drop_packet(mp, inbound, NULL,
6686 6688 DROPPER(ipss, ipds_spd_max_frags),
6687 6689 &ipss->ipsec_spd_dropper);
6688 6690 return (NULL);
6689 6691 }
6690 6692
6691 6693 /* Check for complete packet */
6692 6694
6693 6695 if (!fep->itpfe_last) {
6694 6696 mutex_exit(&frag->itpf_lock);
6695 6697 #ifdef FRAGCACHE_DEBUG
6696 6698 cmn_err(CE_WARN, "Fragment cached, last not yet seen.\n");
6697 6699 #endif
6698 6700 return (NULL);
6699 6701 }
6700 6702
6701 6703 offset = 0;
6702 6704 for (mp = fep->itpfe_fraglist; mp; mp = mp->b_next) {
6703 6705 mblk_t *data_mp = (inbound ? mp->b_cont : mp);
6704 6706 int hdr_len;
6705 6707
6706 6708 oiph = (ipha_t *)data_mp->b_rptr;
6707 6709 ip6h = NULL;
6708 6710 iph = NULL;
6709 6711
6710 6712 if (IPH_HDR_VERSION(oiph) == IPV4_VERSION) {
6711 6713 hdr_len = ((outer_hdr_len != 0) ?
6712 6714 IPH_HDR_LENGTH(oiph) : 0);
6713 6715 iph = (ipha_t *)(data_mp->b_rptr + hdr_len);
6714 6716 } else {
6715 6717 ASSERT(IPH_HDR_VERSION(oiph) == IPV6_VERSION);
6716 6718 ASSERT(data_mp->b_cont == NULL);
6717 6719 ip6h = (ip6_t *)data_mp->b_rptr;
6718 6720 (void) ip_hdr_length_nexthdr_v6(data_mp, ip6h,
6719 6721 &ip6_hdr_length, &v6_proto_p);
6720 6722 hdr_len = ((outer_hdr_len != 0) ? ip6_hdr_length : 0);
6721 6723 }
6722 6724
6723 6725 /* Calculate current fragment start/end */
6724 6726 if (is_v4) {
6725 6727 if (iph == NULL) {
6726 6728 /* Was v6 outer */
6727 6729 iph = (ipha_t *)(data_mp->b_rptr + hdr_len);
6728 6730 }
6729 6731 firstbyte = V4_FRAG_OFFSET(iph);
6730 6732 lastbyte = firstbyte + ntohs(iph->ipha_length) -
6731 6733 IPH_HDR_LENGTH(iph);
6732 6734 } else {
6733 6735 ASSERT(data_mp->b_cont == NULL);
6734 6736 ip6h = (ip6_t *)(data_mp->b_rptr + hdr_len);
6735 6737 if (!ip_hdr_length_nexthdr_v6(data_mp, ip6h,
6736 6738 &ip6_hdr_length, &v6_proto_p)) {
6737 6739 mutex_exit(&frag->itpf_lock);
6738 6740 ip_drop_packet_chain(mp, inbound, NULL,
6739 6741 DROPPER(ipss, ipds_spd_malformed_frag),
6740 6742 &ipss->ipsec_spd_dropper);
6741 6743 return (NULL);
6742 6744 }
6743 6745 v6_proto = *v6_proto_p;
6744 6746 bzero(&ipp, sizeof (ipp));
6745 6747 (void) ip_find_hdr_v6(data_mp, ip6h, B_FALSE, &ipp,
6746 6748 NULL);
6747 6749 fraghdr = ipp.ipp_fraghdr;
6748 6750 firstbyte = ntohs(fraghdr->ip6f_offlg &
6749 6751 IP6F_OFF_MASK);
6750 6752 lastbyte = firstbyte + ntohs(ip6h->ip6_plen) +
6751 6753 sizeof (ip6_t) - ip6_hdr_length;
6752 6754 }
6753 6755
6754 6756 /*
6755 6757		 * If this fragment starts beyond the current contiguous
6756 6758		 * offset, a fragment is missing, so return NULL.
6757 6759 */
6758 6760 if (firstbyte > offset) {
6759 6761 mutex_exit(&frag->itpf_lock);
6760 6762 #ifdef FRAGCACHE_DEBUG
6761 6763 /*
6762 6764 * Note, this can happen when the last frag
6763 6765 * gets sent through because it is smaller
6764 6766 * than the MTU. It is not necessarily an
6765 6767 * error condition.
6766 6768 */
6767 6769 cmn_err(CE_WARN, "Frag greater than offset! : "
6768 6770 "missing fragment: firstbyte = %d, offset = %d, "
6769 6771 "mp = %p\n", firstbyte, offset, mp);
6770 6772 #endif
6771 6773 return (NULL);
6772 6774 }
6773 6775 #ifdef FRAGCACHE_DEBUG
6774 6776 cmn_err(CE_WARN, "Frag offsets : "
6775 6777 "firstbyte = %d, offset = %d, mp = %p\n",
6776 6778 firstbyte, offset, mp);
6777 6779 #endif
6778 6780
6779 6781 /*
6780 6782 * If we are at the last fragment, we have the complete
6781 6783 * packet, so rechain things and return it to caller
6782 6784 * for processing
6783 6785 */
6784 6786
6785 6787 if ((is_v4 && !V4_MORE_FRAGS(iph)) ||
6786 6788 (!is_v4 && !(fraghdr->ip6f_offlg & IP6F_MORE_FRAG))) {
6787 6789 mp = fep->itpfe_fraglist;
6788 6790 fep->itpfe_fraglist = NULL;
6789 6791 (void) fragcache_delentry(i, fep, frag, ipss);
6790 6792 mutex_exit(&frag->itpf_lock);
6791 6793
6792 6794 if ((is_v4 && (firstbyte + ntohs(iph->ipha_length) >
6793 6795 65535)) || (!is_v4 && (firstbyte +
6794 6796 ntohs(ip6h->ip6_plen) > 65535))) {
6795 6797			/* It is an invalid "ping-of-death" packet. */
6796 6798			/* Discard it. */
6797 6799 ip_drop_packet_chain(mp, inbound, NULL,
6798 6800 DROPPER(ipss, ipds_spd_evil_frag),
6799 6801 &ipss->ipsec_spd_dropper);
6800 6802 return (NULL);
6801 6803 }
6802 6804 #ifdef FRAGCACHE_DEBUG
6803 6805 cmn_err(CE_WARN, "Fragcache returning mp = %p, "
6804 6806 "mp->b_next = %p", mp, mp->b_next);
6805 6807 #endif
6806 6808 /*
6807 6809 * For inbound case, mp has attrmp b_next'd chain
6808 6810 * For outbound case, it is just data mp chain
6809 6811 */
6810 6812 return (mp);
6811 6813 }
6812 6814
6813 6815 /*
6814 6816 * Update new ending offset if this
6815 6817 * fragment extends the packet
6816 6818 */
6817 6819 if (offset < lastbyte)
6818 6820 offset = lastbyte;
6819 6821 }
6820 6822
6821 6823 mutex_exit(&frag->itpf_lock);
6822 6824
6823 6825 /* Didn't find last fragment, so return NULL */
6824 6826 return (NULL);
6825 6827 }
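The completeness pass at the end of ipsec_fragcache_add() boils down to a gap/completion walk over the offset-ordered fragment list: track the end of the contiguous prefix, give up at the first hole, and succeed when a fragment without the more-fragments flag is reached while the prefix is still intact. A minimal standalone sketch of that bookkeeping follows; frag_t and frag_list_complete() are hypothetical illustrations, not kernel structures or functions.

	typedef struct frag {
		struct frag	*next;		/* list sorted by firstbyte */
		int		firstbyte;	/* offset of first payload byte */
		int		lastbyte;	/* offset just past the last byte */
		int		more;		/* nonzero unless last fragment */
	} frag_t;

	/* Return 1 when the list covers the whole datagram with no holes. */
	static int
	frag_list_complete(const frag_t *list)
	{
		int offset = 0;

		for (; list != NULL; list = list->next) {
			if (list->firstbyte > offset)
				return (0);	/* hole before this fragment */
			if (!list->more)
				return (1);	/* final fragment, prefix intact */
			if (list->lastbyte > offset)
				offset = list->lastbyte;
		}
		return (0);			/* final fragment not seen yet */
	}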
6826 6828
6827 6829 static void
6828 6830 ipsec_fragcache_clean(ipsec_fragcache_t *frag, ipsec_stack_t *ipss)
6829 6831 {
6830 6832 ipsec_fragcache_entry_t *fep;
6831 6833 int i;
6832 6834 ipsec_fragcache_entry_t *earlyfep = NULL;
6833 6835 time_t itpf_time;
6834 6836 int earlyexp;
6835 6837 int earlyi = 0;
6836 6838
6837 6839 ASSERT(MUTEX_HELD(&frag->itpf_lock));
6838 6840
6839 6841 itpf_time = gethrestime_sec();
6840 6842 earlyexp = itpf_time + 10000;
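	/*
	 * earlyexp starts out far in the future.  If no live entries
	 * remain it becomes the new itpf_expire_hint, so the add path
	 * skips cleanups until fresh entries lower the hint again.
	 */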
6841 6843
6842 6844 for (i = 0; i < IPSEC_FRAG_HASH_SLOTS; i++) {
6843 6845 fep = (frag->itpf_ptr)[i];
6844 6846 while (fep) {
6845 6847 if (fep->itpfe_exp < itpf_time) {
6846 6848 /* found */
6847 6849 fep = fragcache_delentry(i, fep, frag, ipss);
6848 6850 } else {
6849 6851 if (fep->itpfe_exp < earlyexp) {
6850 6852 earlyfep = fep;
6851 6853 earlyexp = fep->itpfe_exp;
6852 6854 earlyi = i;
6853 6855 }
6854 6856 fep = fep->itpfe_next;
6855 6857 }
6856 6858 }
6857 6859 }
6858 6860
6859 6861 frag->itpf_expire_hint = earlyexp;
6860 6862
6861 6863	/* If cleaning freed nothing, evict the entry expiring soonest. */
6862 6864 if (frag->itpf_freelist == NULL)
6863 6865 (void) fragcache_delentry(earlyi, earlyfep, frag, ipss);
6864 6866 }
6865 6867
6866 6868 static ipsec_fragcache_entry_t *
6867 6869 fragcache_delentry(int slot, ipsec_fragcache_entry_t *fep,
6868 6870 ipsec_fragcache_t *frag, ipsec_stack_t *ipss)
6869 6871 {
6870 6872 ipsec_fragcache_entry_t *targp;
6871 6873 ipsec_fragcache_entry_t *nextp = fep->itpfe_next;
6872 6874
6873 6875 ASSERT(MUTEX_HELD(&frag->itpf_lock));
6874 6876
6875 6877 /* Free up any fragment list still in cache entry */
6876 6878 if (fep->itpfe_fraglist != NULL) {
6877 6879 ip_drop_packet_chain(fep->itpfe_fraglist,
6878 6880 ip_recv_attr_is_mblk(fep->itpfe_fraglist), NULL,
6879 6881 DROPPER(ipss, ipds_spd_expired_frags),
6880 6882 &ipss->ipsec_spd_dropper);
6881 6883 }
6882 6884 fep->itpfe_fraglist = NULL;
6883 6885
6884 6886 targp = (frag->itpf_ptr)[slot];
6885 6887 ASSERT(targp != 0);
6886 6888
6887 6889 if (targp == fep) {
6888 6890 /* unlink from head of hash chain */
6889 6891 (frag->itpf_ptr)[slot] = nextp;
6890 6892 /* link into free list */
6891 6893 fep->itpfe_next = frag->itpf_freelist;
6892 6894 frag->itpf_freelist = fep;
6893 6895 return (nextp);
6894 6896 }
6895 6897
6896 6898	/* maybe should use a doubly linked list to make updates faster */
6897 6899 /* must be past front of chain */
6898 6900 while (targp) {
6899 6901 if (targp->itpfe_next == fep) {
6900 6902 /* unlink from hash chain */
6901 6903 targp->itpfe_next = nextp;
6902 6904 /* link into free list */
6903 6905 fep->itpfe_next = frag->itpf_freelist;
6904 6906 frag->itpf_freelist = fep;
6905 6907 return (nextp);
6906 6908 }
6907 6909 targp = targp->itpfe_next;
6908 6910 ASSERT(targp != 0);
6909 6911 }
6910 6912 /* NOTREACHED */
6911 6913 return (NULL);
6912 6914 }
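fragcache_delentry() has to walk the bucket from its head to find the predecessor because the chain is singly linked; as the comment above notes, a doubly linked list would make the unlink constant-time. A small sketch of that alternative, using hypothetical field names rather than the real itpfe_* members:

	typedef struct dentry {
		struct dentry	*next;
		struct dentry	*prev;
	} dentry_t;

	static void
	dentry_unlink(dentry_t **bucket, dentry_t **freelist, dentry_t *ep)
	{
		/* O(1) unlink: no walk from the bucket head is needed. */
		if (ep->prev != NULL)
			ep->prev->next = ep->next;
		else
			*bucket = ep->next;	/* ep was the head of the bucket */
		if (ep->next != NULL)
			ep->next->prev = ep->prev;

		/* Push the entry onto the free list (prev is unused there). */
		ep->prev = NULL;
		ep->next = *freelist;
		*freelist = ep;
	}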
|
↓ open down ↓ |
2210 lines elided |
↑ open up ↑ |