8560 Reference leak on ipsec_action_t
Reviewed by: Norm Jacobs <naj@snapcon.com>
Reviewed by: Andy Fiddaman <omnios@citrus-it.net>
--- old/usr/src/uts/common/inet/ip/spd.c
+++ new/usr/src/uts/common/inet/ip/spd.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 * Copyright (c) 2012 Nexenta Systems, Inc. All rights reserved.
25 25 * Copyright (c) 2016 by Delphix. All rights reserved.
26 - * Copyright (c) 2017, Joyent, Inc.
26 + * Copyright (c) 2018, Joyent, Inc.
27 27 */
28 28
29 29 /*
30 30 * IPsec Security Policy Database.
31 31 *
32 32 * This module maintains the SPD and provides routines used by ip and ip6
33 33 * to apply IPsec policy to inbound and outbound datagrams.
34 34 */
35 35
36 36 #include <sys/types.h>
37 37 #include <sys/stream.h>
38 38 #include <sys/stropts.h>
39 39 #include <sys/sysmacros.h>
40 40 #include <sys/strsubr.h>
41 41 #include <sys/strsun.h>
42 42 #include <sys/strlog.h>
43 43 #include <sys/strsun.h>
44 44 #include <sys/cmn_err.h>
45 45 #include <sys/zone.h>
46 46
47 47 #include <sys/systm.h>
48 48 #include <sys/param.h>
49 49 #include <sys/kmem.h>
50 50 #include <sys/ddi.h>
51 51
52 52 #include <sys/crypto/api.h>
53 53
54 54 #include <inet/common.h>
55 55 #include <inet/mi.h>
56 56
57 57 #include <netinet/ip6.h>
58 58 #include <netinet/icmp6.h>
59 59 #include <netinet/udp.h>
60 60
61 61 #include <inet/ip.h>
62 62 #include <inet/ip6.h>
63 63
64 64 #include <net/pfkeyv2.h>
65 65 #include <net/pfpolicy.h>
66 66 #include <inet/sadb.h>
67 67 #include <inet/ipsec_impl.h>
68 68
69 69 #include <inet/ip_impl.h> /* For IP_MOD_ID */
70 70
71 71 #include <inet/ipsecah.h>
72 72 #include <inet/ipsecesp.h>
73 73 #include <inet/ipdrop.h>
74 74 #include <inet/ipclassifier.h>
75 75 #include <inet/iptun.h>
76 76 #include <inet/iptun/iptun_impl.h>
77 77
78 78 static void ipsec_update_present_flags(ipsec_stack_t *);
79 79 static ipsec_act_t *ipsec_act_wildcard_expand(ipsec_act_t *, uint_t *,
80 80 netstack_t *);
81 81 static mblk_t *ipsec_check_ipsecin_policy(mblk_t *, ipsec_policy_t *,
82 82 ipha_t *, ip6_t *, uint64_t, ip_recv_attr_t *, netstack_t *);
83 83 static void ipsec_action_free_table(ipsec_action_t *);
84 84 static void ipsec_action_reclaim(void *);
85 85 static void ipsec_action_reclaim_stack(ipsec_stack_t *);
86 86 static void ipsid_init(netstack_t *);
87 87 static void ipsid_fini(netstack_t *);
88 88
89 89 /* sel_flags values for ipsec_init_inbound_sel(). */
90 90 #define SEL_NONE 0x0000
91 91 #define SEL_PORT_POLICY 0x0001
92 92 #define SEL_IS_ICMP 0x0002
93 93 #define SEL_TUNNEL_MODE 0x0004
94 94 #define SEL_POST_FRAG 0x0008
95 95
96 96 /* Return values for ipsec_init_inbound_sel(). */
97 97 typedef enum { SELRET_NOMEM, SELRET_BADPKT, SELRET_SUCCESS, SELRET_TUNFRAG}
98 98 selret_t;
99 99
100 100 static selret_t ipsec_init_inbound_sel(ipsec_selector_t *, mblk_t *,
101 101 ipha_t *, ip6_t *, uint8_t);
102 102
103 103 static boolean_t ipsec_check_ipsecin_action(ip_recv_attr_t *, mblk_t *,
104 104 struct ipsec_action_s *, ipha_t *ipha, ip6_t *ip6h, const char **,
105 105 kstat_named_t **, netstack_t *);
106 106 static void ipsec_unregister_prov_update(void);
107 107 static void ipsec_prov_update_callback_stack(uint32_t, void *, netstack_t *);
108 108 static boolean_t ipsec_compare_action(ipsec_policy_t *, ipsec_policy_t *);
109 109 static uint32_t selector_hash(ipsec_selector_t *, ipsec_policy_root_t *);
110 110 static boolean_t ipsec_kstat_init(ipsec_stack_t *);
111 111 static void ipsec_kstat_destroy(ipsec_stack_t *);
112 112 static int ipsec_free_tables(ipsec_stack_t *);
113 113 static int tunnel_compare(const void *, const void *);
114 114 static void ipsec_freemsg_chain(mblk_t *);
115 115 static void ip_drop_packet_chain(mblk_t *, boolean_t, ill_t *,
116 116 struct kstat_named *, ipdropper_t *);
117 117 static boolean_t ipsec_kstat_init(ipsec_stack_t *);
118 118 static void ipsec_kstat_destroy(ipsec_stack_t *);
119 119 static int ipsec_free_tables(ipsec_stack_t *);
120 120 static int tunnel_compare(const void *, const void *);
121 121 static void ipsec_freemsg_chain(mblk_t *);
122 122
123 123 /*
124 124 * Selector hash table is statically sized at module load time.
125 125 * we default to 251 buckets, which is the largest prime number under 255
126 126 */
127 127
128 128 #define IPSEC_SPDHASH_DEFAULT 251
129 129
130 130 /* SPD hash-size tunable per tunnel. */
131 131 #define TUN_SPDHASH_DEFAULT 5
132 132
133 133 uint32_t ipsec_spd_hashsize;
134 134 uint32_t tun_spd_hashsize;
135 135
136 136 #define IPSEC_SEL_NOHASH ((uint32_t)(~0))
137 137
138 138 /*
139 139 * Handle global across all stack instances
140 140 */
141 141 static crypto_notify_handle_t prov_update_handle = NULL;
142 142
143 143 static kmem_cache_t *ipsec_action_cache;
144 144 static kmem_cache_t *ipsec_sel_cache;
145 145 static kmem_cache_t *ipsec_pol_cache;
146 146
147 147 /* Frag cache prototypes */
148 148 static void ipsec_fragcache_clean(ipsec_fragcache_t *, ipsec_stack_t *);
149 149 static ipsec_fragcache_entry_t *fragcache_delentry(int,
150 150 ipsec_fragcache_entry_t *, ipsec_fragcache_t *, ipsec_stack_t *);
151 151 boolean_t ipsec_fragcache_init(ipsec_fragcache_t *);
152 152 void ipsec_fragcache_uninit(ipsec_fragcache_t *, ipsec_stack_t *ipss);
153 153 mblk_t *ipsec_fragcache_add(ipsec_fragcache_t *, mblk_t *, mblk_t *,
154 154 int, ipsec_stack_t *);
155 155
156 156 int ipsec_hdr_pullup_needed = 0;
157 157 int ipsec_weird_null_inbound_policy = 0;
158 158
159 159 #define ALGBITS_ROUND_DOWN(x, align) (((x)/(align))*(align))
160 160 #define ALGBITS_ROUND_UP(x, align) ALGBITS_ROUND_DOWN((x)+(align)-1, align)
161 161
162 162 /*
163 163 * Inbound traffic should have matching identities for both SA's.
164 164 */
165 165
166 166 #define SA_IDS_MATCH(sa1, sa2) \
167 167 (((sa1) == NULL) || ((sa2) == NULL) || \
168 168 (((sa1)->ipsa_src_cid == (sa2)->ipsa_src_cid) && \
169 169 (((sa1)->ipsa_dst_cid == (sa2)->ipsa_dst_cid))))
170 170
171 171 /*
172 172 * IPv6 Fragments
173 173 */
174 174 #define IS_V6_FRAGMENT(ipp) (ipp.ipp_fields & IPPF_FRAGHDR)
175 175
176 176 /*
177 177 * Policy failure messages.
178 178 */
179 179 static char *ipsec_policy_failure_msgs[] = {
180 180
181 181 /* IPSEC_POLICY_NOT_NEEDED */
182 182 "%s: Dropping the datagram because the incoming packet "
183 183 "is %s, but the recipient expects clear; Source %s, "
184 184 "Destination %s.\n",
185 185
186 186 /* IPSEC_POLICY_MISMATCH */
187 187 "%s: Policy Failure for the incoming packet (%s); Source %s, "
188 188 "Destination %s.\n",
189 189
190 190 /* IPSEC_POLICY_AUTH_NOT_NEEDED */
191 191 "%s: Authentication present while not expected in the "
192 192 "incoming %s packet; Source %s, Destination %s.\n",
193 193
194 194 /* IPSEC_POLICY_ENCR_NOT_NEEDED */
195 195 "%s: Encryption present while not expected in the "
196 196 "incoming %s packet; Source %s, Destination %s.\n",
197 197
198 198 /* IPSEC_POLICY_SE_NOT_NEEDED */
199 199 "%s: Self-Encapsulation present while not expected in the "
200 200 "incoming %s packet; Source %s, Destination %s.\n",
201 201 };
202 202
203 203 /*
204 204 * General overviews:
205 205 *
206 206 * Locking:
207 207 *
208 208 * All of the system policy structures are protected by a single
209 209 * rwlock. These structures are threaded in a
210 210 * fairly complex fashion and are not expected to change on a
211 211 * regular basis, so this should not cause scaling/contention
212 212 * problems. As a result, policy checks should (hopefully) be MT-hot.
213 213 *
214 214 * Allocation policy:
215 215 *
216 216 * We use custom kmem cache types for the various
217 217 * bits & pieces of the policy data structures. All allocations
218 218 * use KM_NOSLEEP instead of KM_SLEEP for policy allocation. The
219 219 * policy table is of potentially unbounded size, so we don't
220 220 * want to provide a way to hog all system memory with policy
221 221 * entries..
222 222 */
223 223
224 224 /* Convenient functions for freeing or dropping a b_next linked mblk chain */
225 225
226 226 /* Free all messages in an mblk chain */
227 227 static void
228 228 ipsec_freemsg_chain(mblk_t *mp)
229 229 {
230 230 mblk_t *mpnext;
231 231 while (mp != NULL) {
232 232 ASSERT(mp->b_prev == NULL);
233 233 mpnext = mp->b_next;
234 234 mp->b_next = NULL;
235 235 freemsg(mp);
236 236 mp = mpnext;
237 237 }
238 238 }
239 239
240 240 /*
241 241 * ip_drop all messages in an mblk chain
242 242 * Can handle a b_next chain of ip_recv_attr_t mblks, or just a b_next chain
243 243 * of data.
244 244 */
245 245 static void
246 246 ip_drop_packet_chain(mblk_t *mp, boolean_t inbound, ill_t *ill,
247 247 struct kstat_named *counter, ipdropper_t *who_called)
248 248 {
249 249 mblk_t *mpnext;
250 250 while (mp != NULL) {
251 251 ASSERT(mp->b_prev == NULL);
252 252 mpnext = mp->b_next;
253 253 mp->b_next = NULL;
254 254 if (ip_recv_attr_is_mblk(mp))
255 255 mp = ip_recv_attr_free_mblk(mp);
256 256 ip_drop_packet(mp, inbound, ill, counter, who_called);
257 257 mp = mpnext;
258 258 }
259 259 }
260 260
261 261 /*
262 262 * AVL tree comparison function.
263 263 * the in-kernel avl assumes unique keys for all objects.
264 264 * Since sometimes policy will duplicate rules, we may insert
265 265 * multiple rules with the same rule id, so we need a tie-breaker.
266 266 */
267 267 static int
268 268 ipsec_policy_cmpbyid(const void *a, const void *b)
269 269 {
270 270 const ipsec_policy_t *ipa, *ipb;
271 271 uint64_t idxa, idxb;
272 272
273 273 ipa = (const ipsec_policy_t *)a;
274 274 ipb = (const ipsec_policy_t *)b;
275 275 idxa = ipa->ipsp_index;
276 276 idxb = ipb->ipsp_index;
277 277
278 278 if (idxa < idxb)
279 279 return (-1);
280 280 if (idxa > idxb)
281 281 return (1);
282 282 /*
283 283 * Tie-breaker #1: All installed policy rules have a non-NULL
284 284 * ipsl_sel (selector set), so an entry with a NULL ipsp_sel is not
285 285 * actually in-tree but rather a template node being used in
286 286 * an avl_find query; see ipsec_policy_delete(). This gives us
287 287 * a placeholder in the ordering just before the first entry with
288 288 * a key >= the one we're looking for, so we can walk forward from
289 289 * that point to get the remaining entries with the same id.
290 290 */
291 291 if ((ipa->ipsp_sel == NULL) && (ipb->ipsp_sel != NULL))
292 292 return (-1);
293 293 if ((ipb->ipsp_sel == NULL) && (ipa->ipsp_sel != NULL))
294 294 return (1);
295 295 /*
296 296 * At most one of the arguments to the comparison should have a
297 297 * NULL selector pointer; if not, the tree is broken.
298 298 */
299 299 ASSERT(ipa->ipsp_sel != NULL);
300 300 ASSERT(ipb->ipsp_sel != NULL);
301 301 /*
302 302 * Tie-breaker #2: use the virtual address of the policy node
303 303 * to arbitrarily break ties. Since we use the new tree node in
304 304 * the avl_find() in ipsec_insert_always, the new node will be
305 305 * inserted into the tree in the right place in the sequence.
306 306 */
307 307 if (ipa < ipb)
308 308 return (-1);
309 309 if (ipa > ipb)
310 310 return (1);
311 311 return (0);
312 312 }
313 313
314 314 /*
315 315 * Free what ipsec_alloc_table allocated.
316 316 */
317 317 void
318 318 ipsec_polhead_free_table(ipsec_policy_head_t *iph)
319 319 {
320 320 int dir;
321 321 int i;
322 322
323 323 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
324 324 ipsec_policy_root_t *ipr = &iph->iph_root[dir];
325 325
326 326 if (ipr->ipr_hash == NULL)
327 327 continue;
328 328
329 329 for (i = 0; i < ipr->ipr_nchains; i++) {
330 330 ASSERT(ipr->ipr_hash[i].hash_head == NULL);
331 331 }
332 332 kmem_free(ipr->ipr_hash, ipr->ipr_nchains *
333 333 sizeof (ipsec_policy_hash_t));
334 334 ipr->ipr_hash = NULL;
335 335 }
336 336 }
337 337
338 338 void
339 339 ipsec_polhead_destroy(ipsec_policy_head_t *iph)
340 340 {
341 341 int dir;
342 342
343 343 avl_destroy(&iph->iph_rulebyid);
344 344 rw_destroy(&iph->iph_lock);
345 345
346 346 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
347 347 ipsec_policy_root_t *ipr = &iph->iph_root[dir];
348 348 int chain;
349 349
350 350 for (chain = 0; chain < ipr->ipr_nchains; chain++)
351 351 mutex_destroy(&(ipr->ipr_hash[chain].hash_lock));
352 352
353 353 }
354 354 ipsec_polhead_free_table(iph);
355 355 }
356 356
357 357 /*
358 358 * Free the IPsec stack instance.
359 359 */
360 360 /* ARGSUSED */
361 361 static void
362 362 ipsec_stack_fini(netstackid_t stackid, void *arg)
363 363 {
364 364 ipsec_stack_t *ipss = (ipsec_stack_t *)arg;
365 365 void *cookie;
366 366 ipsec_tun_pol_t *node;
367 367 netstack_t *ns = ipss->ipsec_netstack;
368 368 int i;
369 369 ipsec_algtype_t algtype;
370 370
371 371 ipsec_loader_destroy(ipss);
372 372
373 373 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_WRITER);
374 374 /*
375 375 * It's possible we can just ASSERT() the tree is empty. After all,
376 376 * we aren't called until IP is ready to unload (and presumably all
377 377 * tunnels have been unplumbed). But we'll play it safe for now, the
378 378 * loop will just exit immediately if it's empty.
379 379 */
380 380 cookie = NULL;
381 381 while ((node = (ipsec_tun_pol_t *)
382 382 avl_destroy_nodes(&ipss->ipsec_tunnel_policies,
383 383 &cookie)) != NULL) {
384 384 ITP_REFRELE(node, ns);
385 385 }
386 386 avl_destroy(&ipss->ipsec_tunnel_policies);
387 387 rw_exit(&ipss->ipsec_tunnel_policy_lock);
388 388 rw_destroy(&ipss->ipsec_tunnel_policy_lock);
389 389
390 390 ipsec_config_flush(ns);
391 391
392 392 ipsec_kstat_destroy(ipss);
393 393
394 394 ip_drop_unregister(&ipss->ipsec_dropper);
395 395
396 396 ip_drop_unregister(&ipss->ipsec_spd_dropper);
397 397 ip_drop_destroy(ipss);
398 398 /*
399 399 * Globals start with ref == 1 to prevent IPPH_REFRELE() from
400 400 * attempting to free them, hence they should have 1 now.
401 401 */
402 402 ipsec_polhead_destroy(&ipss->ipsec_system_policy);
403 403 ASSERT(ipss->ipsec_system_policy.iph_refs == 1);
404 404 ipsec_polhead_destroy(&ipss->ipsec_inactive_policy);
405 405 ASSERT(ipss->ipsec_inactive_policy.iph_refs == 1);
406 406
407 407 for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++) {
408 408 ipsec_action_free_table(ipss->ipsec_action_hash[i].hash_head);
409 409 ipss->ipsec_action_hash[i].hash_head = NULL;
410 410 mutex_destroy(&(ipss->ipsec_action_hash[i].hash_lock));
411 411 }
412 412
413 413 for (i = 0; i < ipss->ipsec_spd_hashsize; i++) {
414 414 ASSERT(ipss->ipsec_sel_hash[i].hash_head == NULL);
415 415 mutex_destroy(&(ipss->ipsec_sel_hash[i].hash_lock));
416 416 }
417 417
418 418 rw_enter(&ipss->ipsec_alg_lock, RW_WRITER);
419 419 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype ++) {
420 420 for (i = 0; i < IPSEC_MAX_ALGS; i++) {
421 421 if (ipss->ipsec_alglists[algtype][i] != NULL)
422 422 ipsec_alg_unreg(algtype, i, ns);
423 423 }
424 424 }
425 425 rw_exit(&ipss->ipsec_alg_lock);
426 426 rw_destroy(&ipss->ipsec_alg_lock);
427 427
428 428 ipsid_gc(ns);
429 429 ipsid_fini(ns);
430 430
431 431 (void) ipsec_free_tables(ipss);
432 432 kmem_free(ipss, sizeof (*ipss));
433 433 }
434 434
435 435 void
436 436 ipsec_policy_g_destroy(void)
437 437 {
438 438 kmem_cache_destroy(ipsec_action_cache);
439 439 kmem_cache_destroy(ipsec_sel_cache);
440 440 kmem_cache_destroy(ipsec_pol_cache);
441 441
442 442 ipsec_unregister_prov_update();
443 443
444 444 netstack_unregister(NS_IPSEC);
445 445 }
446 446
447 447
448 448 /*
449 449 * Free what ipsec_alloc_tables allocated.
450 450 * Called when table allocation fails to free the table.
451 451 */
452 452 static int
453 453 ipsec_free_tables(ipsec_stack_t *ipss)
454 454 {
455 455 int i;
456 456
457 457 if (ipss->ipsec_sel_hash != NULL) {
458 458 for (i = 0; i < ipss->ipsec_spd_hashsize; i++) {
459 459 ASSERT(ipss->ipsec_sel_hash[i].hash_head == NULL);
460 460 }
461 461 kmem_free(ipss->ipsec_sel_hash, ipss->ipsec_spd_hashsize *
462 462 sizeof (*ipss->ipsec_sel_hash));
463 463 ipss->ipsec_sel_hash = NULL;
464 464 ipss->ipsec_spd_hashsize = 0;
465 465 }
466 466 ipsec_polhead_free_table(&ipss->ipsec_system_policy);
467 467 ipsec_polhead_free_table(&ipss->ipsec_inactive_policy);
468 468
469 469 return (ENOMEM);
470 470 }
471 471
472 472 /*
473 473 * Attempt to allocate the tables in a single policy head.
474 474 * Return nonzero on failure after cleaning up any work in progress.
475 475 */
476 476 int
477 477 ipsec_alloc_table(ipsec_policy_head_t *iph, int nchains, int kmflag,
478 478 boolean_t global_cleanup, netstack_t *ns)
479 479 {
480 480 int dir;
481 481
482 482 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
483 483 ipsec_policy_root_t *ipr = &iph->iph_root[dir];
484 484
485 485 ipr->ipr_nchains = nchains;
486 486 ipr->ipr_hash = kmem_zalloc(nchains *
487 487 sizeof (ipsec_policy_hash_t), kmflag);
488 488 if (ipr->ipr_hash == NULL)
489 489 return (global_cleanup ?
490 490 ipsec_free_tables(ns->netstack_ipsec) :
491 491 ENOMEM);
492 492 }
493 493 return (0);
494 494 }
495 495
496 496 /*
497 497 * Attempt to allocate the various tables. Return nonzero on failure
498 498 * after cleaning up any work in progress.
499 499 */
500 500 static int
501 501 ipsec_alloc_tables(int kmflag, netstack_t *ns)
502 502 {
503 503 int error;
504 504 ipsec_stack_t *ipss = ns->netstack_ipsec;
505 505
506 506 error = ipsec_alloc_table(&ipss->ipsec_system_policy,
507 507 ipss->ipsec_spd_hashsize, kmflag, B_TRUE, ns);
508 508 if (error != 0)
509 509 return (error);
510 510
511 511 error = ipsec_alloc_table(&ipss->ipsec_inactive_policy,
512 512 ipss->ipsec_spd_hashsize, kmflag, B_TRUE, ns);
513 513 if (error != 0)
514 514 return (error);
515 515
516 516 ipss->ipsec_sel_hash = kmem_zalloc(ipss->ipsec_spd_hashsize *
517 517 sizeof (*ipss->ipsec_sel_hash), kmflag);
518 518
519 519 if (ipss->ipsec_sel_hash == NULL)
520 520 return (ipsec_free_tables(ipss));
521 521
522 522 return (0);
523 523 }
524 524
525 525 /*
526 526 * After table allocation, initialize a policy head.
527 527 */
528 528 void
529 529 ipsec_polhead_init(ipsec_policy_head_t *iph, int nchains)
530 530 {
531 531 int dir, chain;
532 532
533 533 rw_init(&iph->iph_lock, NULL, RW_DEFAULT, NULL);
534 534 avl_create(&iph->iph_rulebyid, ipsec_policy_cmpbyid,
535 535 sizeof (ipsec_policy_t), offsetof(ipsec_policy_t, ipsp_byid));
536 536
537 537 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
538 538 ipsec_policy_root_t *ipr = &iph->iph_root[dir];
539 539 ipr->ipr_nchains = nchains;
540 540
541 541 for (chain = 0; chain < nchains; chain++) {
542 542 mutex_init(&(ipr->ipr_hash[chain].hash_lock),
543 543 NULL, MUTEX_DEFAULT, NULL);
544 544 }
545 545 }
546 546 }
547 547
548 548 static boolean_t
549 549 ipsec_kstat_init(ipsec_stack_t *ipss)
550 550 {
551 551 ipss->ipsec_ksp = kstat_create_netstack("ip", 0, "ipsec_stat", "net",
552 552 KSTAT_TYPE_NAMED, sizeof (ipsec_kstats_t) / sizeof (kstat_named_t),
553 553 KSTAT_FLAG_PERSISTENT, ipss->ipsec_netstack->netstack_stackid);
554 554
555 555 if (ipss->ipsec_ksp == NULL || ipss->ipsec_ksp->ks_data == NULL)
556 556 return (B_FALSE);
557 557
558 558 ipss->ipsec_kstats = ipss->ipsec_ksp->ks_data;
559 559
560 560 #define KI(x) kstat_named_init(&ipss->ipsec_kstats->x, #x, KSTAT_DATA_UINT64)
561 561 KI(esp_stat_in_requests);
562 562 KI(esp_stat_in_discards);
563 563 KI(esp_stat_lookup_failure);
564 564 KI(ah_stat_in_requests);
565 565 KI(ah_stat_in_discards);
566 566 KI(ah_stat_lookup_failure);
567 567 KI(sadb_acquire_maxpackets);
568 568 KI(sadb_acquire_qhiwater);
569 569 #undef KI
570 570
571 571 kstat_install(ipss->ipsec_ksp);
572 572 return (B_TRUE);
573 573 }
574 574
575 575 static void
576 576 ipsec_kstat_destroy(ipsec_stack_t *ipss)
577 577 {
578 578 kstat_delete_netstack(ipss->ipsec_ksp,
579 579 ipss->ipsec_netstack->netstack_stackid);
580 580 ipss->ipsec_kstats = NULL;
581 581
582 582 }
583 583
584 584 /*
585 585 * Initialize the IPsec stack instance.
586 586 */
587 587 /* ARGSUSED */
588 588 static void *
589 589 ipsec_stack_init(netstackid_t stackid, netstack_t *ns)
590 590 {
591 591 ipsec_stack_t *ipss;
592 592 int i;
593 593
594 594 ipss = (ipsec_stack_t *)kmem_zalloc(sizeof (*ipss), KM_SLEEP);
595 595 ipss->ipsec_netstack = ns;
596 596
597 597 /*
598 598 * FIXME: netstack_ipsec is used by some of the routines we call
599 599 * below, but it isn't set until this routine returns.
600 600 * Either we introduce optional xxx_stack_alloc() functions
601 601 * that will be called by the netstack framework before xxx_stack_init,
602 602 * or we switch spd.c and sadb.c to operate on ipsec_stack_t
603 603 * (latter has some include file order issues for sadb.h, but makes
 604  604  	 * sense if we merge some of the ipsec related stack_t's together).
605 605 */
606 606 ns->netstack_ipsec = ipss;
607 607
608 608 /*
609 609 * Make two attempts to allocate policy hash tables; try it at
610 610 * the "preferred" size (may be set in /etc/system) first,
611 611 * then fall back to the default size.
612 612 */
613 613 ipss->ipsec_spd_hashsize = (ipsec_spd_hashsize == 0) ?
614 614 IPSEC_SPDHASH_DEFAULT : ipsec_spd_hashsize;
615 615
616 616 if (ipsec_alloc_tables(KM_NOSLEEP, ns) != 0) {
617 617 cmn_err(CE_WARN,
618 618 "Unable to allocate %d entry IPsec policy hash table",
619 619 ipss->ipsec_spd_hashsize);
620 620 ipss->ipsec_spd_hashsize = IPSEC_SPDHASH_DEFAULT;
621 621 cmn_err(CE_WARN, "Falling back to %d entries",
622 622 ipss->ipsec_spd_hashsize);
623 623 (void) ipsec_alloc_tables(KM_SLEEP, ns);
624 624 }
625 625
626 626 /* Just set a default for tunnels. */
627 627 ipss->ipsec_tun_spd_hashsize = (tun_spd_hashsize == 0) ?
628 628 TUN_SPDHASH_DEFAULT : tun_spd_hashsize;
629 629
630 630 ipsid_init(ns);
631 631 /*
632 632 * Globals need ref == 1 to prevent IPPH_REFRELE() from attempting
633 633 * to free them.
634 634 */
635 635 ipss->ipsec_system_policy.iph_refs = 1;
636 636 ipss->ipsec_inactive_policy.iph_refs = 1;
637 637 ipsec_polhead_init(&ipss->ipsec_system_policy,
638 638 ipss->ipsec_spd_hashsize);
639 639 ipsec_polhead_init(&ipss->ipsec_inactive_policy,
640 640 ipss->ipsec_spd_hashsize);
641 641 rw_init(&ipss->ipsec_tunnel_policy_lock, NULL, RW_DEFAULT, NULL);
642 642 avl_create(&ipss->ipsec_tunnel_policies, tunnel_compare,
643 643 sizeof (ipsec_tun_pol_t), 0);
644 644
645 645 ipss->ipsec_next_policy_index = 1;
646 646
647 647 rw_init(&ipss->ipsec_system_policy.iph_lock, NULL, RW_DEFAULT, NULL);
648 648 rw_init(&ipss->ipsec_inactive_policy.iph_lock, NULL, RW_DEFAULT, NULL);
649 649
650 650 for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++)
651 651 mutex_init(&(ipss->ipsec_action_hash[i].hash_lock),
652 652 NULL, MUTEX_DEFAULT, NULL);
653 653
654 654 for (i = 0; i < ipss->ipsec_spd_hashsize; i++)
655 655 mutex_init(&(ipss->ipsec_sel_hash[i].hash_lock),
656 656 NULL, MUTEX_DEFAULT, NULL);
657 657
658 658 rw_init(&ipss->ipsec_alg_lock, NULL, RW_DEFAULT, NULL);
659 659 for (i = 0; i < IPSEC_NALGTYPES; i++) {
660 660 ipss->ipsec_nalgs[i] = 0;
661 661 }
662 662
663 663 ip_drop_init(ipss);
664 664 ip_drop_register(&ipss->ipsec_spd_dropper, "IPsec SPD");
665 665
666 666 /* IP's IPsec code calls the packet dropper */
667 667 ip_drop_register(&ipss->ipsec_dropper, "IP IPsec processing");
668 668
669 669 (void) ipsec_kstat_init(ipss);
670 670
671 671 ipsec_loader_init(ipss);
672 672 ipsec_loader_start(ipss);
673 673
674 674 return (ipss);
675 675 }
676 676
677 677 /* Global across all stack instances */
678 678 void
679 679 ipsec_policy_g_init(void)
680 680 {
681 681 ipsec_action_cache = kmem_cache_create("ipsec_actions",
682 682 sizeof (ipsec_action_t), _POINTER_ALIGNMENT, NULL, NULL,
683 683 ipsec_action_reclaim, NULL, NULL, 0);
684 684 ipsec_sel_cache = kmem_cache_create("ipsec_selectors",
685 685 sizeof (ipsec_sel_t), _POINTER_ALIGNMENT, NULL, NULL,
686 686 NULL, NULL, NULL, 0);
687 687 ipsec_pol_cache = kmem_cache_create("ipsec_policy",
688 688 sizeof (ipsec_policy_t), _POINTER_ALIGNMENT, NULL, NULL,
689 689 NULL, NULL, NULL, 0);
690 690
691 691 /*
692 692 * We want to be informed each time a stack is created or
693 693 * destroyed in the kernel, so we can maintain the
694 694 * set of ipsec_stack_t's.
695 695 */
696 696 netstack_register(NS_IPSEC, ipsec_stack_init, NULL, ipsec_stack_fini);
697 697 }
698 698
699 699 /*
700 700 * Sort algorithm lists.
701 701 *
702 702 * I may need to split this based on
703 703 * authentication/encryption, and I may wish to have an administrator
704 704 * configure this list. Hold on to some NDD variables...
705 705 *
706 706 * XXX For now, sort on minimum key size (GAG!). While minimum key size is
707 707 * not the ideal metric, it's the only quantifiable measure available.
708 708 * We need a better metric for sorting algorithms by preference.
709 709 */
710 710 static void
711 711 alg_insert_sortlist(enum ipsec_algtype at, uint8_t algid, netstack_t *ns)
712 712 {
713 713 ipsec_stack_t *ipss = ns->netstack_ipsec;
714 714 ipsec_alginfo_t *ai = ipss->ipsec_alglists[at][algid];
715 715 uint8_t holder, swap;
716 716 uint_t i;
717 717 uint_t count = ipss->ipsec_nalgs[at];
718 718 ASSERT(ai != NULL);
719 719 ASSERT(algid == ai->alg_id);
720 720
721 721 ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
722 722
723 723 holder = algid;
724 724
725 725 for (i = 0; i < count - 1; i++) {
726 726 ipsec_alginfo_t *alt;
727 727
728 728 alt = ipss->ipsec_alglists[at][ipss->ipsec_sortlist[at][i]];
729 729 /*
730 730 * If you want to give precedence to newly added algs,
731 731 * add the = in the > comparison.
732 732 */
733 733 if ((holder != algid) || (ai->alg_minbits > alt->alg_minbits)) {
734 734 /* Swap sortlist[i] and holder. */
735 735 swap = ipss->ipsec_sortlist[at][i];
736 736 ipss->ipsec_sortlist[at][i] = holder;
737 737 holder = swap;
738 738 ai = alt;
739 739 } /* Else just continue. */
740 740 }
741 741
742 742 /* Store holder in last slot. */
743 743 ipss->ipsec_sortlist[at][i] = holder;
744 744 }
745 745
746 746 /*
747 747 * Remove an algorithm from a sorted algorithm list.
748 748 * This should be considerably easier, even with complex sorting.
749 749 */
750 750 static void
751 751 alg_remove_sortlist(enum ipsec_algtype at, uint8_t algid, netstack_t *ns)
752 752 {
753 753 boolean_t copyback = B_FALSE;
754 754 int i;
755 755 ipsec_stack_t *ipss = ns->netstack_ipsec;
756 756 int newcount = ipss->ipsec_nalgs[at];
757 757
758 758 ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
759 759
760 760 for (i = 0; i <= newcount; i++) {
761 761 if (copyback) {
762 762 ipss->ipsec_sortlist[at][i-1] =
763 763 ipss->ipsec_sortlist[at][i];
764 764 } else if (ipss->ipsec_sortlist[at][i] == algid) {
765 765 copyback = B_TRUE;
766 766 }
767 767 }
768 768 }
769 769
770 770 /*
771 771 * Add the specified algorithm to the algorithm tables.
772 772 * Must be called while holding the algorithm table writer lock.
773 773 */
774 774 void
775 775 ipsec_alg_reg(ipsec_algtype_t algtype, ipsec_alginfo_t *alg, netstack_t *ns)
776 776 {
777 777 ipsec_stack_t *ipss = ns->netstack_ipsec;
778 778
779 779 ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
780 780
781 781 ASSERT(ipss->ipsec_alglists[algtype][alg->alg_id] == NULL);
782 782 ipsec_alg_fix_min_max(alg, algtype, ns);
783 783 ipss->ipsec_alglists[algtype][alg->alg_id] = alg;
784 784
785 785 ipss->ipsec_nalgs[algtype]++;
786 786 alg_insert_sortlist(algtype, alg->alg_id, ns);
787 787 }
788 788
789 789 /*
790 790 * Remove the specified algorithm from the algorithm tables.
791 791 * Must be called while holding the algorithm table writer lock.
792 792 */
793 793 void
794 794 ipsec_alg_unreg(ipsec_algtype_t algtype, uint8_t algid, netstack_t *ns)
795 795 {
796 796 ipsec_stack_t *ipss = ns->netstack_ipsec;
797 797
798 798 ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
799 799
800 800 ASSERT(ipss->ipsec_alglists[algtype][algid] != NULL);
801 801 ipsec_alg_free(ipss->ipsec_alglists[algtype][algid]);
802 802 ipss->ipsec_alglists[algtype][algid] = NULL;
803 803
804 804 ipss->ipsec_nalgs[algtype]--;
805 805 alg_remove_sortlist(algtype, algid, ns);
806 806 }
807 807
808 808 /*
809 809 * Hooks for spdsock to get a grip on system policy.
810 810 */
811 811
812 812 ipsec_policy_head_t *
813 813 ipsec_system_policy(netstack_t *ns)
814 814 {
815 815 ipsec_stack_t *ipss = ns->netstack_ipsec;
816 816 ipsec_policy_head_t *h = &ipss->ipsec_system_policy;
817 817
818 818 IPPH_REFHOLD(h);
819 819 return (h);
820 820 }
821 821
822 822 ipsec_policy_head_t *
823 823 ipsec_inactive_policy(netstack_t *ns)
824 824 {
825 825 ipsec_stack_t *ipss = ns->netstack_ipsec;
826 826 ipsec_policy_head_t *h = &ipss->ipsec_inactive_policy;
827 827
828 828 IPPH_REFHOLD(h);
829 829 return (h);
830 830 }
831 831
832 832 /*
833 833 * Lock inactive policy, then active policy, then exchange policy root
834 834 * pointers.
835 835 */
836 836 void
837 837 ipsec_swap_policy(ipsec_policy_head_t *active, ipsec_policy_head_t *inactive,
838 838 netstack_t *ns)
839 839 {
840 840 int af, dir;
841 841 avl_tree_t r1, r2;
842 842
843 843 rw_enter(&inactive->iph_lock, RW_WRITER);
844 844 rw_enter(&active->iph_lock, RW_WRITER);
845 845
846 846 r1 = active->iph_rulebyid;
847 847 r2 = inactive->iph_rulebyid;
848 848 active->iph_rulebyid = r2;
849 849 inactive->iph_rulebyid = r1;
850 850
851 851 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
852 852 ipsec_policy_hash_t *h1, *h2;
853 853
854 854 h1 = active->iph_root[dir].ipr_hash;
855 855 h2 = inactive->iph_root[dir].ipr_hash;
856 856 active->iph_root[dir].ipr_hash = h2;
857 857 inactive->iph_root[dir].ipr_hash = h1;
858 858
859 859 for (af = 0; af < IPSEC_NAF; af++) {
860 860 ipsec_policy_t *t1, *t2;
861 861
862 862 t1 = active->iph_root[dir].ipr_nonhash[af];
863 863 t2 = inactive->iph_root[dir].ipr_nonhash[af];
864 864 active->iph_root[dir].ipr_nonhash[af] = t2;
865 865 inactive->iph_root[dir].ipr_nonhash[af] = t1;
866 866 if (t1 != NULL) {
867 867 t1->ipsp_hash.hash_pp =
868 868 &(inactive->iph_root[dir].ipr_nonhash[af]);
869 869 }
870 870 if (t2 != NULL) {
871 871 t2->ipsp_hash.hash_pp =
872 872 &(active->iph_root[dir].ipr_nonhash[af]);
873 873 }
874 874
875 875 }
876 876 }
877 877 active->iph_gen++;
878 878 inactive->iph_gen++;
879 879 ipsec_update_present_flags(ns->netstack_ipsec);
880 880 rw_exit(&active->iph_lock);
881 881 rw_exit(&inactive->iph_lock);
882 882 }
883 883
884 884 /*
885 885 * Swap global policy primary/secondary.
886 886 */
887 887 void
888 888 ipsec_swap_global_policy(netstack_t *ns)
889 889 {
890 890 ipsec_stack_t *ipss = ns->netstack_ipsec;
891 891
892 892 ipsec_swap_policy(&ipss->ipsec_system_policy,
893 893 &ipss->ipsec_inactive_policy, ns);
894 894 }
895 895
896 896 /*
897 897 * Clone one policy rule..
898 898 */
899 899 static ipsec_policy_t *
900 900 ipsec_copy_policy(const ipsec_policy_t *src)
901 901 {
902 902 ipsec_policy_t *dst = kmem_cache_alloc(ipsec_pol_cache, KM_NOSLEEP);
903 903
904 904 if (dst == NULL)
905 905 return (NULL);
906 906
907 907 /*
908 908 * Adjust refcounts of cloned state.
909 909 */
910 910 IPACT_REFHOLD(src->ipsp_act);
911 911 src->ipsp_sel->ipsl_refs++;
912 912
913 913 HASH_NULL(dst, ipsp_hash);
914 914 dst->ipsp_netstack = src->ipsp_netstack;
915 915 dst->ipsp_refs = 1;
916 916 dst->ipsp_sel = src->ipsp_sel;
917 917 dst->ipsp_act = src->ipsp_act;
918 918 dst->ipsp_prio = src->ipsp_prio;
919 919 dst->ipsp_index = src->ipsp_index;
920 920
921 921 return (dst);
922 922 }
923 923
924 924 void
925 925 ipsec_insert_always(avl_tree_t *tree, void *new_node)
926 926 {
927 927 void *node;
928 928 avl_index_t where;
929 929
930 930 node = avl_find(tree, new_node, &where);
931 931 ASSERT(node == NULL);
932 932 avl_insert(tree, new_node, where);
933 933 }
934 934
935 935
936 936 static int
937 937 ipsec_copy_chain(ipsec_policy_head_t *dph, ipsec_policy_t *src,
938 938 ipsec_policy_t **dstp)
939 939 {
940 940 for (; src != NULL; src = src->ipsp_hash.hash_next) {
941 941 ipsec_policy_t *dst = ipsec_copy_policy(src);
942 942 if (dst == NULL)
943 943 return (ENOMEM);
944 944
945 945 HASHLIST_INSERT(dst, ipsp_hash, *dstp);
946 946 ipsec_insert_always(&dph->iph_rulebyid, dst);
947 947 }
948 948 return (0);
949 949 }
950 950
951 951
952 952
953 953 /*
954 954 * Make one policy head look exactly like another.
955 955 *
956 956 * As with ipsec_swap_policy, we lock the destination policy head first, then
957 957 * the source policy head. Note that we only need to read-lock the source
958 958 * policy head as we are not changing it.
959 959 */
960 960 int
961 961 ipsec_copy_polhead(ipsec_policy_head_t *sph, ipsec_policy_head_t *dph,
962 962 netstack_t *ns)
963 963 {
964 964 int af, dir, chain, nchains;
965 965
966 966 rw_enter(&dph->iph_lock, RW_WRITER);
967 967
968 968 ipsec_polhead_flush(dph, ns);
969 969
970 970 rw_enter(&sph->iph_lock, RW_READER);
971 971
972 972 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
973 973 ipsec_policy_root_t *dpr = &dph->iph_root[dir];
974 974 ipsec_policy_root_t *spr = &sph->iph_root[dir];
975 975 nchains = dpr->ipr_nchains;
976 976
977 977 ASSERT(dpr->ipr_nchains == spr->ipr_nchains);
978 978
979 979 for (af = 0; af < IPSEC_NAF; af++) {
980 980 if (ipsec_copy_chain(dph, spr->ipr_nonhash[af],
981 981 &dpr->ipr_nonhash[af]))
982 982 goto abort_copy;
983 983 }
984 984
985 985 for (chain = 0; chain < nchains; chain++) {
986 986 if (ipsec_copy_chain(dph,
987 987 spr->ipr_hash[chain].hash_head,
988 988 &dpr->ipr_hash[chain].hash_head))
989 989 goto abort_copy;
990 990 }
991 991 }
992 992
993 993 dph->iph_gen++;
994 994
995 995 rw_exit(&sph->iph_lock);
996 996 rw_exit(&dph->iph_lock);
997 997 return (0);
998 998
999 999 abort_copy:
1000 1000 ipsec_polhead_flush(dph, ns);
1001 1001 rw_exit(&sph->iph_lock);
1002 1002 rw_exit(&dph->iph_lock);
1003 1003 return (ENOMEM);
1004 1004 }
1005 1005
1006 1006 /*
1007 1007 * Clone currently active policy to the inactive policy list.
1008 1008 */
1009 1009 int
1010 1010 ipsec_clone_system_policy(netstack_t *ns)
1011 1011 {
1012 1012 ipsec_stack_t *ipss = ns->netstack_ipsec;
1013 1013
1014 1014 return (ipsec_copy_polhead(&ipss->ipsec_system_policy,
1015 1015 &ipss->ipsec_inactive_policy, ns));
1016 1016 }
1017 1017
1018 1018 /*
1019 1019 * Extract the string from ipsec_policy_failure_msgs[type] and
1020 1020 * log it.
1021 1021 *
1022 1022 */
1023 1023 void
1024 1024 ipsec_log_policy_failure(int type, char *func_name, ipha_t *ipha, ip6_t *ip6h,
1025 1025 boolean_t secure, netstack_t *ns)
1026 1026 {
1027 1027 char sbuf[INET6_ADDRSTRLEN];
1028 1028 char dbuf[INET6_ADDRSTRLEN];
1029 1029 char *s;
1030 1030 char *d;
1031 1031 ipsec_stack_t *ipss = ns->netstack_ipsec;
1032 1032
1033 1033 ASSERT((ipha == NULL && ip6h != NULL) ||
1034 1034 (ip6h == NULL && ipha != NULL));
1035 1035
1036 1036 if (ipha != NULL) {
1037 1037 s = inet_ntop(AF_INET, &ipha->ipha_src, sbuf, sizeof (sbuf));
1038 1038 d = inet_ntop(AF_INET, &ipha->ipha_dst, dbuf, sizeof (dbuf));
1039 1039 } else {
1040 1040 s = inet_ntop(AF_INET6, &ip6h->ip6_src, sbuf, sizeof (sbuf));
1041 1041 d = inet_ntop(AF_INET6, &ip6h->ip6_dst, dbuf, sizeof (dbuf));
1042 1042
1043 1043 }
1044 1044
1045 1045 /* Always bump the policy failure counter. */
1046 1046 ipss->ipsec_policy_failure_count[type]++;
1047 1047
1048 1048 ipsec_rl_strlog(ns, IP_MOD_ID, 0, 0, SL_ERROR|SL_WARN|SL_CONSOLE,
1049 1049 ipsec_policy_failure_msgs[type], func_name,
1050 1050 (secure ? "secure" : "not secure"), s, d);
1051 1051 }
1052 1052
1053 1053 /*
1054 1054 * Rate-limiting front-end to strlog() for AH and ESP. Uses the ndd variables
1055 1055 * in /dev/ip and the same rate-limiting clock so that there's a single
1056 1056 * knob to turn to throttle the rate of messages.
1057 1057 */
1058 1058 void
1059 1059 ipsec_rl_strlog(netstack_t *ns, short mid, short sid, char level, ushort_t sl,
1060 1060 char *fmt, ...)
1061 1061 {
1062 1062 va_list adx;
1063 1063 hrtime_t current = gethrtime();
1064 1064 ip_stack_t *ipst = ns->netstack_ip;
1065 1065 ipsec_stack_t *ipss = ns->netstack_ipsec;
1066 1066
1067 1067 sl |= SL_CONSOLE;
1068 1068 /*
1069 1069 * Throttle logging to stop syslog from being swamped. If variable
1070 1070 * 'ipsec_policy_log_interval' is zero, don't log any messages at
1071 1071 * all, otherwise log only one message every 'ipsec_policy_log_interval'
1072 1072 * msec. Convert interval (in msec) to hrtime (in nsec).
1073 1073 */
1074 1074
1075 1075 if (ipst->ips_ipsec_policy_log_interval) {
1076 1076 if (ipss->ipsec_policy_failure_last +
1077 1077 MSEC2NSEC(ipst->ips_ipsec_policy_log_interval) <= current) {
1078 1078 va_start(adx, fmt);
1079 1079 (void) vstrlog(mid, sid, level, sl, fmt, adx);
1080 1080 va_end(adx);
1081 1081 ipss->ipsec_policy_failure_last = current;
1082 1082 }
1083 1083 }
1084 1084 }
1085 1085
1086 1086 void
1087 1087 ipsec_config_flush(netstack_t *ns)
1088 1088 {
1089 1089 ipsec_stack_t *ipss = ns->netstack_ipsec;
1090 1090
1091 1091 rw_enter(&ipss->ipsec_system_policy.iph_lock, RW_WRITER);
1092 1092 ipsec_polhead_flush(&ipss->ipsec_system_policy, ns);
1093 1093 ipss->ipsec_next_policy_index = 1;
1094 1094 rw_exit(&ipss->ipsec_system_policy.iph_lock);
1095 1095 ipsec_action_reclaim_stack(ipss);
1096 1096 }
1097 1097
1098 1098 /*
1099 1099 * Clip a policy's min/max keybits vs. the capabilities of the
1100 1100 * algorithm.
1101 1101 */
1102 1102 static void
1103 1103 act_alg_adjust(uint_t algtype, uint_t algid,
1104 1104 uint16_t *minbits, uint16_t *maxbits, netstack_t *ns)
1105 1105 {
1106 1106 ipsec_stack_t *ipss = ns->netstack_ipsec;
1107 1107 ipsec_alginfo_t *algp = ipss->ipsec_alglists[algtype][algid];
1108 1108
1109 1109 if (algp != NULL) {
1110 1110 /*
1111 1111 * If passed-in minbits is zero, we assume the caller trusts
1112 1112 * us with setting the minimum key size. We pick the
1113 1113 * algorithms DEFAULT key size for the minimum in this case.
1114 1114 */
1115 1115 if (*minbits == 0) {
1116 1116 *minbits = algp->alg_default_bits;
1117 1117 ASSERT(*minbits >= algp->alg_minbits);
1118 1118 } else {
1119 1119 *minbits = MAX(MIN(*minbits, algp->alg_maxbits),
1120 1120 algp->alg_minbits);
1121 1121 }
1122 1122 if (*maxbits == 0)
1123 1123 *maxbits = algp->alg_maxbits;
1124 1124 else
1125 1125 *maxbits = MIN(MAX(*maxbits, algp->alg_minbits),
1126 1126 algp->alg_maxbits);
1127 1127 ASSERT(*minbits <= *maxbits);
1128 1128 } else {
1129 1129 *minbits = 0;
1130 1130 *maxbits = 0;
1131 1131 }
1132 1132 }
1133 1133
1134 1134 /*
1135 1135 * Check an action's requested algorithms against the algorithms currently
1136 1136 * loaded in the system.
1137 1137 */
1138 1138 boolean_t
1139 1139 ipsec_check_action(ipsec_act_t *act, int *diag, netstack_t *ns)
1140 1140 {
1141 1141 ipsec_prot_t *ipp;
1142 1142 ipsec_stack_t *ipss = ns->netstack_ipsec;
1143 1143
1144 1144 ipp = &act->ipa_apply;
1145 1145
1146 1146 if (ipp->ipp_use_ah &&
1147 1147 ipss->ipsec_alglists[IPSEC_ALG_AUTH][ipp->ipp_auth_alg] == NULL) {
1148 1148 *diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
1149 1149 return (B_FALSE);
1150 1150 }
1151 1151 if (ipp->ipp_use_espa &&
1152 1152 ipss->ipsec_alglists[IPSEC_ALG_AUTH][ipp->ipp_esp_auth_alg] ==
1153 1153 NULL) {
1154 1154 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
1155 1155 return (B_FALSE);
1156 1156 }
1157 1157 if (ipp->ipp_use_esp &&
1158 1158 ipss->ipsec_alglists[IPSEC_ALG_ENCR][ipp->ipp_encr_alg] == NULL) {
1159 1159 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
1160 1160 return (B_FALSE);
1161 1161 }
1162 1162
1163 1163 act_alg_adjust(IPSEC_ALG_AUTH, ipp->ipp_auth_alg,
1164 1164 &ipp->ipp_ah_minbits, &ipp->ipp_ah_maxbits, ns);
1165 1165 act_alg_adjust(IPSEC_ALG_AUTH, ipp->ipp_esp_auth_alg,
1166 1166 &ipp->ipp_espa_minbits, &ipp->ipp_espa_maxbits, ns);
1167 1167 act_alg_adjust(IPSEC_ALG_ENCR, ipp->ipp_encr_alg,
1168 1168 &ipp->ipp_espe_minbits, &ipp->ipp_espe_maxbits, ns);
1169 1169
1170 1170 if (ipp->ipp_ah_minbits > ipp->ipp_ah_maxbits) {
1171 1171 *diag = SPD_DIAGNOSTIC_UNSUPP_AH_KEYSIZE;
1172 1172 return (B_FALSE);
1173 1173 }
1174 1174 if (ipp->ipp_espa_minbits > ipp->ipp_espa_maxbits) {
1175 1175 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_KEYSIZE;
1176 1176 return (B_FALSE);
1177 1177 }
1178 1178 if (ipp->ipp_espe_minbits > ipp->ipp_espe_maxbits) {
1179 1179 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_KEYSIZE;
1180 1180 return (B_FALSE);
1181 1181 }
1182 1182 /* TODO: sanity check lifetimes */
1183 1183 return (B_TRUE);
1184 1184 }
1185 1185
1186 1186 /*
1187 1187 * Set up a single action during wildcard expansion..
1188 1188 */
1189 1189 static void
1190 1190 ipsec_setup_act(ipsec_act_t *outact, ipsec_act_t *act,
1191 1191 uint_t auth_alg, uint_t encr_alg, uint_t eauth_alg, netstack_t *ns)
1192 1192 {
1193 1193 ipsec_prot_t *ipp;
1194 1194
1195 1195 *outact = *act;
1196 1196 ipp = &outact->ipa_apply;
1197 1197 ipp->ipp_auth_alg = (uint8_t)auth_alg;
1198 1198 ipp->ipp_encr_alg = (uint8_t)encr_alg;
1199 1199 ipp->ipp_esp_auth_alg = (uint8_t)eauth_alg;
1200 1200
1201 1201 act_alg_adjust(IPSEC_ALG_AUTH, auth_alg,
1202 1202 &ipp->ipp_ah_minbits, &ipp->ipp_ah_maxbits, ns);
1203 1203 act_alg_adjust(IPSEC_ALG_AUTH, eauth_alg,
1204 1204 &ipp->ipp_espa_minbits, &ipp->ipp_espa_maxbits, ns);
1205 1205 act_alg_adjust(IPSEC_ALG_ENCR, encr_alg,
1206 1206 &ipp->ipp_espe_minbits, &ipp->ipp_espe_maxbits, ns);
1207 1207 }
1208 1208
1209 1209 /*
1210 1210 * combinatoric expansion time: expand a wildcarded action into an
1211 1211 * array of wildcarded actions; we return the exploded action list,
1212 1212 * and return a count in *nact (output only).
1213 1213 */
1214 1214 static ipsec_act_t *
1215 1215 ipsec_act_wildcard_expand(ipsec_act_t *act, uint_t *nact, netstack_t *ns)
1216 1216 {
1217 1217 boolean_t use_ah, use_esp, use_espa;
1218 1218 boolean_t wild_auth, wild_encr, wild_eauth;
1219 1219 uint_t auth_alg, auth_idx, auth_min, auth_max;
1220 1220 uint_t eauth_alg, eauth_idx, eauth_min, eauth_max;
1221 1221 uint_t encr_alg, encr_idx, encr_min, encr_max;
1222 1222 uint_t action_count, ai;
1223 1223 ipsec_act_t *outact;
1224 1224 ipsec_stack_t *ipss = ns->netstack_ipsec;
1225 1225
1226 1226 if (act->ipa_type != IPSEC_ACT_APPLY) {
1227 1227 outact = kmem_alloc(sizeof (*act), KM_NOSLEEP);
1228 1228 *nact = 1;
1229 1229 if (outact != NULL)
1230 1230 bcopy(act, outact, sizeof (*act));
1231 1231 return (outact);
1232 1232 }
1233 1233 /*
1234 1234 * compute the combinatoric explosion..
1235 1235 *
1236 1236 * we assume a request for encr if esp_req is PREF_REQUIRED
1237 1237 * we assume a request for ah auth if ah_req is PREF_REQUIRED.
1238 1238 * we assume a request for esp auth if !ah and esp_req is PREF_REQUIRED
1239 1239 */
1240 1240
1241 1241 use_ah = act->ipa_apply.ipp_use_ah;
1242 1242 use_esp = act->ipa_apply.ipp_use_esp;
1243 1243 use_espa = act->ipa_apply.ipp_use_espa;
1244 1244 auth_alg = act->ipa_apply.ipp_auth_alg;
1245 1245 eauth_alg = act->ipa_apply.ipp_esp_auth_alg;
1246 1246 encr_alg = act->ipa_apply.ipp_encr_alg;
1247 1247
1248 1248 wild_auth = use_ah && (auth_alg == 0);
1249 1249 wild_eauth = use_espa && (eauth_alg == 0);
1250 1250 wild_encr = use_esp && (encr_alg == 0);
1251 1251
1252 1252 action_count = 1;
1253 1253 auth_min = auth_max = auth_alg;
1254 1254 eauth_min = eauth_max = eauth_alg;
1255 1255 encr_min = encr_max = encr_alg;
1256 1256
1257 1257 /*
1258 1258 * set up for explosion.. for each dimension, expand output
1259 1259 * size by the explosion factor.
1260 1260 *
1261 1261 * Don't include the "any" algorithms, if defined, as no
1262 1262 * kernel policies should be set for these algorithms.
1263 1263 */
1264 1264
1265 1265 #define SET_EXP_MINMAX(type, wild, alg, min, max, ipss) \
1266 1266 if (wild) { \
1267 1267 int nalgs = ipss->ipsec_nalgs[type]; \
1268 1268 if (ipss->ipsec_alglists[type][alg] != NULL) \
1269 1269 nalgs--; \
1270 1270 action_count *= nalgs; \
1271 1271 min = 0; \
1272 1272 max = ipss->ipsec_nalgs[type] - 1; \
1273 1273 }
1274 1274
1275 1275 SET_EXP_MINMAX(IPSEC_ALG_AUTH, wild_auth, SADB_AALG_NONE,
1276 1276 auth_min, auth_max, ipss);
1277 1277 SET_EXP_MINMAX(IPSEC_ALG_AUTH, wild_eauth, SADB_AALG_NONE,
1278 1278 eauth_min, eauth_max, ipss);
1279 1279 SET_EXP_MINMAX(IPSEC_ALG_ENCR, wild_encr, SADB_EALG_NONE,
1280 1280 encr_min, encr_max, ipss);
1281 1281
1282 1282 #undef SET_EXP_MINMAX
1283 1283
1284 1284 /*
1285 1285 * ok, allocate the whole mess..
1286 1286 */
1287 1287
1288 1288 outact = kmem_alloc(sizeof (*outact) * action_count, KM_NOSLEEP);
1289 1289 if (outact == NULL)
1290 1290 return (NULL);
1291 1291
1292 1292 /*
1293 1293 * Now compute all combinations. Note that non-wildcarded
1294 1294 * dimensions just get a single value from auth_min, while
1295 1295 * wildcarded dimensions indirect through the sortlist.
1296 1296 *
1297 1297 * We do encryption outermost since, at this time, there's
1298 1298 * greater difference in security and performance between
1299 1299 * encryption algorithms vs. authentication algorithms.
1300 1300 */
1301 1301
1302 1302 ai = 0;
1303 1303
1304 1304 #define WHICH_ALG(type, wild, idx, ipss) \
1305 1305 ((wild)?(ipss->ipsec_sortlist[type][idx]):(idx))
1306 1306
1307 1307 for (encr_idx = encr_min; encr_idx <= encr_max; encr_idx++) {
1308 1308 encr_alg = WHICH_ALG(IPSEC_ALG_ENCR, wild_encr, encr_idx, ipss);
1309 1309 if (wild_encr && encr_alg == SADB_EALG_NONE)
1310 1310 continue;
1311 1311 for (auth_idx = auth_min; auth_idx <= auth_max; auth_idx++) {
1312 1312 auth_alg = WHICH_ALG(IPSEC_ALG_AUTH, wild_auth,
1313 1313 auth_idx, ipss);
1314 1314 if (wild_auth && auth_alg == SADB_AALG_NONE)
1315 1315 continue;
1316 1316 for (eauth_idx = eauth_min; eauth_idx <= eauth_max;
1317 1317 eauth_idx++) {
1318 1318 eauth_alg = WHICH_ALG(IPSEC_ALG_AUTH,
1319 1319 wild_eauth, eauth_idx, ipss);
1320 1320 if (wild_eauth && eauth_alg == SADB_AALG_NONE)
1321 1321 continue;
1322 1322
1323 1323 ipsec_setup_act(&outact[ai], act,
1324 1324 auth_alg, encr_alg, eauth_alg, ns);
1325 1325 ai++;
1326 1326 }
1327 1327 }
1328 1328 }
1329 1329
1330 1330 #undef WHICH_ALG
1331 1331
1332 1332 ASSERT(ai == action_count);
1333 1333 *nact = action_count;
1334 1334 return (outact);
1335 1335 }
1336 1336
1337 1337 /*
1338 1338 * Extract the parts of an ipsec_prot_t from an old-style ipsec_req_t.
1339 1339 */
1340 1340 static void
1341 1341 ipsec_prot_from_req(const ipsec_req_t *req, ipsec_prot_t *ipp)
1342 1342 {
1343 1343 bzero(ipp, sizeof (*ipp));
1344 1344 /*
1345 1345 * ipp_use_* are bitfields. Look at "!!" in the following as a
1346 1346 * "boolean canonicalization" operator.
1347 1347 */
1348 1348 ipp->ipp_use_ah = !!(req->ipsr_ah_req & IPSEC_PREF_REQUIRED);
1349 1349 ipp->ipp_use_esp = !!(req->ipsr_esp_req & IPSEC_PREF_REQUIRED);
1350 1350 ipp->ipp_use_espa = !!(req->ipsr_esp_auth_alg);
1351 1351 ipp->ipp_use_se = !!(req->ipsr_self_encap_req & IPSEC_PREF_REQUIRED);
1352 1352 ipp->ipp_use_unique = !!((req->ipsr_ah_req|req->ipsr_esp_req) &
1353 1353 IPSEC_PREF_UNIQUE);
1354 1354 ipp->ipp_encr_alg = req->ipsr_esp_alg;
1355 1355 /*
1356 1356 * SADB_AALG_ANY is a placeholder to distinguish "any" from
1357 1357 * "none" above. If auth is required, as determined above,
1358 1358 * SADB_AALG_ANY becomes 0, which is the representation
1359 1359 * of "any" and "none" in PF_KEY v2.
1360 1360 */
1361 1361 ipp->ipp_auth_alg = (req->ipsr_auth_alg != SADB_AALG_ANY) ?
1362 1362 req->ipsr_auth_alg : 0;
1363 1363 ipp->ipp_esp_auth_alg = (req->ipsr_esp_auth_alg != SADB_AALG_ANY) ?
1364 1364 req->ipsr_esp_auth_alg : 0;
1365 1365 }
1366 1366
1367 1367 /*
1368 1368 * Extract a new-style action from a request.
1369 1369 */
1370 1370 void
1371 1371 ipsec_actvec_from_req(const ipsec_req_t *req, ipsec_act_t **actp, uint_t *nactp,
1372 1372 netstack_t *ns)
1373 1373 {
1374 1374 struct ipsec_act act;
1375 1375
1376 1376 bzero(&act, sizeof (act));
1377 1377 if ((req->ipsr_ah_req & IPSEC_PREF_NEVER) &&
1378 1378 (req->ipsr_esp_req & IPSEC_PREF_NEVER)) {
1379 1379 act.ipa_type = IPSEC_ACT_BYPASS;
1380 1380 } else {
1381 1381 act.ipa_type = IPSEC_ACT_APPLY;
1382 1382 ipsec_prot_from_req(req, &act.ipa_apply);
1383 1383 }
1384 1384 *actp = ipsec_act_wildcard_expand(&act, nactp, ns);
1385 1385 }
1386 1386
1387 1387 /*
1388 1388 * Convert a new-style "prot" back to an ipsec_req_t (more backwards compat).
1389 1389 * We assume caller has already zero'ed *req for us.
1390 1390 */
1391 1391 static int
1392 1392 ipsec_req_from_prot(ipsec_prot_t *ipp, ipsec_req_t *req)
1393 1393 {
1394 1394 req->ipsr_esp_alg = ipp->ipp_encr_alg;
1395 1395 req->ipsr_auth_alg = ipp->ipp_auth_alg;
1396 1396 req->ipsr_esp_auth_alg = ipp->ipp_esp_auth_alg;
1397 1397
1398 1398 if (ipp->ipp_use_unique) {
1399 1399 req->ipsr_ah_req |= IPSEC_PREF_UNIQUE;
1400 1400 req->ipsr_esp_req |= IPSEC_PREF_UNIQUE;
1401 1401 }
1402 1402 if (ipp->ipp_use_se)
1403 1403 req->ipsr_self_encap_req |= IPSEC_PREF_REQUIRED;
1404 1404 if (ipp->ipp_use_ah)
1405 1405 req->ipsr_ah_req |= IPSEC_PREF_REQUIRED;
1406 1406 if (ipp->ipp_use_esp)
1407 1407 req->ipsr_esp_req |= IPSEC_PREF_REQUIRED;
1408 1408 return (sizeof (*req));
1409 1409 }
1410 1410
1411 1411 /*
1412 1412 * Convert a new-style action back to an ipsec_req_t (more backwards compat).
1413 1413 * We assume caller has already zero'ed *req for us.
1414 1414 */
1415 1415 static int
1416 1416 ipsec_req_from_act(ipsec_action_t *ap, ipsec_req_t *req)
1417 1417 {
1418 1418 switch (ap->ipa_act.ipa_type) {
1419 1419 case IPSEC_ACT_BYPASS:
1420 1420 req->ipsr_ah_req = IPSEC_PREF_NEVER;
1421 1421 req->ipsr_esp_req = IPSEC_PREF_NEVER;
1422 1422 return (sizeof (*req));
1423 1423 case IPSEC_ACT_APPLY:
1424 1424 return (ipsec_req_from_prot(&ap->ipa_act.ipa_apply, req));
1425 1425 }
1426 1426 return (sizeof (*req));
1427 1427 }
1428 1428
1429 1429 /*
1430 1430 * Convert a new-style action back to an ipsec_req_t (more backwards compat).
1431 1431 * We assume caller has already zero'ed *req for us.
1432 1432 */
1433 1433 int
1434 1434 ipsec_req_from_head(ipsec_policy_head_t *ph, ipsec_req_t *req, int af)
1435 1435 {
1436 1436 ipsec_policy_t *p;
1437 1437
1438 1438 /*
1439 1439 * FULL-PERSOCK: consult hash table, too?
1440 1440 */
1441 1441 for (p = ph->iph_root[IPSEC_INBOUND].ipr_nonhash[af];
1442 1442 p != NULL;
1443 1443 p = p->ipsp_hash.hash_next) {
1444 1444 if ((p->ipsp_sel->ipsl_key.ipsl_valid & IPSL_WILDCARD) == 0)
1445 1445 return (ipsec_req_from_act(p->ipsp_act, req));
1446 1446 }
1447 1447 return (sizeof (*req));
1448 1448 }
1449 1449
1450 1450 /*
1451 1451 * Based on per-socket or latched policy, convert to an appropriate
1452 1452 * IP_SEC_OPT ipsec_req_t for the socket option; return size so we can
1453 1453 * be tail-called from ip.
1454 1454 */
1455 1455 int
1456 1456 ipsec_req_from_conn(conn_t *connp, ipsec_req_t *req, int af)
1457 1457 {
1458 1458 ipsec_latch_t *ipl;
1459 1459 int rv = sizeof (ipsec_req_t);
1460 1460
1461 1461 bzero(req, sizeof (*req));
1462 1462
1463 1463 ASSERT(MUTEX_HELD(&connp->conn_lock));
1464 1464 ipl = connp->conn_latch;
1465 1465
1466 1466 /*
1467 1467 * Find appropriate policy. First choice is latched action;
1468 1468 * failing that, see latched policy; failing that,
1469 1469 * look at configured policy.
1470 1470 */
1471 1471 if (ipl != NULL) {
1472 1472 if (connp->conn_latch_in_action != NULL) {
1473 1473 rv = ipsec_req_from_act(connp->conn_latch_in_action,
1474 1474 req);
1475 1475 goto done;
1476 1476 }
1477 1477 if (connp->conn_latch_in_policy != NULL) {
1478 1478 rv = ipsec_req_from_act(
1479 1479 connp->conn_latch_in_policy->ipsp_act, req);
1480 1480 goto done;
1481 1481 }
1482 1482 }
1483 1483 if (connp->conn_policy != NULL)
1484 1484 rv = ipsec_req_from_head(connp->conn_policy, req, af);
1485 1485 done:
1486 1486 return (rv);
1487 1487 }
1488 1488
1489 1489 void
1490 1490 ipsec_actvec_free(ipsec_act_t *act, uint_t nact)
1491 1491 {
1492 1492 kmem_free(act, nact * sizeof (*act));
1493 1493 }
1494 1494
1495 1495 /*
1496 1496 * Consumes a reference to ipsp.
1497 1497 */
1498 1498 static mblk_t *
1499 1499 ipsec_check_loopback_policy(mblk_t *data_mp, ip_recv_attr_t *ira,
1500 1500 ipsec_policy_t *ipsp)
1501 1501 {
1502 1502 if (!(ira->ira_flags & IRAF_IPSEC_SECURE))
1503 1503 return (data_mp);
1504 1504
1505 1505 ASSERT(ira->ira_flags & IRAF_LOOPBACK);
1506 1506
1507 1507 IPPOL_REFRELE(ipsp);
1508 1508
1509 1509 /*
1510 1510 * We should do an actual policy check here. Revisit this
1511 1511 * when we revisit the IPsec API. (And pass a conn_t in when we
1512 1512 * get there.)
1513 1513 */
1514 1514
1515 1515 return (data_mp);
1516 1516 }
1517 1517
1518 1518 /*
1519 1519 * Check that packet's inbound ports & proto match the selectors
1520 1520 * expected by the SAs it traversed on the way in.
1521 1521 */
1522 1522 static boolean_t
1523 1523 ipsec_check_ipsecin_unique(ip_recv_attr_t *ira, const char **reason,
1524 1524 kstat_named_t **counter, uint64_t pkt_unique, netstack_t *ns)
1525 1525 {
1526 1526 uint64_t ah_mask, esp_mask;
1527 1527 ipsa_t *ah_assoc;
1528 1528 ipsa_t *esp_assoc;
1529 1529 ipsec_stack_t *ipss = ns->netstack_ipsec;
1530 1530
1531 1531 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1532 1532 ASSERT(!(ira->ira_flags & IRAF_LOOPBACK));
1533 1533
1534 1534 ah_assoc = ira->ira_ipsec_ah_sa;
1535 1535 esp_assoc = ira->ira_ipsec_esp_sa;
1536 1536 ASSERT((ah_assoc != NULL) || (esp_assoc != NULL));
1537 1537
1538 1538 ah_mask = (ah_assoc != NULL) ? ah_assoc->ipsa_unique_mask : 0;
1539 1539 esp_mask = (esp_assoc != NULL) ? esp_assoc->ipsa_unique_mask : 0;
1540 1540
1541 1541 if ((ah_mask == 0) && (esp_mask == 0))
1542 1542 return (B_TRUE);
1543 1543
1544 1544 /*
1545 1545 * The pkt_unique check will also check for tunnel mode on the SA
1546 1546 * vs. the tunneled_packet boolean. "Be liberal in what you receive"
1547 1547 * should not apply in this case. ;)
1548 1548 */
1549 1549
1550 1550 if (ah_mask != 0 &&
1551 1551 ah_assoc->ipsa_unique_id != (pkt_unique & ah_mask)) {
1552 1552 *reason = "AH inner header mismatch";
1553 1553 *counter = DROPPER(ipss, ipds_spd_ah_innermismatch);
1554 1554 return (B_FALSE);
1555 1555 }
1556 1556 if (esp_mask != 0 &&
1557 1557 esp_assoc->ipsa_unique_id != (pkt_unique & esp_mask)) {
1558 1558 *reason = "ESP inner header mismatch";
1559 1559 *counter = DROPPER(ipss, ipds_spd_esp_innermismatch);
1560 1560 return (B_FALSE);
1561 1561 }
1562 1562 return (B_TRUE);
1563 1563 }
1564 1564
1565 1565 static boolean_t
1566 1566 ipsec_check_ipsecin_action(ip_recv_attr_t *ira, mblk_t *mp, ipsec_action_t *ap,
1567 1567 ipha_t *ipha, ip6_t *ip6h, const char **reason, kstat_named_t **counter,
1568 1568 netstack_t *ns)
1569 1569 {
1570 1570 boolean_t ret = B_TRUE;
1571 1571 ipsec_prot_t *ipp;
1572 1572 ipsa_t *ah_assoc;
1573 1573 ipsa_t *esp_assoc;
1574 1574 boolean_t decaps;
1575 1575 ipsec_stack_t *ipss = ns->netstack_ipsec;
1576 1576
1577 1577 ASSERT((ipha == NULL && ip6h != NULL) ||
1578 1578 (ip6h == NULL && ipha != NULL));
1579 1579
1580 1580 if (ira->ira_flags & IRAF_LOOPBACK) {
1581 1581 /*
1582 1582 * Besides accepting pointer-equivalent actions, we also
1583 1583 * accept any ICMP errors we generated for ourselves,
1584 1584 * regardless of policy. If we do not wish to make this
1585 1585 * assumption in the future, check here, and where
1586 1586 * IXAF_TRUSTED_ICMP is initialized in ip.c and ip6.c.
1587 1587 */
1588 1588 if (ap == ira->ira_ipsec_action ||
1589 1589 (ira->ira_flags & IRAF_TRUSTED_ICMP))
1590 1590 return (B_TRUE);
1591 1591
1592 1592 /* Deep compare necessary here?? */
1593 1593 *counter = DROPPER(ipss, ipds_spd_loopback_mismatch);
1594 1594 *reason = "loopback policy mismatch";
1595 1595 return (B_FALSE);
1596 1596 }
1597 1597 ASSERT(!(ira->ira_flags & IRAF_TRUSTED_ICMP));
1598 1598 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1599 1599
1600 1600 ah_assoc = ira->ira_ipsec_ah_sa;
1601 1601 esp_assoc = ira->ira_ipsec_esp_sa;
1602 1602
1603 1603 decaps = (ira->ira_flags & IRAF_IPSEC_DECAPS);
1604 1604
1605 1605 switch (ap->ipa_act.ipa_type) {
1606 1606 case IPSEC_ACT_DISCARD:
1607 1607 case IPSEC_ACT_REJECT:
1608 1608 /* Should "fail hard" */
1609 1609 *counter = DROPPER(ipss, ipds_spd_explicit);
1610 1610 *reason = "blocked by policy";
1611 1611 return (B_FALSE);
1612 1612
1613 1613 case IPSEC_ACT_BYPASS:
1614 1614 case IPSEC_ACT_CLEAR:
1615 1615 *counter = DROPPER(ipss, ipds_spd_got_secure);
1616 1616 *reason = "expected clear, got protected";
1617 1617 return (B_FALSE);
1618 1618
1619 1619 case IPSEC_ACT_APPLY:
1620 1620 ipp = &ap->ipa_act.ipa_apply;
1621 1621 /*
1622 1622 		 * For now we only do simple checks of whether the
1623 1623 		 * datagram satisfies the required IPsec protocol
1624 1624 		 * constraints. We might add more in the future, such
1625 1625 		 * as sensitivity levels, key bits, etc. If it fails the
1626 1626 		 * constraints, check whether we would have accepted
1627 1627 		 * this if it had come in clear.
1628 1628 */
1629 1629 if (ipp->ipp_use_ah) {
1630 1630 if (ah_assoc == NULL) {
1631 1631 ret = ipsec_inbound_accept_clear(mp, ipha,
1632 1632 ip6h);
1633 1633 *counter = DROPPER(ipss, ipds_spd_got_clear);
1634 1634 *reason = "unprotected not accepted";
1635 1635 break;
1636 1636 }
1637 1637 ASSERT(ah_assoc != NULL);
1638 1638 ASSERT(ipp->ipp_auth_alg != 0);
1639 1639
1640 1640 if (ah_assoc->ipsa_auth_alg !=
1641 1641 ipp->ipp_auth_alg) {
1642 1642 *counter = DROPPER(ipss, ipds_spd_bad_ahalg);
1643 1643 *reason = "unacceptable ah alg";
1644 1644 ret = B_FALSE;
1645 1645 break;
1646 1646 }
1647 1647 } else if (ah_assoc != NULL) {
1648 1648 /*
1649 1649 * Don't allow this. Check IPSEC NOTE above
1650 1650 * ip_fanout_proto().
1651 1651 */
1652 1652 *counter = DROPPER(ipss, ipds_spd_got_ah);
1653 1653 *reason = "unexpected AH";
1654 1654 ret = B_FALSE;
1655 1655 break;
1656 1656 }
1657 1657 if (ipp->ipp_use_esp) {
1658 1658 if (esp_assoc == NULL) {
1659 1659 ret = ipsec_inbound_accept_clear(mp, ipha,
1660 1660 ip6h);
1661 1661 *counter = DROPPER(ipss, ipds_spd_got_clear);
1662 1662 *reason = "unprotected not accepted";
1663 1663 break;
1664 1664 }
1665 1665 ASSERT(esp_assoc != NULL);
1666 1666 ASSERT(ipp->ipp_encr_alg != 0);
1667 1667
1668 1668 if (esp_assoc->ipsa_encr_alg !=
1669 1669 ipp->ipp_encr_alg) {
1670 1670 *counter = DROPPER(ipss, ipds_spd_bad_espealg);
1671 1671 *reason = "unacceptable esp alg";
1672 1672 ret = B_FALSE;
1673 1673 break;
1674 1674 }
1675 1675 /*
1676 1676 * If the client does not need authentication,
1677 1677 			 * we don't verify the algorithm.
1678 1678 */
1679 1679 if (ipp->ipp_use_espa) {
1680 1680 if (esp_assoc->ipsa_auth_alg !=
1681 1681 ipp->ipp_esp_auth_alg) {
1682 1682 *counter = DROPPER(ipss,
1683 1683 ipds_spd_bad_espaalg);
1684 1684 *reason = "unacceptable esp auth alg";
1685 1685 ret = B_FALSE;
1686 1686 break;
1687 1687 }
1688 1688 }
1689 1689 } else if (esp_assoc != NULL) {
1690 1690 /*
1691 1691 * Don't allow this. Check IPSEC NOTE above
1692 1692 * ip_fanout_proto().
1693 1693 */
1694 1694 *counter = DROPPER(ipss, ipds_spd_got_esp);
1695 1695 *reason = "unexpected ESP";
1696 1696 ret = B_FALSE;
1697 1697 break;
1698 1698 }
1699 1699 if (ipp->ipp_use_se) {
1700 1700 if (!decaps) {
1701 1701 ret = ipsec_inbound_accept_clear(mp, ipha,
1702 1702 ip6h);
1703 1703 if (!ret) {
1704 1704 /* XXX mutant? */
1705 1705 *counter = DROPPER(ipss,
1706 1706 ipds_spd_bad_selfencap);
1707 1707 *reason = "self encap not found";
1708 1708 break;
1709 1709 }
1710 1710 }
1711 1711 } else if (decaps) {
1712 1712 /*
1713 1713 * XXX If the packet comes in tunneled and the
1714 1714 * recipient does not expect it to be tunneled, it
1715 1715 * is okay. But we drop to be consistent with the
1716 1716 * other cases.
1717 1717 */
1718 1718 *counter = DROPPER(ipss, ipds_spd_got_selfencap);
1719 1719 *reason = "unexpected self encap";
1720 1720 ret = B_FALSE;
1721 1721 break;
1722 1722 }
1723 1723 if (ira->ira_ipsec_action != NULL) {
1724 1724 /*
1725 1725 * This can happen if we do a double policy-check on
1726 1726 * a packet
1727 1727 * XXX XXX should fix this case!
1728 1728 */
1729 1729 IPACT_REFRELE(ira->ira_ipsec_action);
1730 1730 }
1731 1731 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1732 1732 ASSERT(ira->ira_ipsec_action == NULL);
1733 1733 IPACT_REFHOLD(ap);
1734 1734 ira->ira_ipsec_action = ap;
1735 1735 break; /* from switch */
1736 1736 }
1737 1737 return (ret);
1738 1738 }
1739 1739
1740 1740 static boolean_t
1741 1741 spd_match_inbound_ids(ipsec_latch_t *ipl, ipsa_t *sa)
1742 1742 {
1743 1743 ASSERT(ipl->ipl_ids_latched == B_TRUE);
1744 1744 return ipsid_equal(ipl->ipl_remote_cid, sa->ipsa_src_cid) &&
1745 1745 ipsid_equal(ipl->ipl_local_cid, sa->ipsa_dst_cid);
1746 1746 }
1747 1747
1748 1748 /*
1749 1749 * Takes a latched conn and an inbound packet and returns a unique_id suitable
1750 1750 * for SA comparisons. Most of the time we will copy from the conn_t, but
1751 1751 * there are cases when the conn_t is latched but it has wildcard selectors,
1752 1752  * and then we need to fall back to scooping them out of the packet.
1753 1753 *
1754 1754 * Assume we'll never have 0 with a conn_t present, so use 0 as a failure. We
1755 1755 * can get away with this because we only have non-zero ports/proto for
1756 1756 * latched conn_ts.
1757 1757 *
1758 1758 * Ideal candidate for an "inline" keyword, as we're JUST convoluted enough
1759 1759 * to not be a nice macro.
1760 1760 */
1761 1761 static uint64_t
1762 1762 conn_to_unique(conn_t *connp, mblk_t *data_mp, ipha_t *ipha, ip6_t *ip6h)
1763 1763 {
1764 1764 ipsec_selector_t sel;
1765 1765 uint8_t ulp = connp->conn_proto;
1766 1766
1767 1767 ASSERT(connp->conn_latch_in_policy != NULL);
1768 1768
1769 1769 if ((ulp == IPPROTO_TCP || ulp == IPPROTO_UDP || ulp == IPPROTO_SCTP) &&
1770 1770 (connp->conn_fport == 0 || connp->conn_lport == 0)) {
1771 1771 /* Slow path - we gotta grab from the packet. */
1772 1772 if (ipsec_init_inbound_sel(&sel, data_mp, ipha, ip6h,
1773 1773 SEL_NONE) != SELRET_SUCCESS) {
1774 1774 /* Failure -> have caller free packet with ENOMEM. */
1775 1775 return (0);
1776 1776 }
1777 1777 return (SA_UNIQUE_ID(sel.ips_remote_port, sel.ips_local_port,
1778 1778 sel.ips_protocol, 0));
1779 1779 }
1780 1780
1781 1781 #ifdef DEBUG_NOT_UNTIL_6478464
1782 1782 if (ipsec_init_inbound_sel(&sel, data_mp, ipha, ip6h, SEL_NONE) ==
1783 1783 SELRET_SUCCESS) {
1784 1784 ASSERT(sel.ips_local_port == connp->conn_lport);
1785 1785 ASSERT(sel.ips_remote_port == connp->conn_fport);
1786 1786 ASSERT(sel.ips_protocol == connp->conn_proto);
1787 1787 }
1788 1788 ASSERT(connp->conn_proto != 0);
1789 1789 #endif
1790 1790
1791 1791 return (SA_UNIQUE_ID(connp->conn_fport, connp->conn_lport, ulp, 0));
1792 1792 }
1793 1793
1794 1794 /*
1795 1795 * Called to check policy on a latched connection.
1796 1796 * Note that we don't dereference conn_latch or conn_ihere since the conn might
1797 1797 * be closing. The caller passes a held ipsec_latch_t instead.
1798 1798 */
1799 1799 static boolean_t
1800 1800 ipsec_check_ipsecin_latch(ip_recv_attr_t *ira, mblk_t *mp, ipsec_latch_t *ipl,
1801 1801 ipsec_action_t *ap, ipha_t *ipha, ip6_t *ip6h, const char **reason,
1802 1802 kstat_named_t **counter, conn_t *connp, netstack_t *ns)
1803 1803 {
1804 1804 ipsec_stack_t *ipss = ns->netstack_ipsec;
1805 1805
1806 1806 ASSERT(ipl->ipl_ids_latched == B_TRUE);
1807 1807 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1808 1808
1809 1809 if (!(ira->ira_flags & IRAF_LOOPBACK)) {
1810 1810 /*
1811 1811 * Over loopback, there aren't real security associations,
1812 1812 * so there are neither identities nor "unique" values
1813 1813 * for us to check the packet against.
1814 1814 */
1815 1815 if (ira->ira_ipsec_ah_sa != NULL) {
1816 1816 if (!spd_match_inbound_ids(ipl,
1817 1817 ira->ira_ipsec_ah_sa)) {
1818 1818 *counter = DROPPER(ipss, ipds_spd_ah_badid);
1819 1819 *reason = "AH identity mismatch";
1820 1820 return (B_FALSE);
1821 1821 }
1822 1822 }
1823 1823
1824 1824 if (ira->ira_ipsec_esp_sa != NULL) {
1825 1825 if (!spd_match_inbound_ids(ipl,
1826 1826 ira->ira_ipsec_esp_sa)) {
1827 1827 *counter = DROPPER(ipss, ipds_spd_esp_badid);
1828 1828 *reason = "ESP identity mismatch";
1829 1829 return (B_FALSE);
1830 1830 }
1831 1831 }
1832 1832
1833 1833 /*
1834 1834 * Can fudge pkt_unique from connp because we're latched.
1835 1835 * In DEBUG kernels (see conn_to_unique()'s implementation),
1836 1836 * verify this even if it REALLY slows things down.
1837 1837 */
1838 1838 if (!ipsec_check_ipsecin_unique(ira, reason, counter,
1839 1839 conn_to_unique(connp, mp, ipha, ip6h), ns)) {
1840 1840 return (B_FALSE);
1841 1841 }
1842 1842 }
1843 1843 return (ipsec_check_ipsecin_action(ira, mp, ap, ipha, ip6h, reason,
1844 1844 counter, ns));
1845 1845 }
1846 1846
1847 1847 /*
1848 1848 * Check to see whether this secured datagram meets the policy
1849 1849 * constraints specified in ipsp.
1850 1850 *
1851 1851 * Called from ipsec_check_global_policy, and ipsec_check_inbound_policy.
1852 1852 *
1853 1853 * Consumes a reference to ipsp.
1854 1854 * Returns the mblk if ok.
1855 1855 */
1856 1856 static mblk_t *
1857 1857 ipsec_check_ipsecin_policy(mblk_t *data_mp, ipsec_policy_t *ipsp,
1858 1858 ipha_t *ipha, ip6_t *ip6h, uint64_t pkt_unique, ip_recv_attr_t *ira,
1859 1859 netstack_t *ns)
1860 1860 {
1861 1861 ipsec_action_t *ap;
1862 1862 const char *reason = "no policy actions found";
1863 1863 ip_stack_t *ipst = ns->netstack_ip;
1864 1864 ipsec_stack_t *ipss = ns->netstack_ipsec;
1865 1865 kstat_named_t *counter;
1866 1866
1867 1867 counter = DROPPER(ipss, ipds_spd_got_secure);
1868 1868
1869 1869 ASSERT(ipsp != NULL);
1870 1870
1871 1871 ASSERT((ipha == NULL && ip6h != NULL) ||
1872 1872 (ip6h == NULL && ipha != NULL));
1873 1873
1874 1874 if (ira->ira_flags & IRAF_LOOPBACK)
1875 1875 return (ipsec_check_loopback_policy(data_mp, ira, ipsp));
1876 1876
1877 1877 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1878 1878
1879 1879 if (ira->ira_ipsec_action != NULL) {
1880 1880 /*
1881 1881 * this can happen if we do a double policy-check on a packet
1882 1882 * Would be nice to be able to delete this test..
1883 1883 */
1884 1884 IPACT_REFRELE(ira->ira_ipsec_action);
1885 1885 }
1886 1886 ASSERT(ira->ira_ipsec_action == NULL);
1887 1887
1888 1888 if (!SA_IDS_MATCH(ira->ira_ipsec_ah_sa, ira->ira_ipsec_esp_sa)) {
1889 1889 reason = "inbound AH and ESP identities differ";
1890 1890 counter = DROPPER(ipss, ipds_spd_ahesp_diffid);
1891 1891 goto drop;
1892 1892 }
1893 1893
1894 1894 if (!ipsec_check_ipsecin_unique(ira, &reason, &counter, pkt_unique,
1895 1895 ns))
1896 1896 goto drop;
1897 1897
1898 1898 /*
1899 1899 * Ok, now loop through the possible actions and see if any
1900 1900 * of them work for us.
1901 1901 */
1902 1902
1903 1903 for (ap = ipsp->ipsp_act; ap != NULL; ap = ap->ipa_next) {
1904 1904 if (ipsec_check_ipsecin_action(ira, data_mp, ap,
1905 1905 ipha, ip6h, &reason, &counter, ns)) {
1906 1906 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
1907 1907 IPPOL_REFRELE(ipsp);
1908 1908 return (data_mp);
1909 1909 }
1910 1910 }
1911 1911 drop:
1912 1912 ipsec_rl_strlog(ns, IP_MOD_ID, 0, 0, SL_ERROR|SL_WARN|SL_CONSOLE,
1913 1913 "ipsec inbound policy mismatch: %s, packet dropped\n",
1914 1914 reason);
1915 1915 IPPOL_REFRELE(ipsp);
1916 1916 ASSERT(ira->ira_ipsec_action == NULL);
1917 1917 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
1918 1918 ip_drop_packet(data_mp, B_TRUE, NULL, counter,
1919 1919 &ipss->ipsec_spd_dropper);
1920 1920 return (NULL);
1921 1921 }
1922 1922
1923 1923 /*
1924 1924 * sleazy prefix-length-based compare.
1925 1925 * another inlining candidate..
1926 1926 */
1927 1927 boolean_t
1928 1928 ip_addr_match(uint8_t *addr1, int pfxlen, in6_addr_t *addr2p)
1929 1929 {
1930 1930 int offset = pfxlen>>3;
1931 1931 int bitsleft = pfxlen & 7;
1932 1932 uint8_t *addr2 = (uint8_t *)addr2p;
1933 1933
1934 1934 /*
1935 1935 * and there was much evil..
1936 1936 * XXX should inline-expand the bcmp here and do this 32 bits
1937 1937 * or 64 bits at a time..
1938 1938 */
1939 1939 return ((bcmp(addr1, addr2, offset) == 0) &&
1940 1940 ((bitsleft == 0) ||
1941 1941 (((addr1[offset] ^ addr2[offset]) & (0xff<<(8-bitsleft))) == 0)));
1942 1942 }
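A worked example of the prefix arithmetic above, as a hedged sketch with invented addresses (not code from this file): a /21 prefix compares two whole bytes and then only the top five bits of the third byte, i.e. mask 0xff << (8 - 5) = 0xf8.

static boolean_t
example_prefix_match(void)
{
	/* 10.1.0.0/21 vs. 10.1.2.3: 0x00 ^ 0x02 = 0x02, and 0x02 & 0xf8 == 0 */
	uint8_t prefix[4] = { 10, 1, 0, 0 };
	in6_addr_t addr;

	bzero(&addr, sizeof (addr));
	addr.s6_addr[0] = 10;
	addr.s6_addr[1] = 1;
	addr.s6_addr[2] = 2;
	addr.s6_addr[3] = 3;
	return (ip_addr_match(prefix, 21, &addr));	/* B_TRUE */
}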
1943 1943
1944 1944 static ipsec_policy_t *
1945 1945 ipsec_find_policy_chain(ipsec_policy_t *best, ipsec_policy_t *chain,
1946 1946 ipsec_selector_t *sel, boolean_t is_icmp_inv_acq)
1947 1947 {
1948 1948 ipsec_selkey_t *isel;
1949 1949 ipsec_policy_t *p;
1950 1950 int bpri = best ? best->ipsp_prio : 0;
1951 1951
1952 1952 for (p = chain; p != NULL; p = p->ipsp_hash.hash_next) {
1953 1953 uint32_t valid;
1954 1954
1955 1955 if (p->ipsp_prio <= bpri)
1956 1956 continue;
1957 1957 isel = &p->ipsp_sel->ipsl_key;
1958 1958 valid = isel->ipsl_valid;
1959 1959
1960 1960 if ((valid & IPSL_PROTOCOL) &&
1961 1961 (isel->ipsl_proto != sel->ips_protocol))
1962 1962 continue;
1963 1963
1964 1964 if ((valid & IPSL_REMOTE_ADDR) &&
1965 1965 !ip_addr_match((uint8_t *)&isel->ipsl_remote,
1966 1966 isel->ipsl_remote_pfxlen, &sel->ips_remote_addr_v6))
1967 1967 continue;
1968 1968
1969 1969 if ((valid & IPSL_LOCAL_ADDR) &&
1970 1970 !ip_addr_match((uint8_t *)&isel->ipsl_local,
1971 1971 isel->ipsl_local_pfxlen, &sel->ips_local_addr_v6))
1972 1972 continue;
1973 1973
1974 1974 if ((valid & IPSL_REMOTE_PORT) &&
1975 1975 isel->ipsl_rport != sel->ips_remote_port)
1976 1976 continue;
1977 1977
1978 1978 if ((valid & IPSL_LOCAL_PORT) &&
1979 1979 isel->ipsl_lport != sel->ips_local_port)
1980 1980 continue;
1981 1981
1982 1982 if (!is_icmp_inv_acq) {
1983 1983 if ((valid & IPSL_ICMP_TYPE) &&
1984 1984 (isel->ipsl_icmp_type > sel->ips_icmp_type ||
1985 1985 isel->ipsl_icmp_type_end < sel->ips_icmp_type)) {
1986 1986 continue;
1987 1987 }
1988 1988
1989 1989 if ((valid & IPSL_ICMP_CODE) &&
1990 1990 (isel->ipsl_icmp_code > sel->ips_icmp_code ||
1991 1991 isel->ipsl_icmp_code_end <
1992 1992 sel->ips_icmp_code)) {
1993 1993 continue;
1994 1994 }
1995 1995 } else {
1996 1996 /*
1997 1997 			 * Special case for ICMP inverse acquire:
1998 1998 			 * we only want policies that aren't drop/pass.
1999 1999 */
2000 2000 if (p->ipsp_act->ipa_act.ipa_type != IPSEC_ACT_APPLY)
2001 2001 continue;
2002 2002 }
2003 2003
2004 2004 /* we matched all the packet-port-field selectors! */
2005 2005 best = p;
2006 2006 bpri = p->ipsp_prio;
2007 2007 }
2008 2008
2009 2009 return (best);
2010 2010 }
2011 2011
2012 2012 /*
2013 2013 * Try to find and return the best policy entry under a given policy
2014 2014 * root for a given set of selectors; the first parameter "best" is
2015 2015 * the current best policy so far. If "best" is non-null, we have a
2016 2016 * reference to it. We return a reference to a policy; if that policy
2017 2017 * is not the original "best", we need to release that reference
2018 2018 * before returning.
2019 2019 */
2020 2020 ipsec_policy_t *
2021 2021 ipsec_find_policy_head(ipsec_policy_t *best, ipsec_policy_head_t *head,
2022 2022 int direction, ipsec_selector_t *sel)
2023 2023 {
2024 2024 ipsec_policy_t *curbest;
2025 2025 ipsec_policy_root_t *root;
2026 2026 uint8_t is_icmp_inv_acq = sel->ips_is_icmp_inv_acq;
2027 2027 int af = sel->ips_isv4 ? IPSEC_AF_V4 : IPSEC_AF_V6;
2028 2028
2029 2029 curbest = best;
2030 2030 root = &head->iph_root[direction];
2031 2031
2032 2032 #ifdef DEBUG
2033 2033 if (is_icmp_inv_acq) {
2034 2034 if (sel->ips_isv4) {
2035 2035 if (sel->ips_protocol != IPPROTO_ICMP) {
2036 2036 cmn_err(CE_WARN, "ipsec_find_policy_head:"
2037 2037 " expecting icmp, got %d",
2038 2038 sel->ips_protocol);
2039 2039 }
2040 2040 } else {
2041 2041 if (sel->ips_protocol != IPPROTO_ICMPV6) {
2042 2042 cmn_err(CE_WARN, "ipsec_find_policy_head:"
2043 2043 " expecting icmpv6, got %d",
2044 2044 sel->ips_protocol);
2045 2045 }
2046 2046 }
2047 2047 }
2048 2048 #endif
2049 2049
2050 2050 rw_enter(&head->iph_lock, RW_READER);
2051 2051
2052 2052 if (root->ipr_nchains > 0) {
2053 2053 curbest = ipsec_find_policy_chain(curbest,
2054 2054 root->ipr_hash[selector_hash(sel, root)].hash_head, sel,
2055 2055 is_icmp_inv_acq);
2056 2056 }
2057 2057 curbest = ipsec_find_policy_chain(curbest, root->ipr_nonhash[af], sel,
2058 2058 is_icmp_inv_acq);
2059 2059
2060 2060 /*
2061 2061 * Adjust reference counts if we found anything new.
2062 2062 */
2063 2063 if (curbest != best) {
2064 2064 ASSERT(curbest != NULL);
2065 2065 IPPOL_REFHOLD(curbest);
2066 2066
2067 2067 if (best != NULL) {
2068 2068 IPPOL_REFRELE(best);
2069 2069 }
2070 2070 }
2071 2071
2072 2072 rw_exit(&head->iph_lock);
2073 2073
2074 2074 return (curbest);
2075 2075 }
2076 2076
2077 2077 /*
2078 2078 * Find the best system policy (either global or per-interface) which
2079 2079 * applies to the given selector; look in all the relevant policy roots
2080 2080 * to figure out which policy wins.
2081 2081 *
2082 2082 * Returns a reference to a policy; caller must release this
2083 2083 * reference when done.
2084 2084 */
2085 2085 ipsec_policy_t *
2086 2086 ipsec_find_policy(int direction, const conn_t *connp, ipsec_selector_t *sel,
2087 2087 netstack_t *ns)
2088 2088 {
2089 2089 ipsec_policy_t *p;
2090 2090 ipsec_stack_t *ipss = ns->netstack_ipsec;
2091 2091
2092 2092 p = ipsec_find_policy_head(NULL, &ipss->ipsec_system_policy,
2093 2093 direction, sel);
2094 2094 if ((connp != NULL) && (connp->conn_policy != NULL)) {
2095 2095 p = ipsec_find_policy_head(p, connp->conn_policy,
2096 2096 direction, sel);
2097 2097 }
2098 2098
2099 2099 return (p);
2100 2100 }
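The reference-count contract described above can be summarized with a hedged caller sketch (the helper is hypothetical):

/*
 * Hedged sketch, not part of spd.c: ipsec_find_policy() returns a held
 * reference (or NULL), which the caller must release once it is done
 * looking at the winning policy.
 */
static void
example_policy_lookup(const conn_t *connp, ipsec_selector_t *sel,
    netstack_t *ns)
{
	ipsec_policy_t *p;

	p = ipsec_find_policy(IPSEC_TYPE_INBOUND, connp, sel, ns);
	if (p != NULL) {
		/* ... examine p->ipsp_act and p->ipsp_prio here ... */
		IPPOL_REFRELE(p);
	}
}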
2101 2101
2102 2102 /*
2103 2103 * Check with global policy and see whether this inbound
2104 2104 * packet meets the policy constraints.
2105 2105 *
2106 2106 * Locate appropriate policy from global policy, supplemented by the
2107 2107 * conn's configured and/or cached policy if the conn is supplied.
2108 2108 *
2109 2109 * Dispatch to ipsec_check_ipsecin_policy if we have policy and an
2110 2110 * encrypted packet to see if they match.
2111 2111 *
2112 2112 * Otherwise, see if the policy allows cleartext; if not, drop it on the
2113 2113 * floor.
2114 2114 */
2115 2115 mblk_t *
2116 2116 ipsec_check_global_policy(mblk_t *data_mp, conn_t *connp,
2117 2117 ipha_t *ipha, ip6_t *ip6h, ip_recv_attr_t *ira, netstack_t *ns)
2118 2118 {
2119 2119 ipsec_policy_t *p;
2120 2120 ipsec_selector_t sel;
2121 2121 boolean_t policy_present;
2122 2122 kstat_named_t *counter;
2123 2123 uint64_t pkt_unique;
2124 2124 ip_stack_t *ipst = ns->netstack_ip;
2125 2125 ipsec_stack_t *ipss = ns->netstack_ipsec;
2126 2126
2127 2127 sel.ips_is_icmp_inv_acq = 0;
2128 2128
2129 2129 ASSERT((ipha == NULL && ip6h != NULL) ||
2130 2130 (ip6h == NULL && ipha != NULL));
2131 2131
2132 2132 if (ipha != NULL)
2133 2133 policy_present = ipss->ipsec_inbound_v4_policy_present;
2134 2134 else
2135 2135 policy_present = ipss->ipsec_inbound_v6_policy_present;
2136 2136
2137 2137 if (!policy_present && connp == NULL) {
2138 2138 /*
2139 2139 * No global policy and no per-socket policy;
2140 2140 * just pass it back (but we shouldn't get here in that case)
2141 2141 */
2142 2142 return (data_mp);
2143 2143 }
2144 2144
2145 2145 /*
2146 2146 * If we have cached policy, use it.
2147 2147 * Otherwise consult system policy.
2148 2148 */
2149 2149 if ((connp != NULL) && (connp->conn_latch != NULL)) {
2150 2150 p = connp->conn_latch_in_policy;
2151 2151 if (p != NULL) {
2152 2152 IPPOL_REFHOLD(p);
2153 2153 }
2154 2154 /*
2155 2155 * Fudge sel for UNIQUE_ID setting below.
2156 2156 */
2157 2157 pkt_unique = conn_to_unique(connp, data_mp, ipha, ip6h);
2158 2158 } else {
2159 2159 /* Initialize the ports in the selector */
2160 2160 if (ipsec_init_inbound_sel(&sel, data_mp, ipha, ip6h,
2161 2161 SEL_NONE) == SELRET_NOMEM) {
2162 2162 /*
2163 2163 * Technically not a policy mismatch, but it is
2164 2164 * an internal failure.
2165 2165 */
2166 2166 ipsec_log_policy_failure(IPSEC_POLICY_MISMATCH,
2167 2167 "ipsec_init_inbound_sel", ipha, ip6h, B_TRUE, ns);
2168 2168 counter = DROPPER(ipss, ipds_spd_nomem);
2169 2169 goto fail;
2170 2170 }
2171 2171
2172 2172 /*
2173 2173 * Find the policy which best applies.
2174 2174 *
2175 2175 * If we find global policy, we should look at both
2176 2176 * local policy and global policy and see which is
2177 2177 * stronger and match accordingly.
2178 2178 *
2179 2179 * If we don't find a global policy, check with
2180 2180 * local policy alone.
2181 2181 */
2182 2182
2183 2183 p = ipsec_find_policy(IPSEC_TYPE_INBOUND, connp, &sel, ns);
2184 2184 pkt_unique = SA_UNIQUE_ID(sel.ips_remote_port,
2185 2185 sel.ips_local_port, sel.ips_protocol, 0);
2186 2186 }
2187 2187
2188 2188 if (p == NULL) {
2189 2189 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
2190 2190 /*
2191 2191 * We have no policy; default to succeeding.
2192 2192 * XXX paranoid system design doesn't do this.
2193 2193 */
2194 2194 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2195 2195 return (data_mp);
2196 2196 } else {
2197 2197 counter = DROPPER(ipss, ipds_spd_got_secure);
2198 2198 ipsec_log_policy_failure(IPSEC_POLICY_NOT_NEEDED,
2199 2199 "ipsec_check_global_policy", ipha, ip6h, B_TRUE,
2200 2200 ns);
2201 2201 goto fail;
2202 2202 }
2203 2203 }
2204 2204 if (ira->ira_flags & IRAF_IPSEC_SECURE) {
2205 2205 return (ipsec_check_ipsecin_policy(data_mp, p, ipha, ip6h,
2206 2206 pkt_unique, ira, ns));
2207 2207 }
2208 2208 if (p->ipsp_act->ipa_allow_clear) {
2209 2209 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2210 2210 IPPOL_REFRELE(p);
2211 2211 return (data_mp);
2212 2212 }
2213 2213 IPPOL_REFRELE(p);
2214 2214 /*
2215 2215 * If we reach here, we will drop the packet because it failed the
2216 2216 * global policy check because the packet was cleartext, and it
2217 2217 * should not have been.
2218 2218 */
2219 2219 ipsec_log_policy_failure(IPSEC_POLICY_MISMATCH,
2220 2220 "ipsec_check_global_policy", ipha, ip6h, B_FALSE, ns);
2221 2221 counter = DROPPER(ipss, ipds_spd_got_clear);
2222 2222
2223 2223 fail:
2224 2224 ip_drop_packet(data_mp, B_TRUE, NULL, counter,
2225 2225 &ipss->ipsec_spd_dropper);
2226 2226 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2227 2227 return (NULL);
2228 2228 }
2229 2229
2230 2230 /*
2231 2231 * We check whether an inbound datagram is a valid one
2232 2232 * to accept in clear. If it is secure, it is the job
2233 2233 * of IPSEC to log information appropriately if it
2234 2234 * suspects that it may not be the real one.
2235 2235 *
2236 2236 * It is called only while fanning out to the ULP
2237 2237 * where ULP accepts only secure data and the incoming
2238 2238 * is clear. Usually we never accept clear datagrams in
2239 2239 * such cases. ICMP is the only exception.
2240 2240 *
2241 2241 * NOTE : We don't call this function if the client (ULP)
2242 2242 * is willing to accept things in clear.
2243 2243 */
2244 2244 boolean_t
2245 2245 ipsec_inbound_accept_clear(mblk_t *mp, ipha_t *ipha, ip6_t *ip6h)
2246 2246 {
2247 2247 ushort_t iph_hdr_length;
2248 2248 icmph_t *icmph;
2249 2249 icmp6_t *icmp6;
2250 2250 uint8_t *nexthdrp;
2251 2251
2252 2252 ASSERT((ipha != NULL && ip6h == NULL) ||
2253 2253 (ipha == NULL && ip6h != NULL));
2254 2254
2255 2255 if (ip6h != NULL) {
2256 2256 iph_hdr_length = ip_hdr_length_v6(mp, ip6h);
2257 2257 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length,
2258 2258 &nexthdrp)) {
2259 2259 return (B_FALSE);
2260 2260 }
2261 2261 if (*nexthdrp != IPPROTO_ICMPV6)
2262 2262 return (B_FALSE);
2263 2263 icmp6 = (icmp6_t *)(&mp->b_rptr[iph_hdr_length]);
2264 2264 		/* Match IPv6 ICMP policy to IPv4 policy as closely as possible. */
2265 2265 switch (icmp6->icmp6_type) {
2266 2266 case ICMP6_PARAM_PROB:
2267 2267 /* Corresponds to port/proto unreach in IPv4. */
2268 2268 case ICMP6_ECHO_REQUEST:
2269 2269 /* Just like IPv4. */
2270 2270 return (B_FALSE);
2271 2271
2272 2272 case MLD_LISTENER_QUERY:
2273 2273 case MLD_LISTENER_REPORT:
2274 2274 case MLD_LISTENER_REDUCTION:
2275 2275 /*
2276 2276 			 * XXX Separate NDD in IPv4; what about here?
2277 2277 * Plus, mcast is important to ND.
2278 2278 */
2279 2279 case ICMP6_DST_UNREACH:
2280 2280 /* Corresponds to HOST/NET unreachable in IPv4. */
2281 2281 case ICMP6_PACKET_TOO_BIG:
2282 2282 case ICMP6_ECHO_REPLY:
2283 2283 /* These are trusted in IPv4. */
2284 2284 case ND_ROUTER_SOLICIT:
2285 2285 case ND_ROUTER_ADVERT:
2286 2286 case ND_NEIGHBOR_SOLICIT:
2287 2287 case ND_NEIGHBOR_ADVERT:
2288 2288 case ND_REDIRECT:
2289 2289 /* Trust ND messages for now. */
2290 2290 case ICMP6_TIME_EXCEEDED:
2291 2291 default:
2292 2292 return (B_TRUE);
2293 2293 }
2294 2294 } else {
2295 2295 /*
2296 2296 * If it is not ICMP, fail this request.
2297 2297 */
2298 2298 if (ipha->ipha_protocol != IPPROTO_ICMP) {
2299 2299 #ifdef FRAGCACHE_DEBUG
2300 2300 cmn_err(CE_WARN, "Dropping - ipha_proto = %d\n",
2301 2301 ipha->ipha_protocol);
2302 2302 #endif
2303 2303 return (B_FALSE);
2304 2304 }
2305 2305 iph_hdr_length = IPH_HDR_LENGTH(ipha);
2306 2306 icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length];
2307 2307 /*
2308 2308 * It is an insecure icmp message. Check to see whether we are
2309 2309 * willing to accept this one.
2310 2310 */
2311 2311
2312 2312 switch (icmph->icmph_type) {
2313 2313 case ICMP_ECHO_REPLY:
2314 2314 case ICMP_TIME_STAMP_REPLY:
2315 2315 case ICMP_INFO_REPLY:
2316 2316 case ICMP_ROUTER_ADVERTISEMENT:
2317 2317 /*
2318 2318 * We should not encourage clear replies if this
2319 2319 			 * client expects secure. If somebody is replying
2320 2320 			 * in clear, a malicious user watching both the
2321 2321 			 * request and reply can mount chosen-plaintext attacks.
2322 2322 * With global policy we might be just expecting secure
2323 2323 * but sending out clear. We don't know what the right
2324 2324 * thing is. We can't do much here as we can't control
2325 2325 * the sender here. Till we are sure of what to do,
2326 2326 * accept them.
2327 2327 */
2328 2328 return (B_TRUE);
2329 2329 case ICMP_ECHO_REQUEST:
2330 2330 case ICMP_TIME_STAMP_REQUEST:
2331 2331 case ICMP_INFO_REQUEST:
2332 2332 case ICMP_ADDRESS_MASK_REQUEST:
2333 2333 case ICMP_ROUTER_SOLICITATION:
2334 2334 case ICMP_ADDRESS_MASK_REPLY:
2335 2335 /*
2336 2336 * Don't accept this as somebody could be sending
2337 2337 * us plain text to get encrypted data. If we reply,
2338 2338 * it will lead to chosen plain text attack.
2339 2339 */
2340 2340 return (B_FALSE);
2341 2341 case ICMP_DEST_UNREACHABLE:
2342 2342 switch (icmph->icmph_code) {
2343 2343 case ICMP_FRAGMENTATION_NEEDED:
2344 2344 /*
2345 2345 * Be in sync with icmp_inbound, where we have
2346 2346 * already set dce_pmtu
2347 2347 */
2348 2348 #ifdef FRAGCACHE_DEBUG
2349 2349 cmn_err(CE_WARN, "ICMP frag needed\n");
2350 2350 #endif
2351 2351 return (B_TRUE);
2352 2352 case ICMP_HOST_UNREACHABLE:
2353 2353 case ICMP_NET_UNREACHABLE:
2354 2354 /*
2355 2355 * By accepting, we could reset a connection.
2356 2356 * How do we solve the problem of some
2357 2357 * intermediate router sending in-secure ICMP
2358 2358 * messages ?
2359 2359 */
2360 2360 return (B_TRUE);
2361 2361 case ICMP_PORT_UNREACHABLE:
2362 2362 case ICMP_PROTOCOL_UNREACHABLE:
2363 2363 default :
2364 2364 return (B_FALSE);
2365 2365 }
2366 2366 case ICMP_SOURCE_QUENCH:
2367 2367 /*
2368 2368 * If this is an attack, TCP will slow start
2369 2369 * because of this. Is it very harmful ?
2370 2370 */
2371 2371 return (B_TRUE);
2372 2372 case ICMP_PARAM_PROBLEM:
2373 2373 return (B_FALSE);
2374 2374 case ICMP_TIME_EXCEEDED:
2375 2375 return (B_TRUE);
2376 2376 case ICMP_REDIRECT:
2377 2377 return (B_FALSE);
2378 2378 default :
2379 2379 return (B_FALSE);
2380 2380 }
2381 2381 }
2382 2382 }
2383 2383
2384 2384 void
2385 2385 ipsec_latch_ids(ipsec_latch_t *ipl, ipsid_t *local, ipsid_t *remote)
2386 2386 {
2387 2387 mutex_enter(&ipl->ipl_lock);
2388 2388
2389 2389 if (ipl->ipl_ids_latched) {
2390 2390 /* I lost, someone else got here before me */
2391 2391 mutex_exit(&ipl->ipl_lock);
2392 2392 return;
2393 2393 }
2394 2394
2395 2395 if (local != NULL)
2396 2396 IPSID_REFHOLD(local);
2397 2397 if (remote != NULL)
2398 2398 IPSID_REFHOLD(remote);
2399 2399
2400 2400 ipl->ipl_local_cid = local;
2401 2401 ipl->ipl_remote_cid = remote;
2402 2402 ipl->ipl_ids_latched = B_TRUE;
2403 2403 mutex_exit(&ipl->ipl_lock);
2404 2404 }
2405 2405
2406 2406 void
2407 2407 ipsec_latch_inbound(conn_t *connp, ip_recv_attr_t *ira)
2408 2408 {
2409 2409 ipsa_t *sa;
2410 2410 ipsec_latch_t *ipl = connp->conn_latch;
2411 2411
2412 2412 if (!ipl->ipl_ids_latched) {
2413 2413 ipsid_t *local = NULL;
2414 2414 ipsid_t *remote = NULL;
2415 2415
2416 2416 if (!(ira->ira_flags & IRAF_LOOPBACK)) {
2417 2417 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
2418 2418 if (ira->ira_ipsec_esp_sa != NULL)
2419 2419 sa = ira->ira_ipsec_esp_sa;
2420 2420 else
2421 2421 sa = ira->ira_ipsec_ah_sa;
2422 2422 ASSERT(sa != NULL);
2423 2423 local = sa->ipsa_dst_cid;
2424 2424 remote = sa->ipsa_src_cid;
2425 2425 }
2426 2426 ipsec_latch_ids(ipl, local, remote);
2427 2427 }
2428 2428 if (ira->ira_flags & IRAF_IPSEC_SECURE) {
2429 2429 if (connp->conn_latch_in_action != NULL) {
2430 2430 /*
2431 2431 * Previously cached action. This is probably
2432 2432 * harmless, but in DEBUG kernels, check for
2433 2433 * action equality.
2434 2434 *
2435 2435 * Preserve the existing action to preserve latch
2436 2436 * invariance.
2437 2437 */
2438 2438 ASSERT(connp->conn_latch_in_action ==
2439 2439 ira->ira_ipsec_action);
2440 2440 return;
2441 2441 }
2442 2442 connp->conn_latch_in_action = ira->ira_ipsec_action;
2443 2443 IPACT_REFHOLD(connp->conn_latch_in_action);
2444 2444 }
2445 2445 }
2446 2446
2447 2447 /*
2448 2448  * Check whether the policy constraints are met for an inbound
2449 2449  * datagram; called from IP in numerous places.
2450 2450 *
2451 2451 * Note that this is not a chokepoint for inbound policy checks;
2452 2452 * see also ipsec_check_ipsecin_latch() and ipsec_check_global_policy()
2453 2453 */
2454 2454 mblk_t *
2455 2455 ipsec_check_inbound_policy(mblk_t *mp, conn_t *connp,
2456 2456 ipha_t *ipha, ip6_t *ip6h, ip_recv_attr_t *ira)
2457 2457 {
2458 2458 boolean_t ret;
2459 2459 ipsec_latch_t *ipl;
2460 2460 ipsec_action_t *ap;
2461 2461 uint64_t unique_id;
2462 2462 ipsec_stack_t *ipss;
2463 2463 ip_stack_t *ipst;
2464 2464 netstack_t *ns;
2465 2465 ipsec_policy_head_t *policy_head;
2466 2466 ipsec_policy_t *p = NULL;
2467 2467
2468 2468 ASSERT(connp != NULL);
2469 2469 ns = connp->conn_netstack;
2470 2470 ipss = ns->netstack_ipsec;
2471 2471 ipst = ns->netstack_ip;
2472 2472
2473 2473 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
2474 2474 /*
2475 2475 * This is the case where the incoming datagram is
2476 2476 * cleartext and we need to see whether this client
2477 2477 * would like to receive such untrustworthy things from
2478 2478 * the wire.
2479 2479 */
2480 2480 ASSERT(mp != NULL);
2481 2481
2482 2482 mutex_enter(&connp->conn_lock);
2483 2483 if (connp->conn_state_flags & CONN_CONDEMNED) {
2484 2484 mutex_exit(&connp->conn_lock);
2485 2485 ip_drop_packet(mp, B_TRUE, NULL,
2486 2486 DROPPER(ipss, ipds_spd_got_clear),
2487 2487 &ipss->ipsec_spd_dropper);
2488 2488 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2489 2489 return (NULL);
2490 2490 }
2491 2491 if (connp->conn_latch != NULL) {
2492 2492 /* Hold a reference in case the conn is closing */
2493 2493 p = connp->conn_latch_in_policy;
2494 2494 if (p != NULL)
2495 2495 IPPOL_REFHOLD(p);
2496 2496 mutex_exit(&connp->conn_lock);
2497 2497 /*
2498 2498 * Policy is cached in the conn.
2499 2499 */
2500 2500 if (p != NULL && !p->ipsp_act->ipa_allow_clear) {
2501 2501 ret = ipsec_inbound_accept_clear(mp,
2502 2502 ipha, ip6h);
2503 2503 if (ret) {
2504 2504 BUMP_MIB(&ipst->ips_ip_mib,
2505 2505 ipsecInSucceeded);
2506 2506 IPPOL_REFRELE(p);
2507 2507 return (mp);
2508 2508 } else {
2509 2509 ipsec_log_policy_failure(
2510 2510 IPSEC_POLICY_MISMATCH,
2511 2511 "ipsec_check_inbound_policy", ipha,
2512 2512 ip6h, B_FALSE, ns);
2513 2513 ip_drop_packet(mp, B_TRUE, NULL,
2514 2514 DROPPER(ipss, ipds_spd_got_clear),
2515 2515 &ipss->ipsec_spd_dropper);
2516 2516 BUMP_MIB(&ipst->ips_ip_mib,
2517 2517 ipsecInFailed);
2518 2518 IPPOL_REFRELE(p);
2519 2519 return (NULL);
2520 2520 }
2521 2521 } else {
2522 2522 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2523 2523 if (p != NULL)
2524 2524 IPPOL_REFRELE(p);
2525 2525 return (mp);
2526 2526 }
2527 2527 } else {
2528 2528 policy_head = connp->conn_policy;
2529 2529
2530 2530 /* Hold a reference in case the conn is closing */
2531 2531 if (policy_head != NULL)
2532 2532 IPPH_REFHOLD(policy_head);
2533 2533 mutex_exit(&connp->conn_lock);
2534 2534 /*
2535 2535 * As this is a non-hardbound connection we need
2536 2536 * to look at both per-socket policy and global
2537 2537 * policy.
2538 2538 */
2539 2539 mp = ipsec_check_global_policy(mp, connp,
2540 2540 ipha, ip6h, ira, ns);
2541 2541 if (policy_head != NULL)
2542 2542 IPPH_REFRELE(policy_head, ns);
2543 2543 return (mp);
2544 2544 }
2545 2545 }
2546 2546
2547 2547 mutex_enter(&connp->conn_lock);
2548 2548 /* Connection is closing */
2549 2549 if (connp->conn_state_flags & CONN_CONDEMNED) {
2550 2550 mutex_exit(&connp->conn_lock);
2551 2551 ip_drop_packet(mp, B_TRUE, NULL,
2552 2552 DROPPER(ipss, ipds_spd_got_clear),
2553 2553 &ipss->ipsec_spd_dropper);
2554 2554 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2555 2555 return (NULL);
2556 2556 }
2557 2557
2558 2558 /*
2559 2559 	 * Once a connection is latched it remains so for life; the conn_latch
2560 2560 	 * pointer on the conn does not change. We simply initialize ipl here
2561 2561 	 * because the earlier initialization was done only in the cleartext case.
2562 2562 */
2563 2563 if ((ipl = connp->conn_latch) == NULL) {
2564 2564 mblk_t *retmp;
2565 2565 policy_head = connp->conn_policy;
2566 2566
2567 2567 /* Hold a reference in case the conn is closing */
2568 2568 if (policy_head != NULL)
2569 2569 IPPH_REFHOLD(policy_head);
2570 2570 mutex_exit(&connp->conn_lock);
2571 2571 /*
2572 2572 * We don't have policies cached in the conn
2573 2573 * for this stream. So, look at the global
2574 2574 * policy. It will check against conn or global
2575 2575 * depending on whichever is stronger.
2576 2576 */
2577 2577 retmp = ipsec_check_global_policy(mp, connp,
2578 2578 ipha, ip6h, ira, ns);
2579 2579 if (policy_head != NULL)
2580 2580 IPPH_REFRELE(policy_head, ns);
2581 2581 return (retmp);
2582 2582 }
2583 2583
2584 2584 IPLATCH_REFHOLD(ipl);
2585 2585 /* Hold reference on conn_latch_in_action in case conn is closing */
2586 2586 ap = connp->conn_latch_in_action;
2587 2587 if (ap != NULL)
2588 2588 IPACT_REFHOLD(ap);
2589 2589 mutex_exit(&connp->conn_lock);
2590 2590
2591 2591 if (ap != NULL) {
2592 2592 /* Policy is cached & latched; fast(er) path */
2593 2593 const char *reason;
2594 2594 kstat_named_t *counter;
2595 2595
2596 2596 if (ipsec_check_ipsecin_latch(ira, mp, ipl, ap,
2597 2597 ipha, ip6h, &reason, &counter, connp, ns)) {
2598 2598 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2599 2599 IPLATCH_REFRELE(ipl);
2600 2600 IPACT_REFRELE(ap);
2601 2601 return (mp);
2602 2602 }
2603 2603 ipsec_rl_strlog(ns, IP_MOD_ID, 0, 0,
2604 2604 SL_ERROR|SL_WARN|SL_CONSOLE,
2605 2605 "ipsec inbound policy mismatch: %s, packet dropped\n",
2606 2606 reason);
2607 2607 ip_drop_packet(mp, B_TRUE, NULL, counter,
2608 2608 &ipss->ipsec_spd_dropper);
2609 2609 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2610 2610 IPLATCH_REFRELE(ipl);
2611 2611 IPACT_REFRELE(ap);
2612 2612 return (NULL);
2613 2613 }
2614 2614 if ((p = connp->conn_latch_in_policy) == NULL) {
2615 2615 ipsec_weird_null_inbound_policy++;
2616 2616 IPLATCH_REFRELE(ipl);
2617 2617 return (mp);
2618 2618 }
2619 2619
2620 2620 unique_id = conn_to_unique(connp, mp, ipha, ip6h);
2621 2621 IPPOL_REFHOLD(p);
2622 2622 mp = ipsec_check_ipsecin_policy(mp, p, ipha, ip6h, unique_id, ira, ns);
2623 2623 /*
2624 2624 	 * NOTE: ipsecIn{Failed,Succeeded} bumped by
2625 2625 * ipsec_check_ipsecin_policy().
2626 2626 */
2627 2627 if (mp != NULL)
2628 2628 ipsec_latch_inbound(connp, ira);
2629 2629 IPLATCH_REFRELE(ipl);
2630 2630 return (mp);
2631 2631 }
2632 2632
2633 2633 /*
2634 2634  * Return how many bytes precede the header at hptr within the chain "mp";
2635 2635  * handles tunnel-mode and ICMP cases where that header is not at b_rptr.
2635 2635 */
2636 2636 static int
2637 2637 prepended_length(mblk_t *mp, uintptr_t hptr)
2638 2638 {
2639 2639 int rc = 0;
2640 2640
2641 2641 while (mp != NULL) {
2642 2642 if (hptr >= (uintptr_t)mp->b_rptr && hptr <
2643 2643 (uintptr_t)mp->b_wptr) {
2644 2644 rc += (int)(hptr - (uintptr_t)mp->b_rptr);
2645 2645 break; /* out of while loop */
2646 2646 }
2647 2647 rc += (int)MBLKL(mp);
2648 2648 mp = mp->b_cont;
2649 2649 }
2650 2650
2651 2651 if (mp == NULL) {
2652 2652 /*
2653 2653 * IF (big IF) we make it here by naturally exiting the loop,
2654 2654 		 * then the header at hptr isn't in the mblk chain "mp" at all.
2655 2655 *
2656 2656 * The only case where this happens is with a reversed IP
2657 2657 * header that gets passed up by inbound ICMP processing.
2658 2658 * This unfortunately triggers longstanding bug 6478464. For
2659 2659 * now, just pass up 0 for the answer.
2660 2660 */
2661 2661 #ifdef DEBUG_NOT_UNTIL_6478464
2662 2662 ASSERT(mp != NULL);
2663 2663 #endif
2664 2664 rc = 0;
2665 2665 }
2666 2666
2667 2667 return (rc);
2668 2668 }
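A hedged illustration of what prepended_length() computes, assuming the outer headers all sit in the first mblk (the helper and offsets are illustrative only):

/*
 * Hedged sketch, not part of spd.c: the inner IPv6 header embedded in an
 * ICMPv6 error starts past the outer IPv6 header (40 bytes) and the
 * ICMPv6 header (8 bytes), so prepended_length() reports 48.
 */
static int
example_icmp6_outer_len(mblk_t *mp)
{
	ip6_t *inner_ip6h;

	inner_ip6h = (ip6_t *)(mp->b_rptr + IPV6_HDR_LEN + sizeof (icmp6_t));
	return (prepended_length(mp, (uintptr_t)inner_ip6h));	/* 48 */
}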
2669 2669
2670 2670 /*
2671 2671 * Returns:
2672 2672 *
2673 2673 * SELRET_NOMEM --> msgpullup() needed to gather things failed.
2674 2674 * SELRET_BADPKT --> If we're being called after tunnel-mode fragment
2675 2675 * gathering, the initial fragment is too short for
2676 2676 * useful data. Only returned if SEL_TUNNEL_FIRSTFRAG is
2677 2677 * set.
2678 2678 * SELRET_SUCCESS --> "sel" now has initialized IPsec selector data.
2679 2679 * SELRET_TUNFRAG --> This is a fragment in a tunnel-mode packet. Caller
2680 2680 * should put this packet in a fragment-gathering queue.
2681 2681 * Only returned if SEL_TUNNEL_MODE and SEL_PORT_POLICY
2682 2682 * is set.
2683 2683 *
2684 2684 * Note that ipha/ip6h can be in a different mblk (mp->b_cont) in the case
2685 2685 * of tunneled packets.
2686 2686 * Also, mp->b_rptr can be an ICMP error where ipha/ip6h is the packet in
2687 2687 * error past the ICMP error.
2688 2688 */
2689 2689 static selret_t
2690 2690 ipsec_init_inbound_sel(ipsec_selector_t *sel, mblk_t *mp, ipha_t *ipha,
2691 2691 ip6_t *ip6h, uint8_t sel_flags)
2692 2692 {
2693 2693 uint16_t *ports;
2694 2694 int outer_hdr_len = 0; /* For ICMP or tunnel-mode cases... */
2695 2695 ushort_t hdr_len;
2696 2696 mblk_t *spare_mp = NULL;
2697 2697 uint8_t *nexthdrp, *transportp;
2698 2698 uint8_t nexthdr;
2699 2699 uint8_t icmp_proto;
2700 2700 ip_pkt_t ipp;
2701 2701 boolean_t port_policy_present = (sel_flags & SEL_PORT_POLICY);
2702 2702 boolean_t is_icmp = (sel_flags & SEL_IS_ICMP);
2703 2703 boolean_t tunnel_mode = (sel_flags & SEL_TUNNEL_MODE);
2704 2704 boolean_t post_frag = (sel_flags & SEL_POST_FRAG);
2705 2705
2706 2706 ASSERT((ipha == NULL && ip6h != NULL) ||
2707 2707 (ipha != NULL && ip6h == NULL));
2708 2708
2709 2709 if (ip6h != NULL) {
2710 2710 outer_hdr_len = prepended_length(mp, (uintptr_t)ip6h);
2711 2711 nexthdr = ip6h->ip6_nxt;
2712 2712 icmp_proto = IPPROTO_ICMPV6;
2713 2713 sel->ips_isv4 = B_FALSE;
2714 2714 sel->ips_local_addr_v6 = ip6h->ip6_dst;
2715 2715 sel->ips_remote_addr_v6 = ip6h->ip6_src;
2716 2716
2717 2717 bzero(&ipp, sizeof (ipp));
2718 2718
2719 2719 switch (nexthdr) {
2720 2720 case IPPROTO_HOPOPTS:
2721 2721 case IPPROTO_ROUTING:
2722 2722 case IPPROTO_DSTOPTS:
2723 2723 case IPPROTO_FRAGMENT:
2724 2724 /*
2725 2725 * Use ip_hdr_length_nexthdr_v6(). And have a spare
2726 2726 * mblk that's contiguous to feed it
2727 2727 */
2728 2728 if ((spare_mp = msgpullup(mp, -1)) == NULL)
2729 2729 return (SELRET_NOMEM);
2730 2730 if (!ip_hdr_length_nexthdr_v6(spare_mp,
2731 2731 (ip6_t *)(spare_mp->b_rptr + outer_hdr_len),
2732 2732 &hdr_len, &nexthdrp)) {
2733 2733 /* Malformed packet - caller frees. */
2734 2734 ipsec_freemsg_chain(spare_mp);
2735 2735 return (SELRET_BADPKT);
2736 2736 }
2737 2737 /* Repopulate now that we have the whole packet */
2738 2738 ip6h = (ip6_t *)(spare_mp->b_rptr + outer_hdr_len);
2739 2739 (void) ip_find_hdr_v6(spare_mp, ip6h, B_FALSE, &ipp,
2740 2740 NULL);
2741 2741 nexthdr = *nexthdrp;
2742 2742 /* We can just extract based on hdr_len now. */
2743 2743 break;
2744 2744 default:
2745 2745 (void) ip_find_hdr_v6(mp, ip6h, B_FALSE, &ipp, NULL);
2746 2746 hdr_len = IPV6_HDR_LEN;
2747 2747 break;
2748 2748 }
2749 2749 if (port_policy_present && IS_V6_FRAGMENT(ipp) && !is_icmp) {
2750 2750 /* IPv6 Fragment */
2751 2751 ipsec_freemsg_chain(spare_mp);
2752 2752 return (SELRET_TUNFRAG);
2753 2753 }
2754 2754 transportp = (uint8_t *)ip6h + hdr_len;
2755 2755 } else {
2756 2756 outer_hdr_len = prepended_length(mp, (uintptr_t)ipha);
2757 2757 icmp_proto = IPPROTO_ICMP;
2758 2758 sel->ips_isv4 = B_TRUE;
2759 2759 sel->ips_local_addr_v4 = ipha->ipha_dst;
2760 2760 sel->ips_remote_addr_v4 = ipha->ipha_src;
2761 2761 nexthdr = ipha->ipha_protocol;
2762 2762 hdr_len = IPH_HDR_LENGTH(ipha);
2763 2763
2764 2764 if (port_policy_present &&
2765 2765 IS_V4_FRAGMENT(ipha->ipha_fragment_offset_and_flags) &&
2766 2766 !is_icmp) {
2767 2767 /* IPv4 Fragment */
2768 2768 ipsec_freemsg_chain(spare_mp);
2769 2769 return (SELRET_TUNFRAG);
2770 2770 }
2771 2771 transportp = (uint8_t *)ipha + hdr_len;
2772 2772 }
2773 2773 sel->ips_protocol = nexthdr;
2774 2774
2775 2775 if ((nexthdr != IPPROTO_TCP && nexthdr != IPPROTO_UDP &&
2776 2776 nexthdr != IPPROTO_SCTP && nexthdr != icmp_proto) ||
2777 2777 (!port_policy_present && !post_frag && tunnel_mode)) {
2778 2778 sel->ips_remote_port = sel->ips_local_port = 0;
2779 2779 ipsec_freemsg_chain(spare_mp);
2780 2780 return (SELRET_SUCCESS);
2781 2781 }
2782 2782
2783 2783 if (transportp + 4 > mp->b_wptr) {
2784 2784 /* If we didn't pullup a copy already, do so now. */
2785 2785 /*
2786 2786 * XXX performance, will upper-layers frequently split TCP/UDP
2787 2787 * apart from IP or options? If so, perhaps we should revisit
2788 2788 * the spare_mp strategy.
2789 2789 */
2790 2790 ipsec_hdr_pullup_needed++;
2791 2791 if (spare_mp == NULL &&
2792 2792 (spare_mp = msgpullup(mp, -1)) == NULL) {
2793 2793 return (SELRET_NOMEM);
2794 2794 }
2795 2795 transportp = &spare_mp->b_rptr[hdr_len + outer_hdr_len];
2796 2796 }
2797 2797
2798 2798 if (nexthdr == icmp_proto) {
2799 2799 sel->ips_icmp_type = *transportp++;
2800 2800 sel->ips_icmp_code = *transportp;
2801 2801 sel->ips_remote_port = sel->ips_local_port = 0;
2802 2802 } else {
2803 2803 ports = (uint16_t *)transportp;
2804 2804 sel->ips_remote_port = *ports++;
2805 2805 sel->ips_local_port = *ports;
2806 2806 }
2807 2807 ipsec_freemsg_chain(spare_mp);
2808 2808 return (SELRET_SUCCESS);
2809 2809 }
2810 2810
2811 2811 /*
2812 2812 * This is called with a b_next chain of messages from the fragcache code,
2813 2813 * hence it needs to discard a chain on error.
2814 2814 */
2815 2815 static boolean_t
2816 2816 ipsec_init_outbound_ports(ipsec_selector_t *sel, mblk_t *mp, ipha_t *ipha,
2817 2817 ip6_t *ip6h, int outer_hdr_len, ipsec_stack_t *ipss)
2818 2818 {
2819 2819 /*
2820 2820 * XXX cut&paste shared with ipsec_init_inbound_sel
2821 2821 */
2822 2822 uint16_t *ports;
2823 2823 ushort_t hdr_len;
2824 2824 mblk_t *spare_mp = NULL;
2825 2825 uint8_t *nexthdrp;
2826 2826 uint8_t nexthdr;
2827 2827 uint8_t *typecode;
2828 2828 uint8_t check_proto;
2829 2829
2830 2830 ASSERT((ipha == NULL && ip6h != NULL) ||
2831 2831 (ipha != NULL && ip6h == NULL));
2832 2832
2833 2833 if (ip6h != NULL) {
2834 2834 check_proto = IPPROTO_ICMPV6;
2835 2835 nexthdr = ip6h->ip6_nxt;
2836 2836 switch (nexthdr) {
2837 2837 case IPPROTO_HOPOPTS:
2838 2838 case IPPROTO_ROUTING:
2839 2839 case IPPROTO_DSTOPTS:
2840 2840 case IPPROTO_FRAGMENT:
2841 2841 /*
2842 2842 * Use ip_hdr_length_nexthdr_v6(). And have a spare
2843 2843 * mblk that's contiguous to feed it
2844 2844 */
2845 2845 spare_mp = msgpullup(mp, -1);
2846 2846 if (spare_mp == NULL ||
2847 2847 !ip_hdr_length_nexthdr_v6(spare_mp,
2848 2848 (ip6_t *)(spare_mp->b_rptr + outer_hdr_len),
2849 2849 &hdr_len, &nexthdrp)) {
2850 2850 /* Always works, even if NULL. */
2851 2851 ipsec_freemsg_chain(spare_mp);
2852 2852 ip_drop_packet_chain(mp, B_FALSE, NULL,
2853 2853 DROPPER(ipss, ipds_spd_nomem),
2854 2854 &ipss->ipsec_spd_dropper);
2855 2855 return (B_FALSE);
2856 2856 } else {
2857 2857 nexthdr = *nexthdrp;
2858 2858 /* We can just extract based on hdr_len now. */
2859 2859 }
2860 2860 break;
2861 2861 default:
2862 2862 hdr_len = IPV6_HDR_LEN;
2863 2863 break;
2864 2864 }
2865 2865 } else {
2866 2866 check_proto = IPPROTO_ICMP;
2867 2867 hdr_len = IPH_HDR_LENGTH(ipha);
2868 2868 nexthdr = ipha->ipha_protocol;
2869 2869 }
2870 2870
2871 2871 sel->ips_protocol = nexthdr;
2872 2872 if (nexthdr != IPPROTO_TCP && nexthdr != IPPROTO_UDP &&
2873 2873 nexthdr != IPPROTO_SCTP && nexthdr != check_proto) {
2874 2874 sel->ips_local_port = sel->ips_remote_port = 0;
2875 2875 ipsec_freemsg_chain(spare_mp); /* Always works, even if NULL */
2876 2876 return (B_TRUE);
2877 2877 }
2878 2878
2879 2879 if (&mp->b_rptr[hdr_len] + 4 + outer_hdr_len > mp->b_wptr) {
2880 2880 /* If we didn't pullup a copy already, do so now. */
2881 2881 /*
2882 2882 * XXX performance, will upper-layers frequently split TCP/UDP
2883 2883 * apart from IP or options? If so, perhaps we should revisit
2884 2884 * the spare_mp strategy.
2885 2885 *
2886 2886 * XXX should this be msgpullup(mp, hdr_len+4) ???
2887 2887 */
2888 2888 if (spare_mp == NULL &&
2889 2889 (spare_mp = msgpullup(mp, -1)) == NULL) {
2890 2890 ip_drop_packet_chain(mp, B_FALSE, NULL,
2891 2891 DROPPER(ipss, ipds_spd_nomem),
2892 2892 &ipss->ipsec_spd_dropper);
2893 2893 return (B_FALSE);
2894 2894 }
2895 2895 ports = (uint16_t *)&spare_mp->b_rptr[hdr_len + outer_hdr_len];
2896 2896 } else {
2897 2897 ports = (uint16_t *)&mp->b_rptr[hdr_len + outer_hdr_len];
2898 2898 }
2899 2899
2900 2900 if (nexthdr == check_proto) {
2901 2901 typecode = (uint8_t *)ports;
2902 2902 sel->ips_icmp_type = *typecode++;
2903 2903 sel->ips_icmp_code = *typecode;
2904 2904 sel->ips_remote_port = sel->ips_local_port = 0;
2905 2905 } else {
2906 2906 sel->ips_local_port = *ports++;
2907 2907 sel->ips_remote_port = *ports;
2908 2908 }
2909 2909 ipsec_freemsg_chain(spare_mp); /* Always works, even if NULL */
2910 2910 return (B_TRUE);
2911 2911 }
2912 2912
2913 2913 /*
2914 2914  * Prepend an mblk with an ipsec_crypto_t to the message chain.
2915 2915 * Frees the argument and returns NULL should the allocation fail.
2916 2916 * Returns the pointer to the crypto data part.
2917 2917 */
2918 2918 mblk_t *
2919 2919 ipsec_add_crypto_data(mblk_t *data_mp, ipsec_crypto_t **icp)
2920 2920 {
2921 2921 mblk_t *mp;
2922 2922
2923 2923 mp = allocb(sizeof (ipsec_crypto_t), BPRI_MED);
2924 2924 if (mp == NULL) {
2925 2925 freemsg(data_mp);
2926 2926 return (NULL);
2927 2927 }
2928 2928 bzero(mp->b_rptr, sizeof (ipsec_crypto_t));
2929 2929 mp->b_wptr += sizeof (ipsec_crypto_t);
2930 2930 mp->b_cont = data_mp;
2931 2931 mp->b_datap->db_type = M_EVENT; /* For ASSERT */
2932 2932 *icp = (ipsec_crypto_t *)mp->b_rptr;
2933 2933 return (mp);
2934 2934 }
2935 2935
2936 2936 /*
2937 2937 * Remove what was prepended above. Return b_cont and a pointer to the
2938 2938 * crypto data.
2939 2939 * The caller must call ipsec_free_crypto_data for mblk once it is done
2940 2940 * with the crypto data.
2941 2941 */
2942 2942 mblk_t *
2943 2943 ipsec_remove_crypto_data(mblk_t *crypto_mp, ipsec_crypto_t **icp)
2944 2944 {
2945 2945 ASSERT(crypto_mp->b_datap->db_type == M_EVENT);
2946 2946 ASSERT(MBLKL(crypto_mp) == sizeof (ipsec_crypto_t));
2947 2947
2948 2948 *icp = (ipsec_crypto_t *)crypto_mp->b_rptr;
2949 2949 return (crypto_mp->b_cont);
2950 2950 }
2951 2951
2952 2952 /*
2953 2953 * Free what was prepended above. Return b_cont.
2954 2954 */
2955 2955 mblk_t *
2956 2956 ipsec_free_crypto_data(mblk_t *crypto_mp)
2957 2957 {
2958 2958 mblk_t *mp;
2959 2959
2960 2960 ASSERT(crypto_mp->b_datap->db_type == M_EVENT);
2961 2961 ASSERT(MBLKL(crypto_mp) == sizeof (ipsec_crypto_t));
2962 2962
2963 2963 mp = crypto_mp->b_cont;
2964 2964 freeb(crypto_mp);
2965 2965 return (mp);
2966 2966 }
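Taken together, the three helpers above form a wrap/unwrap pattern; a hedged usage sketch follows (the caller shown is hypothetical):

/*
 * Hedged sketch, not part of spd.c: prepend the ipsec_crypto_t header
 * before handing the chain to the crypto layer, then peel it back off
 * and free it when the request completes.
 */
static mblk_t *
example_crypto_roundtrip(mblk_t *data_mp)
{
	ipsec_crypto_t *ic;
	mblk_t *crypto_mp, *payload;

	crypto_mp = ipsec_add_crypto_data(data_mp, &ic);
	if (crypto_mp == NULL)
		return (NULL);		/* data_mp has already been freed */

	/* ... the crypto completion path fills in and consumes *ic ... */

	payload = ipsec_remove_crypto_data(crypto_mp, &ic);
	ASSERT(payload == crypto_mp->b_cont);
	(void) ipsec_free_crypto_data(crypto_mp);	/* frees the header mblk */
	return (payload);
}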
2967 2967
2968 2968 /*
2969 2969 * Create an ipsec_action_t based on the way an inbound packet was protected.
2970 2970 * Used to reflect traffic back to a sender.
2971 2971 *
2972 2972 * We don't bother interning the action into the hash table.
2973 2973 */
2974 2974 ipsec_action_t *
2975 2975 ipsec_in_to_out_action(ip_recv_attr_t *ira)
2976 2976 {
2977 2977 ipsa_t *ah_assoc, *esp_assoc;
2978 2978 uint_t auth_alg = 0, encr_alg = 0, espa_alg = 0;
2979 2979 ipsec_action_t *ap;
2980 2980 boolean_t unique;
2981 2981
2982 2982 ap = kmem_cache_alloc(ipsec_action_cache, KM_NOSLEEP);
2983 2983
2984 2984 if (ap == NULL)
2985 2985 return (NULL);
2986 2986
2987 2987 bzero(ap, sizeof (*ap));
2988 2988 HASH_NULL(ap, ipa_hash);
2989 2989 ap->ipa_next = NULL;
2990 2990 ap->ipa_refs = 1;
2991 2991
2992 2992 /*
2993 2993 * Get the algorithms that were used for this packet.
2994 2994 */
2995 2995 ap->ipa_act.ipa_type = IPSEC_ACT_APPLY;
2996 2996 ap->ipa_act.ipa_log = 0;
2997 2997 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
2998 2998
2999 2999 ah_assoc = ira->ira_ipsec_ah_sa;
3000 3000 ap->ipa_act.ipa_apply.ipp_use_ah = (ah_assoc != NULL);
3001 3001
3002 3002 esp_assoc = ira->ira_ipsec_esp_sa;
3003 3003 ap->ipa_act.ipa_apply.ipp_use_esp = (esp_assoc != NULL);
3004 3004
3005 3005 if (esp_assoc != NULL) {
3006 3006 encr_alg = esp_assoc->ipsa_encr_alg;
3007 3007 espa_alg = esp_assoc->ipsa_auth_alg;
3008 3008 ap->ipa_act.ipa_apply.ipp_use_espa = (espa_alg != 0);
3009 3009 }
3010 3010 if (ah_assoc != NULL)
3011 3011 auth_alg = ah_assoc->ipsa_auth_alg;
3012 3012
3013 3013 ap->ipa_act.ipa_apply.ipp_encr_alg = (uint8_t)encr_alg;
3014 3014 ap->ipa_act.ipa_apply.ipp_auth_alg = (uint8_t)auth_alg;
3015 3015 ap->ipa_act.ipa_apply.ipp_esp_auth_alg = (uint8_t)espa_alg;
3016 3016 ap->ipa_act.ipa_apply.ipp_use_se =
3017 3017 !!(ira->ira_flags & IRAF_IPSEC_DECAPS);
3018 3018 unique = B_FALSE;
3019 3019
3020 3020 if (esp_assoc != NULL) {
3021 3021 ap->ipa_act.ipa_apply.ipp_espa_minbits =
3022 3022 esp_assoc->ipsa_authkeybits;
3023 3023 ap->ipa_act.ipa_apply.ipp_espa_maxbits =
3024 3024 esp_assoc->ipsa_authkeybits;
3025 3025 ap->ipa_act.ipa_apply.ipp_espe_minbits =
3026 3026 esp_assoc->ipsa_encrkeybits;
3027 3027 ap->ipa_act.ipa_apply.ipp_espe_maxbits =
3028 3028 esp_assoc->ipsa_encrkeybits;
3029 3029 ap->ipa_act.ipa_apply.ipp_km_proto = esp_assoc->ipsa_kmp;
3030 3030 ap->ipa_act.ipa_apply.ipp_km_cookie = esp_assoc->ipsa_kmc;
3031 3031 if (esp_assoc->ipsa_flags & IPSA_F_UNIQUE)
3032 3032 unique = B_TRUE;
3033 3033 }
3034 3034 if (ah_assoc != NULL) {
3035 3035 ap->ipa_act.ipa_apply.ipp_ah_minbits =
3036 3036 ah_assoc->ipsa_authkeybits;
3037 3037 ap->ipa_act.ipa_apply.ipp_ah_maxbits =
3038 3038 ah_assoc->ipsa_authkeybits;
3039 3039 ap->ipa_act.ipa_apply.ipp_km_proto = ah_assoc->ipsa_kmp;
3040 3040 ap->ipa_act.ipa_apply.ipp_km_cookie = ah_assoc->ipsa_kmc;
3041 3041 if (ah_assoc->ipsa_flags & IPSA_F_UNIQUE)
3042 3042 unique = B_TRUE;
3043 3043 }
3044 3044 ap->ipa_act.ipa_apply.ipp_use_unique = unique;
3045 3045 ap->ipa_want_unique = unique;
3046 3046 ap->ipa_allow_clear = B_FALSE;
3047 3047 ap->ipa_want_se = !!(ira->ira_flags & IRAF_IPSEC_DECAPS);
3048 3048 ap->ipa_want_ah = (ah_assoc != NULL);
3049 3049 ap->ipa_want_esp = (esp_assoc != NULL);
3050 3050
3051 3051 ap->ipa_ovhd = ipsec_act_ovhd(&ap->ipa_act);
3052 3052
3053 3053 ap->ipa_act.ipa_apply.ipp_replay_depth = 0; /* don't care */
3054 3054
3055 3055 return (ap);
3056 3056 }
3057 3057
3058 3058
3059 3059 /*
3060 3060 * Compute the worst-case amount of extra space required by an action.
3061 3061 * Note that, because of the ESP considerations listed below, this is
3062 3062 * actually not the same as the best-case reduction in the MTU; in the
3063 3063 * future, we should pass additional information to this function to
3064 3064 * allow the actual MTU impact to be computed.
3065 3065 *
3066 3066 * AH: Revisit this if we implement algorithms with
3067 3067 * a verifier size of more than 12 bytes.
3068 3068 *
3069 3069 * ESP: A more exact but more messy computation would take into
3070 3070 * account the interaction between the cipher block size and the
3071 3071 * effective MTU, yielding the inner payload size which reflects a
3072 3072 * packet with *minimum* ESP padding..
3073 3073 */
3074 3074 int32_t
3075 3075 ipsec_act_ovhd(const ipsec_act_t *act)
3076 3076 {
3077 3077 int32_t overhead = 0;
3078 3078
3079 3079 if (act->ipa_type == IPSEC_ACT_APPLY) {
3080 3080 const ipsec_prot_t *ipp = &act->ipa_apply;
3081 3081
3082 3082 if (ipp->ipp_use_ah)
3083 3083 overhead += IPSEC_MAX_AH_HDR_SIZE;
3084 3084 if (ipp->ipp_use_esp) {
3085 3085 overhead += IPSEC_MAX_ESP_HDR_SIZE;
3086 3086 overhead += sizeof (struct udphdr);
3087 3087 }
3088 3088 if (ipp->ipp_use_se)
3089 3089 overhead += IP_SIMPLE_HDR_LENGTH;
3090 3090 }
3091 3091 return (overhead);
3092 3092 }
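A worked example of the overhead computation, as a hedged sketch (the helper is hypothetical):

/*
 * Hedged sketch, not part of spd.c: an AH+ESP action with
 * self-encapsulation accumulates all of the terms above.
 */
static int32_t
example_ah_esp_se_ovhd(void)
{
	ipsec_act_t act;

	bzero(&act, sizeof (act));
	act.ipa_type = IPSEC_ACT_APPLY;
	act.ipa_apply.ipp_use_ah = 1;
	act.ipa_apply.ipp_use_esp = 1;	/* also counts a UDP header */
	act.ipa_apply.ipp_use_se = 1;	/* plus one more IPv4 header */

	/*
	 * Returns IPSEC_MAX_AH_HDR_SIZE + IPSEC_MAX_ESP_HDR_SIZE +
	 * sizeof (struct udphdr) + IP_SIMPLE_HDR_LENGTH for this action.
	 */
	return (ipsec_act_ovhd(&act));
}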
3093 3093
3094 3094 /*
3095 3095 * This hash function is used only when creating policies and thus is not
3096 3096 * performance-critical for packet flows.
3097 3097 *
3098 3098 * Future work: canonicalize the structures hashed with this (i.e.,
3099 3099 * zeroize padding) so the hash works correctly.
3100 3100 */
3101 3101 /* ARGSUSED */
3102 3102 static uint32_t
3103 3103 policy_hash(int size, const void *start, const void *end)
3104 3104 {
3105 3105 return (0);
3106 3106 }
3107 3107
3108 3108
3109 3109 /*
3110 3110 * Hash function macros for each address type.
3111 3111 *
3112 3112 * The IPV6 hash function assumes that the low order 32-bits of the
3113 3113 * address (typically containing the low order 24 bits of the mac
3114 3114 * address) are reasonably well-distributed. Revisit this if we run
3115 3115 * into trouble from lots of collisions on ::1 addresses and the like
3116 3116 * (seems unlikely).
3117 3117 */
3118 3118 #define IPSEC_IPV4_HASH(a, n) ((a) % (n))
3119 3119 #define IPSEC_IPV6_HASH(a, n) (((a).s6_addr32[3]) % (n))
3120 3120
3121 3121 /*
3122 3122 * These two hash functions should produce coordinated values
3123 3123 * but have slightly different roles.
3124 3124 */
3125 3125 static uint32_t
3126 3126 selkey_hash(const ipsec_selkey_t *selkey, netstack_t *ns)
3127 3127 {
3128 3128 uint32_t valid = selkey->ipsl_valid;
3129 3129 ipsec_stack_t *ipss = ns->netstack_ipsec;
3130 3130
3131 3131 if (!(valid & IPSL_REMOTE_ADDR))
3132 3132 return (IPSEC_SEL_NOHASH);
3133 3133
3134 3134 if (valid & IPSL_IPV4) {
3135 3135 if (selkey->ipsl_remote_pfxlen == 32) {
3136 3136 return (IPSEC_IPV4_HASH(selkey->ipsl_remote.ipsad_v4,
3137 3137 ipss->ipsec_spd_hashsize));
3138 3138 }
3139 3139 }
3140 3140 if (valid & IPSL_IPV6) {
3141 3141 if (selkey->ipsl_remote_pfxlen == 128) {
3142 3142 return (IPSEC_IPV6_HASH(selkey->ipsl_remote.ipsad_v6,
3143 3143 ipss->ipsec_spd_hashsize));
3144 3144 }
3145 3145 }
3146 3146 return (IPSEC_SEL_NOHASH);
3147 3147 }
3148 3148
3149 3149 static uint32_t
3150 3150 selector_hash(ipsec_selector_t *sel, ipsec_policy_root_t *root)
3151 3151 {
3152 3152 if (sel->ips_isv4) {
3153 3153 return (IPSEC_IPV4_HASH(sel->ips_remote_addr_v4,
3154 3154 root->ipr_nchains));
3155 3155 }
3156 3156 return (IPSEC_IPV6_HASH(sel->ips_remote_addr_v6, root->ipr_nchains));
3157 3157 }
3158 3158
3159 3159 /*
3160 3160 * Intern actions into the action hash table.
3161 3161 */
3162 3162 ipsec_action_t *
3163 3163 ipsec_act_find(const ipsec_act_t *a, int n, netstack_t *ns)
3164 3164 {
3165 3165 int i;
3166 3166 uint32_t hval;
3167 3167 ipsec_action_t *ap;
3168 3168 ipsec_action_t *prev = NULL;
3169 3169 int32_t overhead, maxovhd = 0;
3170 3170 boolean_t allow_clear = B_FALSE;
3171 3171 boolean_t want_ah = B_FALSE;
3172 3172 boolean_t want_esp = B_FALSE;
3173 3173 boolean_t want_se = B_FALSE;
3174 3174 boolean_t want_unique = B_FALSE;
3175 3175 ipsec_stack_t *ipss = ns->netstack_ipsec;
3176 3176
3177 3177 /*
3178 3178 * TODO: should canonicalize a[] (i.e., zeroize any padding)
3179 3179 * so we can use a non-trivial policy_hash function.
3180 3180 */
3181 3181 for (i = n-1; i >= 0; i--) {
3182 3182 hval = policy_hash(IPSEC_ACTION_HASH_SIZE, &a[i], &a[n]);
3183 3183
3184 3184 HASH_LOCK(ipss->ipsec_action_hash, hval);
3185 3185
3186 3186 for (HASH_ITERATE(ap, ipa_hash,
3187 3187 ipss->ipsec_action_hash, hval)) {
3188 3188 if (bcmp(&ap->ipa_act, &a[i], sizeof (*a)) != 0)
3189 3189 continue;
3190 3190 if (ap->ipa_next != prev)
3191 3191 continue;
3192 3192 break;
3193 3193 }
3194 3194 if (ap != NULL) {
3195 3195 HASH_UNLOCK(ipss->ipsec_action_hash, hval);
3196 3196 prev = ap;
3197 3197 continue;
3198 3198 }
3199 3199 /*
3200 3200 * need to allocate a new one..
3201 3201 */
3202 3202 ap = kmem_cache_alloc(ipsec_action_cache, KM_NOSLEEP);
3203 3203 if (ap == NULL) {
3204 3204 HASH_UNLOCK(ipss->ipsec_action_hash, hval);
3205 3205 if (prev != NULL)
3206 3206 ipsec_action_free(prev);
3207 3207 return (NULL);
3208 3208 }
3209 3209 HASH_INSERT(ap, ipa_hash, ipss->ipsec_action_hash, hval);
3210 3210
3211 3211 ap->ipa_next = prev;
3212 3212 ap->ipa_act = a[i];
3213 3213
3214 3214 overhead = ipsec_act_ovhd(&a[i]);
3215 3215 if (maxovhd < overhead)
3216 3216 maxovhd = overhead;
3217 3217
3218 3218 if ((a[i].ipa_type == IPSEC_ACT_BYPASS) ||
3219 3219 (a[i].ipa_type == IPSEC_ACT_CLEAR))
3220 3220 allow_clear = B_TRUE;
3221 3221 if (a[i].ipa_type == IPSEC_ACT_APPLY) {
3222 3222 const ipsec_prot_t *ipp = &a[i].ipa_apply;
3223 3223
3224 3224 ASSERT(ipp->ipp_use_ah || ipp->ipp_use_esp);
3225 3225 want_ah |= ipp->ipp_use_ah;
3226 3226 want_esp |= ipp->ipp_use_esp;
3227 3227 want_se |= ipp->ipp_use_se;
3228 3228 want_unique |= ipp->ipp_use_unique;
3229 3229 }
3230 3230 ap->ipa_allow_clear = allow_clear;
3231 3231 ap->ipa_want_ah = want_ah;
3232 3232 ap->ipa_want_esp = want_esp;
3233 3233 ap->ipa_want_se = want_se;
3234 3234 ap->ipa_want_unique = want_unique;
3235 3235 ap->ipa_refs = 1; /* from the hash table */
3236 3236 ap->ipa_ovhd = maxovhd;
3237 3237 if (prev)
3238 3238 prev->ipa_refs++;
3239 3239 prev = ap;
3240 3240 HASH_UNLOCK(ipss->ipsec_action_hash, hval);
3241 3241 }
3242 3242
3243 3243 ap->ipa_refs++; /* caller's reference */
3244 3244
3245 3245 return (ap);
3246 3246 }
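
/*
 * Editorial sketch, not part of this change; variable names are
 * illustrative. ipsec_act_find() returns the interned chain with one
 * reference already held for the caller (the "caller's reference" above),
 * so a caller that does not hand that reference off to a policy must drop
 * it itself, e.g.:
 *
 *	ipsec_act_find_t *unused;
 *	ipsec_action_t *ap;
 *
 *	ap = ipsec_act_find(acts, nacts, ns);
 *	if (ap == NULL)
 *		return (ENOMEM);		(illustrative error path)
 *	... use ap ...
 *	IPACT_REFRELE(ap);		(balance the caller's reference)
 */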
3247 3247
3248 3248 /*
3249 3249 * Called when refcount goes to 0, indicating that all references to this
3250 3250 * node are gone.
3251 3251 *
3252 3252 * This does not unchain the action from the hash table.
3253 3253 */
3254 3254 void
3255 3255 ipsec_action_free(ipsec_action_t *ap)
3256 3256 {
3257 3257 for (;;) {
3258 3258 ipsec_action_t *np = ap->ipa_next;
3259 3259 ASSERT(ap->ipa_refs == 0);
3260 3260 ASSERT(ap->ipa_hash.hash_pp == NULL);
3261 3261 kmem_cache_free(ipsec_action_cache, ap);
3262 3262 ap = np;
3263 3263 /* Inlined IPACT_REFRELE -- avoid recursion */
3264 3264 if (ap == NULL)
3265 3265 break;
3266 3266 membar_exit();
3267 3267 if (atomic_dec_32_nv(&(ap)->ipa_refs) != 0)
3268 3268 break;
3269 3269 /* End inlined IPACT_REFRELE */
3270 3270 }
3271 3271 }
3272 3272
3273 3273 /*
3274 3274 * Called when the action hash table goes away.
3275 3275 *
3276 3276 * The actions can be queued on an mblk with ipsec_in or
3277 3277 * ipsec_out, hence the actions might still be around.
3278 3278 * But we decrement ipa_refs here since we no longer have
3279 3279 * a reference to the action from the hash table.
3280 3280 */
3281 3281 static void
3282 3282 ipsec_action_free_table(ipsec_action_t *ap)
3283 3283 {
3284 3284 while (ap != NULL) {
3285 3285 ipsec_action_t *np = ap->ipa_next;
3286 3286
3287 3287 /* FIXME: remove? */
3288 3288 (void) printf("ipsec_action_free_table(%p) ref %d\n",
3289 3289 (void *)ap, ap->ipa_refs);
3290 3290 ASSERT(ap->ipa_refs > 0);
3291 3291 IPACT_REFRELE(ap);
3292 3292 ap = np;
3293 3293 }
3294 3294 }
3295 3295
3296 3296 /*
3297 3297 * Need to walk all stack instances since the reclaim function
3298 3298 * is global for all instances
3299 3299 */
3300 3300 /* ARGSUSED */
3301 3301 static void
3302 3302 ipsec_action_reclaim(void *arg)
3303 3303 {
3304 3304 netstack_handle_t nh;
3305 3305 netstack_t *ns;
3306 3306 ipsec_stack_t *ipss;
3307 3307
3308 3308 netstack_next_init(&nh);
3309 3309 while ((ns = netstack_next(&nh)) != NULL) {
3310 3310 /*
3311 3311 * netstack_next() can return a netstack_t with a NULL
3312 3312 * netstack_ipsec at boot time.
3313 3313 */
3314 3314 if ((ipss = ns->netstack_ipsec) == NULL) {
3315 3315 netstack_rele(ns);
3316 3316 continue;
3317 3317 }
3318 3318 ipsec_action_reclaim_stack(ipss);
3319 3319 netstack_rele(ns);
3320 3320 }
3321 3321 netstack_next_fini(&nh);
3322 3322 }
3323 3323
3324 3324 /*
3325 3325 * Periodically sweep action hash table for actions with refcount==1, and
3326 3326 * nuke them. We cannot do this "on demand" (i.e., from IPACT_REFRELE)
3327 3327 * because, without holding the bucket lock during IPACT_REFRELE, we cannot
3328 3328 * close the race with another thread finding the action in the hash table.
3329 3329 * Instead, we run this function sporadically to clean up after ourselves;
3330 3330 * we also set it as the "reclaim" function for the action kmem_cache.
3331 3331 *
3332 3332 * Note that it may take several passes of ipsec_action_gc() to free all
3333 3333 * "stale" actions.
3334 3334 */
3335 3335 static void
3336 3336 ipsec_action_reclaim_stack(ipsec_stack_t *ipss)
3337 3337 {
3338 3338 int i;
3339 3339
3340 3340 for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++) {
3341 3341 ipsec_action_t *ap, *np;
3342 3342
3343 3343 /* skip the lock if nobody home */
3344 3344 if (ipss->ipsec_action_hash[i].hash_head == NULL)
3345 3345 continue;
3346 3346
3347 3347 HASH_LOCK(ipss->ipsec_action_hash, i);
3348 3348 for (ap = ipss->ipsec_action_hash[i].hash_head;
3349 3349 ap != NULL; ap = np) {
3350 3350 ASSERT(ap->ipa_refs > 0);
3351 3351 np = ap->ipa_hash.hash_next;
3352 3352 if (ap->ipa_refs > 1)
3353 3353 continue;
3354 3354 HASH_UNCHAIN(ap, ipa_hash,
3355 3355 ipss->ipsec_action_hash, i);
3356 3356 IPACT_REFRELE(ap);
3357 3357 }
3358 3358 HASH_UNLOCK(ipss->ipsec_action_hash, i);
3359 3359 }
3360 3360 }
3361 3361
3362 3362 /*
3363 3363 * Intern a selector set into the selector set hash table.
3364 3364 * This is simpler than the actions case..
3365 3365 */
3366 3366 static ipsec_sel_t *
3367 3367 ipsec_find_sel(ipsec_selkey_t *selkey, netstack_t *ns)
3368 3368 {
3369 3369 ipsec_sel_t *sp;
3370 3370 uint32_t hval, bucket;
3371 3371 ipsec_stack_t *ipss = ns->netstack_ipsec;
3372 3372
3373 3373 /*
3374 3374 * Exactly one AF bit should be set in selkey.
3375 3375 */
3376 3376 ASSERT(!(selkey->ipsl_valid & IPSL_IPV4) ^
3377 3377 !(selkey->ipsl_valid & IPSL_IPV6));
3378 3378
3379 3379 hval = selkey_hash(selkey, ns);
3380 3380 /* Set pol_hval to uninitialized until we put it in a polhead. */
3381 3381 selkey->ipsl_sel_hval = hval;
3382 3382
3383 3383 bucket = (hval == IPSEC_SEL_NOHASH) ? 0 : hval;
3384 3384
3385 3385 ASSERT(!HASH_LOCKED(ipss->ipsec_sel_hash, bucket));
3386 3386 HASH_LOCK(ipss->ipsec_sel_hash, bucket);
3387 3387
3388 3388 for (HASH_ITERATE(sp, ipsl_hash, ipss->ipsec_sel_hash, bucket)) {
3389 3389 if (bcmp(&sp->ipsl_key, selkey,
3390 3390 offsetof(ipsec_selkey_t, ipsl_pol_hval)) == 0)
3391 3391 break;
3392 3392 }
3393 3393 if (sp != NULL) {
3394 3394 sp->ipsl_refs++;
3395 3395
3396 3396 HASH_UNLOCK(ipss->ipsec_sel_hash, bucket);
3397 3397 return (sp);
3398 3398 }
3399 3399
3400 3400 sp = kmem_cache_alloc(ipsec_sel_cache, KM_NOSLEEP);
3401 3401 if (sp == NULL) {
3402 3402 HASH_UNLOCK(ipss->ipsec_sel_hash, bucket);
3403 3403 return (NULL);
3404 3404 }
3405 3405
3406 3406 HASH_INSERT(sp, ipsl_hash, ipss->ipsec_sel_hash, bucket);
3407 3407 sp->ipsl_refs = 2; /* one for hash table, one for caller */
3408 3408 sp->ipsl_key = *selkey;
3409 3409 /* Set to uninitialized and have insertion into polhead fix things. */
3410 3410 if (selkey->ipsl_sel_hval != IPSEC_SEL_NOHASH)
3411 3411 sp->ipsl_key.ipsl_pol_hval = 0;
3412 3412 else
3413 3413 sp->ipsl_key.ipsl_pol_hval = IPSEC_SEL_NOHASH;
3414 3414
3415 3415 HASH_UNLOCK(ipss->ipsec_sel_hash, bucket);
3416 3416
3417 3417 return (sp);
3418 3418 }
3419 3419
3420 3420 static void
3421 3421 ipsec_sel_rel(ipsec_sel_t **spp, netstack_t *ns)
3422 3422 {
3423 3423 ipsec_sel_t *sp = *spp;
3424 3424 int hval = sp->ipsl_key.ipsl_sel_hval;
3425 3425 ipsec_stack_t *ipss = ns->netstack_ipsec;
3426 3426
3427 3427 *spp = NULL;
3428 3428
3429 3429 if (hval == IPSEC_SEL_NOHASH)
3430 3430 hval = 0;
3431 3431
3432 3432 ASSERT(!HASH_LOCKED(ipss->ipsec_sel_hash, hval));
3433 3433 HASH_LOCK(ipss->ipsec_sel_hash, hval);
3434 3434 if (--sp->ipsl_refs == 1) {
3435 3435 HASH_UNCHAIN(sp, ipsl_hash, ipss->ipsec_sel_hash, hval);
3436 3436 sp->ipsl_refs--;
3437 3437 HASH_UNLOCK(ipss->ipsec_sel_hash, hval);
3438 3438 ASSERT(sp->ipsl_refs == 0);
3439 3439 kmem_cache_free(ipsec_sel_cache, sp);
3440 3440 /* Caller unlocks */
3441 3441 return;
3442 3442 }
3443 3443
3444 3444 HASH_UNLOCK(ipss->ipsec_sel_hash, hval);
3445 3445 }
3446 3446
3447 3447 /*
3448 3448 * Free a policy rule which we know is no longer being referenced.
3449 3449 */
3450 3450 void
3451 3451 ipsec_policy_free(ipsec_policy_t *ipp)
3452 3452 {
3453 3453 ASSERT(ipp->ipsp_refs == 0);
3454 3454 ASSERT(ipp->ipsp_sel != NULL);
3455 3455 ASSERT(ipp->ipsp_act != NULL);
3456 3456 ASSERT(ipp->ipsp_netstack != NULL);
3457 3457
3458 3458 ipsec_sel_rel(&ipp->ipsp_sel, ipp->ipsp_netstack);
3459 3459 IPACT_REFRELE(ipp->ipsp_act);
3460 3460 kmem_cache_free(ipsec_pol_cache, ipp);
3461 3461 }
3462 3462
3463 3463 /*
3464 3464 * Construction of new policy rules; construct a policy, and add it to
3465 3465 * the appropriate tables.
3466 3466 */
3467 3467 ipsec_policy_t *
3468 3468 ipsec_policy_create(ipsec_selkey_t *keys, const ipsec_act_t *a,
3469 3469 int nacts, int prio, uint64_t *index_ptr, netstack_t *ns)
3470 3470 {
3471 3471 ipsec_action_t *ap;
3472 3472 ipsec_sel_t *sp;
3473 3473 ipsec_policy_t *ipp;
3474 3474 ipsec_stack_t *ipss = ns->netstack_ipsec;
3475 3475
3476 3476 if (index_ptr == NULL)
3477 3477 index_ptr = &ipss->ipsec_next_policy_index;
3478 3478
3479 3479 ipp = kmem_cache_alloc(ipsec_pol_cache, KM_NOSLEEP);
3480 3480 ap = ipsec_act_find(a, nacts, ns);
3481 3481 sp = ipsec_find_sel(keys, ns);
3482 3482
3483 3483 if ((ap == NULL) || (sp == NULL) || (ipp == NULL)) {
3484 3484 if (ap != NULL) {
3485 3485 IPACT_REFRELE(ap);
3486 3486 }
3487 3487 if (sp != NULL)
3488 3488 ipsec_sel_rel(&sp, ns);
3489 3489 if (ipp != NULL)
3490 3490 kmem_cache_free(ipsec_pol_cache, ipp);
3491 3491 return (NULL);
3492 3492 }
3493 3493
3494 3494 HASH_NULL(ipp, ipsp_hash);
3495 3495
3496 3496 ipp->ipsp_netstack = ns; /* Needed for ipsec_policy_free */
3497 3497 ipp->ipsp_refs = 1; /* caller's reference */
3498 3498 ipp->ipsp_sel = sp;
3499 3499 ipp->ipsp_act = ap;
3500 3500 ipp->ipsp_prio = prio; /* rule priority */
3501 3501 ipp->ipsp_index = *index_ptr;
3502 3502 (*index_ptr)++;
3503 3503
3504 3504 return (ipp);
3505 3505 }
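
/*
 * Editorial sketch, not part of this change; names and error codes are
 * illustrative and real callers differ in detail. A rule built by
 * ipsec_policy_create() is normally checked for duplicates and entered
 * into a ruleset (see ipsec_check_policy() and ipsec_enter_policy()
 * below) while holding the polhead lock as writer:
 *
 *	ipp = ipsec_policy_create(&keys, acts, nacts, prio, NULL, ns);
 *	if (ipp == NULL)
 *		return (ENOMEM);
 *	rw_enter(&php->iph_lock, RW_WRITER);
 *	if (!ipsec_check_policy(php, ipp, direction)) {
 *		rw_exit(&php->iph_lock);
 *		IPPOL_REFRELE(ipp);		(drop the creator's reference)
 *		return (EEXIST);
 *	}
 *	ipsec_enter_policy(php, ipp, direction, ns);
 *	rw_exit(&php->iph_lock);
 */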
3506 3506
3507 3507 static void
3508 3508 ipsec_update_present_flags(ipsec_stack_t *ipss)
3509 3509 {
3510 3510 boolean_t hashpol;
3511 3511
3512 3512 hashpol = (avl_numnodes(&ipss->ipsec_system_policy.iph_rulebyid) > 0);
3513 3513
3514 3514 if (hashpol) {
3515 3515 ipss->ipsec_outbound_v4_policy_present = B_TRUE;
3516 3516 ipss->ipsec_outbound_v6_policy_present = B_TRUE;
3517 3517 ipss->ipsec_inbound_v4_policy_present = B_TRUE;
3518 3518 ipss->ipsec_inbound_v6_policy_present = B_TRUE;
3519 3519 return;
3520 3520 }
3521 3521
3522 3522 ipss->ipsec_outbound_v4_policy_present = (NULL !=
3523 3523 ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_OUTBOUND].
3524 3524 ipr_nonhash[IPSEC_AF_V4]);
3525 3525 ipss->ipsec_outbound_v6_policy_present = (NULL !=
3526 3526 ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_OUTBOUND].
3527 3527 ipr_nonhash[IPSEC_AF_V6]);
3528 3528 ipss->ipsec_inbound_v4_policy_present = (NULL !=
3529 3529 ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_INBOUND].
3530 3530 ipr_nonhash[IPSEC_AF_V4]);
3531 3531 ipss->ipsec_inbound_v6_policy_present = (NULL !=
3532 3532 ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_INBOUND].
3533 3533 ipr_nonhash[IPSEC_AF_V6]);
3534 3534 }
3535 3535
3536 3536 boolean_t
3537 3537 ipsec_policy_delete(ipsec_policy_head_t *php, ipsec_selkey_t *keys, int dir,
3538 3538 netstack_t *ns)
3539 3539 {
3540 3540 ipsec_sel_t *sp;
3541 3541 ipsec_policy_t *ip, *nip, *head;
3542 3542 int af;
3543 3543 ipsec_policy_root_t *pr = &php->iph_root[dir];
3544 3544
3545 3545 sp = ipsec_find_sel(keys, ns);
3546 3546
3547 3547 if (sp == NULL)
3548 3548 return (B_FALSE);
3549 3549
3550 3550 af = (sp->ipsl_key.ipsl_valid & IPSL_IPV4) ? IPSEC_AF_V4 : IPSEC_AF_V6;
3551 3551
3552 3552 rw_enter(&php->iph_lock, RW_WRITER);
3553 3553
3554 3554 if (sp->ipsl_key.ipsl_pol_hval == IPSEC_SEL_NOHASH) {
3555 3555 head = pr->ipr_nonhash[af];
3556 3556 } else {
3557 3557 head = pr->ipr_hash[sp->ipsl_key.ipsl_pol_hval].hash_head;
3558 3558 }
3559 3559
3560 3560 for (ip = head; ip != NULL; ip = nip) {
3561 3561 nip = ip->ipsp_hash.hash_next;
3562 3562 if (ip->ipsp_sel != sp) {
3563 3563 continue;
3564 3564 }
3565 3565
3566 3566 IPPOL_UNCHAIN(php, ip);
3567 3567
3568 3568 php->iph_gen++;
3569 3569 ipsec_update_present_flags(ns->netstack_ipsec);
3570 3570
3571 3571 rw_exit(&php->iph_lock);
3572 3572
3573 3573 ipsec_sel_rel(&sp, ns);
3574 3574
3575 3575 return (B_TRUE);
3576 3576 }
3577 3577
3578 3578 rw_exit(&php->iph_lock);
3579 3579 ipsec_sel_rel(&sp, ns);
3580 3580 return (B_FALSE);
3581 3581 }
3582 3582
3583 3583 int
3584 3584 ipsec_policy_delete_index(ipsec_policy_head_t *php, uint64_t policy_index,
3585 3585 netstack_t *ns)
3586 3586 {
3587 3587 boolean_t found = B_FALSE;
3588 3588 ipsec_policy_t ipkey;
3589 3589 ipsec_policy_t *ip;
3590 3590 avl_index_t where;
3591 3591
3592 3592 bzero(&ipkey, sizeof (ipkey));
3593 3593 ipkey.ipsp_index = policy_index;
3594 3594
3595 3595 rw_enter(&php->iph_lock, RW_WRITER);
3596 3596
3597 3597 /*
3598 3598 * We could be cleverer here about the walk,
3599 3599 * but (k+1)*log(N) will do for now (k == number of matches,
3600 3600 * N == number of table entries).
3601 3601 */
3602 3602 for (;;) {
3603 3603 ip = (ipsec_policy_t *)avl_find(&php->iph_rulebyid,
3604 3604 (void *)&ipkey, &where);
3605 3605 ASSERT(ip == NULL);
3606 3606
3607 3607 ip = avl_nearest(&php->iph_rulebyid, where, AVL_AFTER);
3608 3608
3609 3609 if (ip == NULL)
3610 3610 break;
3611 3611
3612 3612 if (ip->ipsp_index != policy_index) {
3613 3613 ASSERT(ip->ipsp_index > policy_index);
3614 3614 break;
3615 3615 }
3616 3616
3617 3617 IPPOL_UNCHAIN(php, ip);
3618 3618 found = B_TRUE;
3619 3619 }
3620 3620
3621 3621 if (found) {
3622 3622 php->iph_gen++;
3623 3623 ipsec_update_present_flags(ns->netstack_ipsec);
3624 3624 }
3625 3625
3626 3626 rw_exit(&php->iph_lock);
3627 3627
3628 3628 return (found ? 0 : ENOENT);
3629 3629 }
3630 3630
3631 3631 /*
3632 3632 * Given a constructed ipsec_policy_t policy rule, see if it can be entered
3633 3633 * into the correct policy ruleset. As a side-effect, it sets the policy
3634 3634 * hash value (ipsl_pol_hval) in "ipp"'s selector key.
3635 3635 *
3636 3636 * Returns B_TRUE if it can be entered, B_FALSE if it can't be (because a
3637 3637 * duplicate policy exists with exactly the same selectors, or an icmp
3638 3638 * rule exists with a different encryption/authentication action).
3639 3639 */
3640 3640 boolean_t
3641 3641 ipsec_check_policy(ipsec_policy_head_t *php, ipsec_policy_t *ipp, int direction)
3642 3642 {
3643 3643 ipsec_policy_root_t *pr = &php->iph_root[direction];
3644 3644 int af = -1;
3645 3645 ipsec_policy_t *p2, *head;
3646 3646 uint8_t check_proto;
3647 3647 ipsec_selkey_t *selkey = &ipp->ipsp_sel->ipsl_key;
3648 3648 uint32_t valid = selkey->ipsl_valid;
3649 3649
3650 3650 if (valid & IPSL_IPV6) {
3651 3651 ASSERT(!(valid & IPSL_IPV4));
3652 3652 af = IPSEC_AF_V6;
3653 3653 check_proto = IPPROTO_ICMPV6;
3654 3654 } else {
3655 3655 ASSERT(valid & IPSL_IPV4);
3656 3656 af = IPSEC_AF_V4;
3657 3657 check_proto = IPPROTO_ICMP;
3658 3658 }
3659 3659
3660 3660 ASSERT(RW_WRITE_HELD(&php->iph_lock));
3661 3661
3662 3662 /*
3663 3663 * Double-check that we don't have any duplicate selectors here.
3664 3664 * Because selectors are interned below, we need only compare pointers
3665 3665 * for equality.
3666 3666 */
3667 3667 if (selkey->ipsl_sel_hval == IPSEC_SEL_NOHASH) {
3668 3668 head = pr->ipr_nonhash[af];
3669 3669 } else {
3670 3670 selkey->ipsl_pol_hval =
3671 3671 (selkey->ipsl_valid & IPSL_IPV4) ?
3672 3672 IPSEC_IPV4_HASH(selkey->ipsl_remote.ipsad_v4,
3673 3673 pr->ipr_nchains) :
3674 3674 IPSEC_IPV6_HASH(selkey->ipsl_remote.ipsad_v6,
3675 3675 pr->ipr_nchains);
3676 3676
3677 3677 head = pr->ipr_hash[selkey->ipsl_pol_hval].hash_head;
3678 3678 }
3679 3679
3680 3680 for (p2 = head; p2 != NULL; p2 = p2->ipsp_hash.hash_next) {
3681 3681 if (p2->ipsp_sel == ipp->ipsp_sel)
3682 3682 return (B_FALSE);
3683 3683 }
3684 3684
3685 3685 /*
3686 3686 * If it's ICMP and not a drop or pass rule, run through the ICMP
3687 3687 * rules and make sure the action is either new or the same as any
3688 3688 * other actions. We don't have to check the full chain because
3689 3689 * discard and bypass will override all other actions
3690 3690 */
3691 3691
3692 3692 if (valid & IPSL_PROTOCOL &&
3693 3693 selkey->ipsl_proto == check_proto &&
3694 3694 (ipp->ipsp_act->ipa_act.ipa_type == IPSEC_ACT_APPLY)) {
3695 3695
3696 3696 for (p2 = head; p2 != NULL; p2 = p2->ipsp_hash.hash_next) {
3697 3697
3698 3698 if (p2->ipsp_sel->ipsl_key.ipsl_valid & IPSL_PROTOCOL &&
3699 3699 p2->ipsp_sel->ipsl_key.ipsl_proto == check_proto &&
3700 3700 (p2->ipsp_act->ipa_act.ipa_type ==
3701 3701 IPSEC_ACT_APPLY)) {
3702 3702 return (ipsec_compare_action(p2, ipp));
3703 3703 }
3704 3704 }
3705 3705 }
3706 3706
3707 3707 return (B_TRUE);
3708 3708 }
3709 3709
3710 3710 /*
3711 3711 * compare the action chains of two policies for equality
3712 3712 * B_TRUE -> effective equality
3713 3713 */
3714 3714
3715 3715 static boolean_t
3716 3716 ipsec_compare_action(ipsec_policy_t *p1, ipsec_policy_t *p2)
3717 3717 {
3718 3718
3719 3719 ipsec_action_t *act1, *act2;
3720 3720
3721 3721 /* We have a valid rule. Let's compare the actions */
3722 3722 if (p1->ipsp_act == p2->ipsp_act) {
3723 3723 /* same action. We are good */
3724 3724 return (B_TRUE);
3725 3725 }
3726 3726
3727 3727 /* we have to walk the chain */
3728 3728
3729 3729 act1 = p1->ipsp_act;
3730 3730 act2 = p2->ipsp_act;
3731 3731
3732 3732 while (act1 != NULL && act2 != NULL) {
3733 3733
3734 3734 /* Otherwise, are we close enough? */
3735 3735 if (act1->ipa_allow_clear != act2->ipa_allow_clear ||
3736 3736 act1->ipa_want_ah != act2->ipa_want_ah ||
3737 3737 act1->ipa_want_esp != act2->ipa_want_esp ||
3738 3738 act1->ipa_want_se != act2->ipa_want_se) {
3739 3739 /* Nope, we aren't */
3740 3740 return (B_FALSE);
3741 3741 }
3742 3742
3743 3743 if (act1->ipa_want_ah) {
3744 3744 if (act1->ipa_act.ipa_apply.ipp_auth_alg !=
3745 3745 act2->ipa_act.ipa_apply.ipp_auth_alg) {
3746 3746 return (B_FALSE);
3747 3747 }
3748 3748
3749 3749 if (act1->ipa_act.ipa_apply.ipp_ah_minbits !=
3750 3750 act2->ipa_act.ipa_apply.ipp_ah_minbits ||
3751 3751 act1->ipa_act.ipa_apply.ipp_ah_maxbits !=
3752 3752 act2->ipa_act.ipa_apply.ipp_ah_maxbits) {
3753 3753 return (B_FALSE);
3754 3754 }
3755 3755 }
3756 3756
3757 3757 if (act1->ipa_want_esp) {
3758 3758 if (act1->ipa_act.ipa_apply.ipp_use_esp !=
3759 3759 act2->ipa_act.ipa_apply.ipp_use_esp ||
3760 3760 act1->ipa_act.ipa_apply.ipp_use_espa !=
3761 3761 act2->ipa_act.ipa_apply.ipp_use_espa) {
3762 3762 return (B_FALSE);
3763 3763 }
3764 3764
3765 3765 if (act1->ipa_act.ipa_apply.ipp_use_esp) {
3766 3766 if (act1->ipa_act.ipa_apply.ipp_encr_alg !=
3767 3767 act2->ipa_act.ipa_apply.ipp_encr_alg) {
3768 3768 return (B_FALSE);
3769 3769 }
3770 3770
3771 3771 if (act1->ipa_act.ipa_apply.ipp_espe_minbits !=
3772 3772 act2->ipa_act.ipa_apply.ipp_espe_minbits ||
3773 3773 act1->ipa_act.ipa_apply.ipp_espe_maxbits !=
3774 3774 act2->ipa_act.ipa_apply.ipp_espe_maxbits) {
3775 3775 return (B_FALSE);
3776 3776 }
3777 3777 }
3778 3778
3779 3779 if (act1->ipa_act.ipa_apply.ipp_use_espa) {
3780 3780 if (act1->ipa_act.ipa_apply.ipp_esp_auth_alg !=
3781 3781 act2->ipa_act.ipa_apply.ipp_esp_auth_alg) {
3782 3782 return (B_FALSE);
3783 3783 }
3784 3784
3785 3785 if (act1->ipa_act.ipa_apply.ipp_espa_minbits !=
3786 3786 act2->ipa_act.ipa_apply.ipp_espa_minbits ||
3787 3787 act1->ipa_act.ipa_apply.ipp_espa_maxbits !=
3788 3788 act2->ipa_act.ipa_apply.ipp_espa_maxbits) {
3789 3789 return (B_FALSE);
3790 3790 }
3791 3791 }
3792 3792
3793 3793 }
3794 3794
3795 3795 act1 = act1->ipa_next;
3796 3796 act2 = act2->ipa_next;
3797 3797 }
3798 3798
3799 3799 if (act1 != NULL || act2 != NULL) {
3800 3800 return (B_FALSE);
3801 3801 }
3802 3802
3803 3803 return (B_TRUE);
3804 3804 }
3805 3805
3806 3806
3807 3807 /*
3808 3808 * Given a constructed ipsec_policy_t policy rule, enter it into
3809 3809 * the correct policy ruleset.
3810 3810 *
3811 3811 * ipsec_check_policy() is assumed to have succeeded first (to check for
3812 3812 * duplicates).
3813 3813 */
3814 3814 void
3815 3815 ipsec_enter_policy(ipsec_policy_head_t *php, ipsec_policy_t *ipp, int direction,
3816 3816 netstack_t *ns)
3817 3817 {
3818 3818 ipsec_policy_root_t *pr = &php->iph_root[direction];
3819 3819 ipsec_selkey_t *selkey = &ipp->ipsp_sel->ipsl_key;
3820 3820 uint32_t valid = selkey->ipsl_valid;
3821 3821 uint32_t hval = selkey->ipsl_pol_hval;
3822 3822 int af = -1;
3823 3823
3824 3824 ASSERT(RW_WRITE_HELD(&php->iph_lock));
3825 3825
3826 3826 if (valid & IPSL_IPV6) {
3827 3827 ASSERT(!(valid & IPSL_IPV4));
3828 3828 af = IPSEC_AF_V6;
3829 3829 } else {
3830 3830 ASSERT(valid & IPSL_IPV4);
3831 3831 af = IPSEC_AF_V4;
3832 3832 }
3833 3833
3834 3834 php->iph_gen++;
3835 3835
3836 3836 if (hval == IPSEC_SEL_NOHASH) {
3837 3837 HASHLIST_INSERT(ipp, ipsp_hash, pr->ipr_nonhash[af]);
3838 3838 } else {
3839 3839 HASH_LOCK(pr->ipr_hash, hval);
3840 3840 HASH_INSERT(ipp, ipsp_hash, pr->ipr_hash, hval);
3841 3841 HASH_UNLOCK(pr->ipr_hash, hval);
3842 3842 }
3843 3843
3844 3844 ipsec_insert_always(&php->iph_rulebyid, ipp);
3845 3845
3846 3846 ipsec_update_present_flags(ns->netstack_ipsec);
3847 3847 }
3848 3848
3849 3849 static void
3850 3850 ipsec_ipr_flush(ipsec_policy_head_t *php, ipsec_policy_root_t *ipr)
3851 3851 {
3852 3852 ipsec_policy_t *ip, *nip;
3853 3853 int af, chain, nchain;
3854 3854
3855 3855 for (af = 0; af < IPSEC_NAF; af++) {
3856 3856 for (ip = ipr->ipr_nonhash[af]; ip != NULL; ip = nip) {
3857 3857 nip = ip->ipsp_hash.hash_next;
3858 3858 IPPOL_UNCHAIN(php, ip);
3859 3859 }
3860 3860 ipr->ipr_nonhash[af] = NULL;
3861 3861 }
3862 3862 nchain = ipr->ipr_nchains;
3863 3863
3864 3864 for (chain = 0; chain < nchain; chain++) {
3865 3865 for (ip = ipr->ipr_hash[chain].hash_head; ip != NULL;
3866 3866 ip = nip) {
3867 3867 nip = ip->ipsp_hash.hash_next;
3868 3868 IPPOL_UNCHAIN(php, ip);
3869 3869 }
3870 3870 ipr->ipr_hash[chain].hash_head = NULL;
3871 3871 }
3872 3872 }
3873 3873
3874 3874 /*
3875 3875 * Create and insert inbound or outbound policy associated with actp for the
3876 3876 * address family fam into the policy head ph. Returns B_TRUE if policy was
3877 3877 * inserted, and B_FALSE otherwise.
3878 3878 */
3879 3879 boolean_t
3880 3880 ipsec_polhead_insert(ipsec_policy_head_t *ph, ipsec_act_t *actp, uint_t nact,
3881 3881 int fam, int ptype, netstack_t *ns)
3882 3882 {
3883 3883 ipsec_selkey_t sel;
3884 3884 ipsec_policy_t *pol;
3885 3885 ipsec_policy_root_t *pr;
3886 3886
3887 3887 bzero(&sel, sizeof (sel));
3888 3888 sel.ipsl_valid = (fam == IPSEC_AF_V4 ? IPSL_IPV4 : IPSL_IPV6);
3889 3889 if ((pol = ipsec_policy_create(&sel, actp, nact, IPSEC_PRIO_SOCKET,
3890 3890 NULL, ns)) != NULL) {
3891 3891 pr = &ph->iph_root[ptype];
3892 3892 HASHLIST_INSERT(pol, ipsp_hash, pr->ipr_nonhash[fam]);
3893 3893 ipsec_insert_always(&ph->iph_rulebyid, pol);
3894 3894 }
3895 3895 return (pol != NULL);
3896 3896 }
3897 3897
3898 3898 void
3899 3899 ipsec_polhead_flush(ipsec_policy_head_t *php, netstack_t *ns)
3900 3900 {
3901 3901 int dir;
3902 3902
3903 3903 ASSERT(RW_WRITE_HELD(&php->iph_lock));
3904 3904
3905 3905 for (dir = 0; dir < IPSEC_NTYPES; dir++)
3906 3906 ipsec_ipr_flush(php, &php->iph_root[dir]);
3907 3907
3908 3908 php->iph_gen++;
3909 3909 ipsec_update_present_flags(ns->netstack_ipsec);
3910 3910 }
3911 3911
3912 3912 void
3913 3913 ipsec_polhead_free(ipsec_policy_head_t *php, netstack_t *ns)
3914 3914 {
3915 3915 int dir;
3916 3916
3917 3917 ASSERT(php->iph_refs == 0);
3918 3918
3919 3919 rw_enter(&php->iph_lock, RW_WRITER);
3920 3920 ipsec_polhead_flush(php, ns);
3921 3921 rw_exit(&php->iph_lock);
3922 3922 rw_destroy(&php->iph_lock);
3923 3923 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
3924 3924 ipsec_policy_root_t *ipr = &php->iph_root[dir];
3925 3925 int chain;
3926 3926
3927 3927 for (chain = 0; chain < ipr->ipr_nchains; chain++)
3928 3928 mutex_destroy(&(ipr->ipr_hash[chain].hash_lock));
3929 3929
3930 3930 }
3931 3931 ipsec_polhead_free_table(php);
3932 3932 kmem_free(php, sizeof (*php));
3933 3933 }
3934 3934
3935 3935 static void
3936 3936 ipsec_ipr_init(ipsec_policy_root_t *ipr)
3937 3937 {
3938 3938 int af;
3939 3939
3940 3940 ipr->ipr_nchains = 0;
3941 3941 ipr->ipr_hash = NULL;
3942 3942
3943 3943 for (af = 0; af < IPSEC_NAF; af++) {
3944 3944 ipr->ipr_nonhash[af] = NULL;
3945 3945 }
3946 3946 }
3947 3947
3948 3948 ipsec_policy_head_t *
3949 3949 ipsec_polhead_create(void)
3950 3950 {
3951 3951 ipsec_policy_head_t *php;
3952 3952
3953 3953 php = kmem_alloc(sizeof (*php), KM_NOSLEEP);
3954 3954 if (php == NULL)
3955 3955 return (php);
3956 3956
3957 3957 rw_init(&php->iph_lock, NULL, RW_DEFAULT, NULL);
3958 3958 php->iph_refs = 1;
3959 3959 php->iph_gen = 0;
3960 3960
3961 3961 ipsec_ipr_init(&php->iph_root[IPSEC_TYPE_INBOUND]);
3962 3962 ipsec_ipr_init(&php->iph_root[IPSEC_TYPE_OUTBOUND]);
3963 3963
3964 3964 avl_create(&php->iph_rulebyid, ipsec_policy_cmpbyid,
3965 3965 sizeof (ipsec_policy_t), offsetof(ipsec_policy_t, ipsp_byid));
3966 3966
3967 3967 return (php);
3968 3968 }
3969 3969
3970 3970 /*
3971 3971 * Clone the policy head into a new polhead; release one reference to the
3972 3972 * old one and return the only reference to the new one.
3973 3973 * If the old one had a refcount of 1, just return it.
3974 3974 */
3975 3975 ipsec_policy_head_t *
3976 3976 ipsec_polhead_split(ipsec_policy_head_t *php, netstack_t *ns)
3977 3977 {
3978 3978 ipsec_policy_head_t *nphp;
3979 3979
3980 3980 if (php == NULL)
3981 3981 return (ipsec_polhead_create());
3982 3982 else if (php->iph_refs == 1)
3983 3983 return (php);
3984 3984
3985 3985 nphp = ipsec_polhead_create();
3986 3986 if (nphp == NULL)
3987 3987 return (NULL);
3988 3988
3989 3989 if (ipsec_copy_polhead(php, nphp, ns) != 0) {
3990 3990 ipsec_polhead_free(nphp, ns);
3991 3991 return (NULL);
3992 3992 }
3993 3993 IPPH_REFRELE(php, ns);
3994 3994 return (nphp);
3995 3995 }
3996 3996
3997 3997 /*
3998 3998 * When sending a response to an ICMP request or generating a RST
3999 3999 * in the TCP case, the outbound packets need to go out at the same level
4000 4000 * of protection as the incoming ones, i.e., we associate our outbound
4001 4001 * policy with how the packet came in. We call this after we have
4002 4002 * accepted the incoming packet, which may or may not have been in the
4003 4003 * clear, and hence we send the reply back with the policy
4004 4004 * matching the incoming datagram's policy.
4005 4005 *
4006 4006 * NOTE: This technique serves two purposes:
4007 4007 *
4008 4008 * 1) If we have multiple outbound policies, we send out a reply
4009 4009 * matching with how it came in rather than matching the outbound
4010 4010 * policy.
4011 4011 *
4012 4012 * 2) For asymmetric policies, we want to make sure that incoming
4013 4013 * and outgoing traffic has the same level of protection. Asymmetric
4014 4014 * policies exist only with global policy where we may not have
4015 4015 * both outbound and inbound at the same time.
4016 4016 *
4017 4017 * NOTE2: This function is called by cleartext cases, so it needs to be
4018 4018 * in IP proper.
4019 4019 *
4020 4020 * Note: the caller has moved other parts of ira into ixa already.
4021 4021 */
4022 4022 boolean_t
4023 4023 ipsec_in_to_out(ip_recv_attr_t *ira, ip_xmit_attr_t *ixa, mblk_t *data_mp,
4024 4024 ipha_t *ipha, ip6_t *ip6h)
4025 4025 {
4026 4026 ipsec_selector_t sel;
4027 4027 ipsec_action_t *reflect_action = NULL;
4028 4028 netstack_t *ns = ixa->ixa_ipst->ips_netstack;
4029 4029
4030 4030 bzero((void*)&sel, sizeof (sel));
4031 4031
4032 4032 if (ira->ira_ipsec_action != NULL) {
4033 4033 /* transfer reference.. */
4034 4034 reflect_action = ira->ira_ipsec_action;
4035 4035 ira->ira_ipsec_action = NULL;
4036 4036 } else if (!(ira->ira_flags & IRAF_LOOPBACK))
4037 4037 reflect_action = ipsec_in_to_out_action(ira);
4038 4038
4039 4039 /*
4040 4040 * The caller is going to send the datagram out which might
4041 4041 * go on the wire or delivered locally through ire_send_local.
4042 4042 *
4043 4043 * 1) If it goes out on the wire, new associations will be
4044 4044 * obtained.
4045 4045 * 2) If it is delivered locally, ire_send_local will convert
4046 4046 * this ip_xmit_attr_t back to a ip_recv_attr_t looking at the
4047 4047 * requests.
4048 4048 */
4049 4049 ixa->ixa_ipsec_action = reflect_action;
4050 4050
4051 4051 if (!ipsec_init_outbound_ports(&sel, data_mp, ipha, ip6h, 0,
4052 4052 ns->netstack_ipsec)) {
4053 4053 /* Note: data_mp already consumed and ip_drop_packet done */
4054 4054 return (B_FALSE);
4055 4055 }
4056 4056 ixa->ixa_ipsec_src_port = sel.ips_local_port;
4057 4057 ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4058 4058 ixa->ixa_ipsec_proto = sel.ips_protocol;
4059 4059 ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4060 4060 ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4061 4061
4062 4062 /*
4063 4063 * Don't use global policy for this, as we want
4064 4064 * to use the same protection that was applied to the inbound packet.
4065 4065 * Thus we set IXAF_NO_IPSEC if it arrived in the clear so that
4066 4066 * it is sent in the clear.
4067 4067 */
4068 4068 if (ira->ira_flags & IRAF_IPSEC_SECURE)
4069 4069 ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4070 4070 else
4071 4071 ixa->ixa_flags |= IXAF_NO_IPSEC;
4072 4072
4073 4073 return (B_TRUE);
4074 4074 }
4075 4075
4076 4076 void
4077 4077 ipsec_out_release_refs(ip_xmit_attr_t *ixa)
4078 4078 {
4079 4079 if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE))
4080 4080 return;
4081 4081
4082 4082 if (ixa->ixa_ipsec_ah_sa != NULL) {
4083 4083 IPSA_REFRELE(ixa->ixa_ipsec_ah_sa);
4084 4084 ixa->ixa_ipsec_ah_sa = NULL;
4085 4085 }
4086 4086 if (ixa->ixa_ipsec_esp_sa != NULL) {
4087 4087 IPSA_REFRELE(ixa->ixa_ipsec_esp_sa);
4088 4088 ixa->ixa_ipsec_esp_sa = NULL;
4089 4089 }
4090 4090 if (ixa->ixa_ipsec_policy != NULL) {
4091 4091 IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4092 4092 ixa->ixa_ipsec_policy = NULL;
4093 4093 }
4094 4094 if (ixa->ixa_ipsec_action != NULL) {
4095 4095 IPACT_REFRELE(ixa->ixa_ipsec_action);
4096 4096 ixa->ixa_ipsec_action = NULL;
4097 4097 }
4098 4098 if (ixa->ixa_ipsec_latch) {
4099 4099 IPLATCH_REFRELE(ixa->ixa_ipsec_latch);
4100 4100 ixa->ixa_ipsec_latch = NULL;
4101 4101 }
4102 4102 /* Clear the soft references to the SAs */
4103 4103 ixa->ixa_ipsec_ref[0].ipsr_sa = NULL;
4104 4104 ixa->ixa_ipsec_ref[0].ipsr_bucket = NULL;
4105 4105 ixa->ixa_ipsec_ref[0].ipsr_gen = 0;
4106 4106 ixa->ixa_ipsec_ref[1].ipsr_sa = NULL;
4107 4107 ixa->ixa_ipsec_ref[1].ipsr_bucket = NULL;
4108 4108 ixa->ixa_ipsec_ref[1].ipsr_gen = 0;
4109 4109 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4110 4110 }
4111 4111
4112 4112 void
4113 4113 ipsec_in_release_refs(ip_recv_attr_t *ira)
4114 4114 {
4115 4115 if (!(ira->ira_flags & IRAF_IPSEC_SECURE))
4116 4116 return;
4117 4117
4118 4118 if (ira->ira_ipsec_ah_sa != NULL) {
4119 4119 IPSA_REFRELE(ira->ira_ipsec_ah_sa);
4120 4120 ira->ira_ipsec_ah_sa = NULL;
4121 4121 }
4122 4122 if (ira->ira_ipsec_esp_sa != NULL) {
4123 4123 IPSA_REFRELE(ira->ira_ipsec_esp_sa);
4124 4124 ira->ira_ipsec_esp_sa = NULL;
4125 4125 }
4126 + if (ira->ira_ipsec_action != NULL) {
4127 + IPACT_REFRELE(ira->ira_ipsec_action);
4128 + ira->ira_ipsec_action = NULL;
4129 + }
4130 +
4126 4131 ira->ira_flags &= ~IRAF_IPSEC_SECURE;
4127 4132 }
4128 4133
4129 4134 /*
4130 4135 * This is called from ire_send_local when a packet
4131 4136 * is looped back. We setup the ip_recv_attr_t "borrowing" the references
4132 4137 * is looped back. We set up the ip_recv_attr_t, "borrowing" the references
4133 4138 * Note that we don't do any IPsec but we carry the actions and IPSEC flags
4134 4139 * across so that the fanout policy checks see that IPsec was applied.
4135 4140 *
4136 4141 * The caller should do ipsec_in_release_refs() on the ira by calling
4137 4142 * ira_cleanup().
4138 4143 */
4139 4144 void
4140 4145 ipsec_out_to_in(ip_xmit_attr_t *ixa, ill_t *ill, ip_recv_attr_t *ira)
4141 4146 {
4142 4147 ipsec_policy_t *pol;
4143 4148 ipsec_action_t *act;
4144 4149
4145 4150 /* Non-IPsec operations */
4146 4151 ira->ira_free_flags = 0;
4147 4152 ira->ira_zoneid = ixa->ixa_zoneid;
4148 4153 ira->ira_cred = ixa->ixa_cred;
4149 4154 ira->ira_cpid = ixa->ixa_cpid;
4150 4155 ira->ira_tsl = ixa->ixa_tsl;
4151 4156 ira->ira_ill = ira->ira_rill = ill;
4152 4157 ira->ira_flags = ixa->ixa_flags & IAF_MASK;
4153 4158 ira->ira_no_loop_zoneid = ixa->ixa_no_loop_zoneid;
4154 4159 ira->ira_pktlen = ixa->ixa_pktlen;
4155 4160 ira->ira_ip_hdr_length = ixa->ixa_ip_hdr_length;
4156 4161 ira->ira_protocol = ixa->ixa_protocol;
4157 4162 ira->ira_mhip = NULL;
4158 4163
4159 4164 ira->ira_flags |= IRAF_LOOPBACK | IRAF_L2SRC_LOOPBACK;
4160 4165
4161 4166 ira->ira_sqp = ixa->ixa_sqp;
4162 4167 ira->ira_ring = NULL;
4163 4168
4164 4169 ira->ira_ruifindex = ill->ill_phyint->phyint_ifindex;
4165 4170 ira->ira_rifindex = ira->ira_ruifindex;
4166 4171
4167 4172 if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE))
4168 4173 return;
4169 4174
4170 4175 ira->ira_flags |= IRAF_IPSEC_SECURE;
4171 4176
4172 4177 ira->ira_ipsec_ah_sa = NULL;
4173 4178 ira->ira_ipsec_esp_sa = NULL;
4174 4179
4175 4180 act = ixa->ixa_ipsec_action;
4176 4181 if (act == NULL) {
4177 4182 pol = ixa->ixa_ipsec_policy;
4178 4183 if (pol != NULL) {
4179 4184 act = pol->ipsp_act;
4180 4185 IPACT_REFHOLD(act);
4181 4186 }
4182 4187 }
4183 4188 ixa->ixa_ipsec_action = NULL;
4184 4189 ira->ira_ipsec_action = act;
4185 4190 }
4186 4191
4187 4192 /*
4188 4193 * Consults global policy and per-socket policy to see whether this datagram
4189 4194 * should go out secure. If so it updates the ip_xmit_attr_t
4190 4195 * Should not be used when connecting, since then we want to latch the policy.
4191 4196 *
4192 4197 * If connp is NULL we just look at the global policy.
4193 4198 *
4194 4199 * Returns NULL if the packet was dropped, in which case the MIB has
4195 4200 * been incremented and ip_drop_packet done.
4196 4201 */
4197 4202 mblk_t *
4198 4203 ip_output_attach_policy(mblk_t *mp, ipha_t *ipha, ip6_t *ip6h,
4199 4204 const conn_t *connp, ip_xmit_attr_t *ixa)
4200 4205 {
4201 4206 ipsec_selector_t sel;
4202 4207 boolean_t policy_present;
4203 4208 ip_stack_t *ipst = ixa->ixa_ipst;
4204 4209 netstack_t *ns = ipst->ips_netstack;
4205 4210 ipsec_stack_t *ipss = ns->netstack_ipsec;
4206 4211 ipsec_policy_t *p;
4207 4212
4208 4213 ixa->ixa_ipsec_policy_gen = ipss->ipsec_system_policy.iph_gen;
4209 4214 ASSERT((ipha != NULL && ip6h == NULL) ||
4210 4215 (ip6h != NULL && ipha == NULL));
4211 4216
4212 4217 if (ipha != NULL)
4213 4218 policy_present = ipss->ipsec_outbound_v4_policy_present;
4214 4219 else
4215 4220 policy_present = ipss->ipsec_outbound_v6_policy_present;
4216 4221
4217 4222 if (!policy_present && (connp == NULL || connp->conn_policy == NULL))
4218 4223 return (mp);
4219 4224
4220 4225 bzero((void*)&sel, sizeof (sel));
4221 4226
4222 4227 if (ipha != NULL) {
4223 4228 sel.ips_local_addr_v4 = ipha->ipha_src;
4224 4229 sel.ips_remote_addr_v4 = ip_get_dst(ipha);
4225 4230 sel.ips_isv4 = B_TRUE;
4226 4231 } else {
4227 4232 sel.ips_isv4 = B_FALSE;
4228 4233 sel.ips_local_addr_v6 = ip6h->ip6_src;
4229 4234 sel.ips_remote_addr_v6 = ip_get_dst_v6(ip6h, mp, NULL);
4230 4235 }
4231 4236 sel.ips_protocol = ixa->ixa_protocol;
4232 4237
4233 4238 if (!ipsec_init_outbound_ports(&sel, mp, ipha, ip6h, 0, ipss)) {
4234 4239 if (ipha != NULL) {
4235 4240 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
4236 4241 } else {
4237 4242 BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsOutDiscards);
4238 4243 }
4239 4244 /* Note: mp already consumed and ip_drop_packet done */
4240 4245 return (NULL);
4241 4246 }
4242 4247
4243 4248 ASSERT(ixa->ixa_ipsec_policy == NULL);
4244 4249 p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, &sel, ns);
4245 4250 ixa->ixa_ipsec_policy = p;
4246 4251 if (p != NULL) {
4247 4252 ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4248 4253 if (connp == NULL || connp->conn_policy == NULL)
4249 4254 ixa->ixa_flags |= IXAF_IPSEC_GLOBAL_POLICY;
4250 4255 } else {
4251 4256 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4252 4257 }
4253 4258
4254 4259 /*
4255 4260 * Copy the right port information.
4256 4261 */
4257 4262 ixa->ixa_ipsec_src_port = sel.ips_local_port;
4258 4263 ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4259 4264 ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4260 4265 ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4261 4266 ixa->ixa_ipsec_proto = sel.ips_protocol;
4262 4267 return (mp);
4263 4268 }
4264 4269
4265 4270 /*
4266 4271 * When appropriate, this function caches inbound and outbound policy
4267 4272 * for this connection. The outbound policy is stored in conn_ixa.
4268 4273 * Note that it can not be used for SCTP since conn_faddr isn't set for SCTP.
4269 4274 *
4270 4275 * XXX need to work out more details about per-interface policy and
4271 4276 * caching here!
4272 4277 *
4273 4278 * XXX may want to split inbound and outbound caching for ill..
4274 4279 */
4275 4280 int
4276 4281 ipsec_conn_cache_policy(conn_t *connp, boolean_t isv4)
4277 4282 {
4278 4283 boolean_t global_policy_present;
4279 4284 netstack_t *ns = connp->conn_netstack;
4280 4285 ipsec_stack_t *ipss = ns->netstack_ipsec;
4281 4286
4282 4287 connp->conn_ixa->ixa_ipsec_policy_gen =
4283 4288 ipss->ipsec_system_policy.iph_gen;
4284 4289 /*
4285 4290 * There is no policy latching for ICMP sockets because we can't
4286 4291 * decide on which policy to use until we see the packet and get
4287 4292 * type/code selectors.
4288 4293 */
4289 4294 if (connp->conn_proto == IPPROTO_ICMP ||
4290 4295 connp->conn_proto == IPPROTO_ICMPV6) {
4291 4296 connp->conn_in_enforce_policy =
4292 4297 connp->conn_out_enforce_policy = B_TRUE;
4293 4298 if (connp->conn_latch != NULL) {
4294 4299 IPLATCH_REFRELE(connp->conn_latch);
4295 4300 connp->conn_latch = NULL;
4296 4301 }
4297 4302 if (connp->conn_latch_in_policy != NULL) {
4298 4303 IPPOL_REFRELE(connp->conn_latch_in_policy);
4299 4304 connp->conn_latch_in_policy = NULL;
4300 4305 }
4301 4306 if (connp->conn_latch_in_action != NULL) {
4302 4307 IPACT_REFRELE(connp->conn_latch_in_action);
4303 4308 connp->conn_latch_in_action = NULL;
4304 4309 }
4305 4310 if (connp->conn_ixa->ixa_ipsec_policy != NULL) {
4306 4311 IPPOL_REFRELE(connp->conn_ixa->ixa_ipsec_policy);
4307 4312 connp->conn_ixa->ixa_ipsec_policy = NULL;
4308 4313 }
4309 4314 if (connp->conn_ixa->ixa_ipsec_action != NULL) {
4310 4315 IPACT_REFRELE(connp->conn_ixa->ixa_ipsec_action);
4311 4316 connp->conn_ixa->ixa_ipsec_action = NULL;
4312 4317 }
4313 4318 connp->conn_ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4314 4319 return (0);
4315 4320 }
4316 4321
4317 4322 global_policy_present = isv4 ?
4318 4323 (ipss->ipsec_outbound_v4_policy_present ||
4319 4324 ipss->ipsec_inbound_v4_policy_present) :
4320 4325 (ipss->ipsec_outbound_v6_policy_present ||
4321 4326 ipss->ipsec_inbound_v6_policy_present);
4322 4327
4323 4328 if ((connp->conn_policy != NULL) || global_policy_present) {
4324 4329 ipsec_selector_t sel;
4325 4330 ipsec_policy_t *p;
4326 4331
4327 4332 if (connp->conn_latch == NULL &&
4328 4333 (connp->conn_latch = iplatch_create()) == NULL) {
4329 4334 return (ENOMEM);
4330 4335 }
4331 4336
4332 4337 bzero((void*)&sel, sizeof (sel));
4333 4338
4334 4339 sel.ips_protocol = connp->conn_proto;
4335 4340 sel.ips_local_port = connp->conn_lport;
4336 4341 sel.ips_remote_port = connp->conn_fport;
4337 4342 sel.ips_is_icmp_inv_acq = 0;
4338 4343 sel.ips_isv4 = isv4;
4339 4344 if (isv4) {
4340 4345 sel.ips_local_addr_v4 = connp->conn_laddr_v4;
4341 4346 sel.ips_remote_addr_v4 = connp->conn_faddr_v4;
4342 4347 } else {
4343 4348 sel.ips_local_addr_v6 = connp->conn_laddr_v6;
4344 4349 sel.ips_remote_addr_v6 = connp->conn_faddr_v6;
4345 4350 }
4346 4351
4347 4352 p = ipsec_find_policy(IPSEC_TYPE_INBOUND, connp, &sel, ns);
4348 4353 if (connp->conn_latch_in_policy != NULL)
4349 4354 IPPOL_REFRELE(connp->conn_latch_in_policy);
4350 4355 connp->conn_latch_in_policy = p;
4351 4356 connp->conn_in_enforce_policy = (p != NULL);
4352 4357
4353 4358 p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, &sel, ns);
4354 4359 if (connp->conn_ixa->ixa_ipsec_policy != NULL)
4355 4360 IPPOL_REFRELE(connp->conn_ixa->ixa_ipsec_policy);
4356 4361 connp->conn_ixa->ixa_ipsec_policy = p;
4357 4362 connp->conn_out_enforce_policy = (p != NULL);
4358 4363 if (p != NULL) {
4359 4364 connp->conn_ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4360 4365 if (connp->conn_policy == NULL) {
4361 4366 connp->conn_ixa->ixa_flags |=
4362 4367 IXAF_IPSEC_GLOBAL_POLICY;
4363 4368 }
4364 4369 } else {
4365 4370 connp->conn_ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4366 4371 }
4367 4372 /* Clear the latched actions too, in case we're recaching. */
4368 4373 if (connp->conn_ixa->ixa_ipsec_action != NULL) {
4369 4374 IPACT_REFRELE(connp->conn_ixa->ixa_ipsec_action);
4370 4375 connp->conn_ixa->ixa_ipsec_action = NULL;
4371 4376 }
4372 4377 if (connp->conn_latch_in_action != NULL) {
4373 4378 IPACT_REFRELE(connp->conn_latch_in_action);
4374 4379 connp->conn_latch_in_action = NULL;
4375 4380 }
4376 4381 connp->conn_ixa->ixa_ipsec_src_port = sel.ips_local_port;
4377 4382 connp->conn_ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4378 4383 connp->conn_ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4379 4384 connp->conn_ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4380 4385 connp->conn_ixa->ixa_ipsec_proto = sel.ips_protocol;
4381 4386 } else {
4382 4387 connp->conn_ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4383 4388 }
4384 4389
4385 4390 /*
4386 4391 * We may or may not have policy for this endpoint. We still set
4387 4392 * conn_policy_cached so that inbound datagrams don't have to look
4388 4393 * at global policy as policy is considered latched for these
4389 4394 * endpoints. We should not set conn_policy_cached until the conn
4390 4395 * reflects the actual policy. If we *set* this before inheriting
4391 4396 * the policy there is a window where the check
4392 4397 * CONN_INBOUND_POLICY_PRESENT will neither check with the policy
4393 4398 * on the conn (because we have not yet copied the policy on to
4394 4399 * conn and hence not set conn_in_enforce_policy) nor with the
4395 4400 * global policy (because conn_policy_cached is already set).
4396 4401 */
4397 4402 connp->conn_policy_cached = B_TRUE;
4398 4403 return (0);
4399 4404 }
4400 4405
4401 4406 /*
4402 4407 * When appropriate, this function caches outbound policy for faddr/fport.
4403 4408 * It is used when we are not connected, i.e., when we cannot latch the
4404 4409 * policy.
4405 4410 */
4406 4411 void
4407 4412 ipsec_cache_outbound_policy(const conn_t *connp, const in6_addr_t *v6src,
4408 4413 const in6_addr_t *v6dst, in_port_t dstport, ip_xmit_attr_t *ixa)
4409 4414 {
4410 4415 boolean_t isv4 = (ixa->ixa_flags & IXAF_IS_IPV4) != 0;
4411 4416 boolean_t global_policy_present;
4412 4417 netstack_t *ns = connp->conn_netstack;
4413 4418 ipsec_stack_t *ipss = ns->netstack_ipsec;
4414 4419
4415 4420 ixa->ixa_ipsec_policy_gen = ipss->ipsec_system_policy.iph_gen;
4416 4421
4417 4422 /*
4418 4423 * There is no policy caching for ICMP sockets because we can't
4419 4424 * decide on which policy to use until we see the packet and get
4420 4425 * type/code selectors.
4421 4426 */
4422 4427 if (connp->conn_proto == IPPROTO_ICMP ||
4423 4428 connp->conn_proto == IPPROTO_ICMPV6) {
4424 4429 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4425 4430 if (ixa->ixa_ipsec_policy != NULL) {
4426 4431 IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4427 4432 ixa->ixa_ipsec_policy = NULL;
4428 4433 }
4429 4434 if (ixa->ixa_ipsec_action != NULL) {
4430 4435 IPACT_REFRELE(ixa->ixa_ipsec_action);
4431 4436 ixa->ixa_ipsec_action = NULL;
4432 4437 }
4433 4438 return;
4434 4439 }
4435 4440
4436 4441 global_policy_present = isv4 ?
4437 4442 (ipss->ipsec_outbound_v4_policy_present ||
4438 4443 ipss->ipsec_inbound_v4_policy_present) :
4439 4444 (ipss->ipsec_outbound_v6_policy_present ||
4440 4445 ipss->ipsec_inbound_v6_policy_present);
4441 4446
4442 4447 if ((connp->conn_policy != NULL) || global_policy_present) {
4443 4448 ipsec_selector_t sel;
4444 4449 ipsec_policy_t *p;
4445 4450
4446 4451 bzero((void*)&sel, sizeof (sel));
4447 4452
4448 4453 sel.ips_protocol = connp->conn_proto;
4449 4454 sel.ips_local_port = connp->conn_lport;
4450 4455 sel.ips_remote_port = dstport;
4451 4456 sel.ips_is_icmp_inv_acq = 0;
4452 4457 sel.ips_isv4 = isv4;
4453 4458 if (isv4) {
4454 4459 IN6_V4MAPPED_TO_IPADDR(v6src, sel.ips_local_addr_v4);
4455 4460 IN6_V4MAPPED_TO_IPADDR(v6dst, sel.ips_remote_addr_v4);
4456 4461 } else {
4457 4462 sel.ips_local_addr_v6 = *v6src;
4458 4463 sel.ips_remote_addr_v6 = *v6dst;
4459 4464 }
4460 4465
4461 4466 p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, &sel, ns);
4462 4467 if (ixa->ixa_ipsec_policy != NULL)
4463 4468 IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4464 4469 ixa->ixa_ipsec_policy = p;
4465 4470 if (p != NULL) {
4466 4471 ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4467 4472 if (connp->conn_policy == NULL)
4468 4473 ixa->ixa_flags |= IXAF_IPSEC_GLOBAL_POLICY;
4469 4474 } else {
4470 4475 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4471 4476 }
4472 4477 /* Clear the latched actions too, in case we're recaching. */
4473 4478 if (ixa->ixa_ipsec_action != NULL) {
4474 4479 IPACT_REFRELE(ixa->ixa_ipsec_action);
4475 4480 ixa->ixa_ipsec_action = NULL;
4476 4481 }
4477 4482
4478 4483 ixa->ixa_ipsec_src_port = sel.ips_local_port;
4479 4484 ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4480 4485 ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4481 4486 ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4482 4487 ixa->ixa_ipsec_proto = sel.ips_protocol;
4483 4488 } else {
4484 4489 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4485 4490 if (ixa->ixa_ipsec_policy != NULL) {
4486 4491 IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4487 4492 ixa->ixa_ipsec_policy = NULL;
4488 4493 }
4489 4494 if (ixa->ixa_ipsec_action != NULL) {
4490 4495 IPACT_REFRELE(ixa->ixa_ipsec_action);
4491 4496 ixa->ixa_ipsec_action = NULL;
4492 4497 }
4493 4498 }
4494 4499 }
4495 4500
4496 4501 /*
4497 4502 * Returns B_FALSE if the policy has gone stale.
4498 4503 */
4499 4504 boolean_t
4500 4505 ipsec_outbound_policy_current(ip_xmit_attr_t *ixa)
4501 4506 {
4502 4507 ipsec_stack_t *ipss = ixa->ixa_ipst->ips_netstack->netstack_ipsec;
4503 4508
4504 4509 if (!(ixa->ixa_flags & IXAF_IPSEC_GLOBAL_POLICY))
4505 4510 return (B_TRUE);
4506 4511
4507 4512 return (ixa->ixa_ipsec_policy_gen == ipss->ipsec_system_policy.iph_gen);
4508 4513 }
4509 4514
4510 4515 void
4511 4516 iplatch_free(ipsec_latch_t *ipl)
4512 4517 {
4513 4518 if (ipl->ipl_local_cid != NULL)
4514 4519 IPSID_REFRELE(ipl->ipl_local_cid);
4515 4520 if (ipl->ipl_remote_cid != NULL)
4516 4521 IPSID_REFRELE(ipl->ipl_remote_cid);
4517 4522 mutex_destroy(&ipl->ipl_lock);
4518 4523 kmem_free(ipl, sizeof (*ipl));
4519 4524 }
4520 4525
4521 4526 ipsec_latch_t *
4522 4527 iplatch_create()
4523 4528 {
4524 4529 ipsec_latch_t *ipl = kmem_zalloc(sizeof (*ipl), KM_NOSLEEP);
4525 4530 if (ipl == NULL)
4526 4531 return (ipl);
4527 4532 mutex_init(&ipl->ipl_lock, NULL, MUTEX_DEFAULT, NULL);
4528 4533 ipl->ipl_refcnt = 1;
4529 4534 return (ipl);
4530 4535 }
4531 4536
4532 4537 /*
4533 4538 * Hash function for ID hash table.
4534 4539 */
4535 4540 static uint32_t
4536 4541 ipsid_hash(int idtype, char *idstring)
4537 4542 {
4538 4543 uint32_t hval = idtype;
4539 4544 unsigned char c;
4540 4545
4541 4546 while ((c = *idstring++) != 0) {
4542 4547 hval = (hval << 4) | (hval >> 28);
4543 4548 hval ^= c;
4544 4549 }
4545 4550 hval = hval ^ (hval >> 16);
4546 4551 return (hval & (IPSID_HASHSIZE-1));
4547 4552 }
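
/*
 * Editorial worked example, not part of this change: for idtype 1 and
 * idstring "a" the loop above runs once:
 *
 *	hval = 1			(idtype)
 *	rotate left 4:	hval = 0x00000010
 *	xor 'a' (0x61):	hval = 0x00000071
 *
 * The final fold (hval ^= hval >> 16) leaves 0x71, which is then masked
 * by (IPSID_HASHSIZE - 1) to pick the bucket.
 */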
4548 4553
4549 4554 /*
4550 4555 * Look up identity string in hash table. Return identity object
4551 4556 * corresponding to the name -- either preexisting, or newly allocated.
4552 4557 *
4553 4558 * Return NULL if we need to allocate a new one and can't get memory.
4554 4559 */
4555 4560 ipsid_t *
4556 4561 ipsid_lookup(int idtype, char *idstring, netstack_t *ns)
4557 4562 {
4558 4563 ipsid_t *retval;
4559 4564 char *nstr;
4560 4565 int idlen = strlen(idstring) + 1;
4561 4566 ipsec_stack_t *ipss = ns->netstack_ipsec;
4562 4567 ipsif_t *bucket;
4563 4568
4564 4569 bucket = &ipss->ipsec_ipsid_buckets[ipsid_hash(idtype, idstring)];
4565 4570
4566 4571 mutex_enter(&bucket->ipsif_lock);
4567 4572
4568 4573 for (retval = bucket->ipsif_head; retval != NULL;
4569 4574 retval = retval->ipsid_next) {
4570 4575 if (idtype != retval->ipsid_type)
4571 4576 continue;
4572 4577 if (bcmp(idstring, retval->ipsid_cid, idlen) != 0)
4573 4578 continue;
4574 4579
4575 4580 IPSID_REFHOLD(retval);
4576 4581 mutex_exit(&bucket->ipsif_lock);
4577 4582 return (retval);
4578 4583 }
4579 4584
4580 4585 retval = kmem_alloc(sizeof (*retval), KM_NOSLEEP);
4581 4586 if (!retval) {
4582 4587 mutex_exit(&bucket->ipsif_lock);
4583 4588 return (NULL);
4584 4589 }
4585 4590
4586 4591 nstr = kmem_alloc(idlen, KM_NOSLEEP);
4587 4592 if (!nstr) {
4588 4593 mutex_exit(&bucket->ipsif_lock);
4589 4594 kmem_free(retval, sizeof (*retval));
4590 4595 return (NULL);
4591 4596 }
4592 4597
4593 4598 retval->ipsid_refcnt = 1;
4594 4599 retval->ipsid_next = bucket->ipsif_head;
4595 4600 if (retval->ipsid_next != NULL)
4596 4601 retval->ipsid_next->ipsid_ptpn = &retval->ipsid_next;
4597 4602 retval->ipsid_ptpn = &bucket->ipsif_head;
4598 4603 retval->ipsid_type = idtype;
4599 4604 retval->ipsid_cid = nstr;
4600 4605 bucket->ipsif_head = retval;
4601 4606 bcopy(idstring, nstr, idlen);
4602 4607 mutex_exit(&bucket->ipsif_lock);
4603 4608
4604 4609 return (retval);
4605 4610 }
4606 4611
4607 4612 /*
4608 4613 * Garbage collect the identity hash table.
4609 4614 */
4610 4615 void
4611 4616 ipsid_gc(netstack_t *ns)
4612 4617 {
4613 4618 int i, len;
4614 4619 ipsid_t *id, *nid;
4615 4620 ipsif_t *bucket;
4616 4621 ipsec_stack_t *ipss = ns->netstack_ipsec;
4617 4622
4618 4623 for (i = 0; i < IPSID_HASHSIZE; i++) {
4619 4624 bucket = &ipss->ipsec_ipsid_buckets[i];
4620 4625 mutex_enter(&bucket->ipsif_lock);
4621 4626 for (id = bucket->ipsif_head; id != NULL; id = nid) {
4622 4627 nid = id->ipsid_next;
4623 4628 if (id->ipsid_refcnt == 0) {
4624 4629 *id->ipsid_ptpn = nid;
4625 4630 if (nid != NULL)
4626 4631 nid->ipsid_ptpn = id->ipsid_ptpn;
4627 4632 len = strlen(id->ipsid_cid) + 1;
4628 4633 kmem_free(id->ipsid_cid, len);
4629 4634 kmem_free(id, sizeof (*id));
4630 4635 }
4631 4636 }
4632 4637 mutex_exit(&bucket->ipsif_lock);
4633 4638 }
4634 4639 }
4635 4640
4636 4641 /*
4637 4642 * Return true if two identities are the same.
4638 4643 */
4639 4644 boolean_t
4640 4645 ipsid_equal(ipsid_t *id1, ipsid_t *id2)
4641 4646 {
4642 4647 if (id1 == id2)
4643 4648 return (B_TRUE);
4644 4649 #ifdef DEBUG
4645 4650 if ((id1 == NULL) || (id2 == NULL))
4646 4651 return (B_FALSE);
4647 4652 /*
4648 4653 * test that we're interning id's correctly..
4649 4654 */
4650 4655 ASSERT((strcmp(id1->ipsid_cid, id2->ipsid_cid) != 0) ||
4651 4656 (id1->ipsid_type != id2->ipsid_type));
4652 4657 #endif
4653 4658 return (B_FALSE);
4654 4659 }
4655 4660
4656 4661 /*
4657 4662 * Initialize identity table; called during module initialization.
4658 4663 */
4659 4664 static void
4660 4665 ipsid_init(netstack_t *ns)
4661 4666 {
4662 4667 ipsif_t *bucket;
4663 4668 int i;
4664 4669 ipsec_stack_t *ipss = ns->netstack_ipsec;
4665 4670
4666 4671 for (i = 0; i < IPSID_HASHSIZE; i++) {
4667 4672 bucket = &ipss->ipsec_ipsid_buckets[i];
4668 4673 mutex_init(&bucket->ipsif_lock, NULL, MUTEX_DEFAULT, NULL);
4669 4674 }
4670 4675 }
4671 4676
4672 4677 /*
4673 4678 * Free identity table (preparatory to module unload)
4674 4679 */
4675 4680 static void
4676 4681 ipsid_fini(netstack_t *ns)
4677 4682 {
4678 4683 ipsif_t *bucket;
4679 4684 int i;
4680 4685 ipsec_stack_t *ipss = ns->netstack_ipsec;
4681 4686
4682 4687 for (i = 0; i < IPSID_HASHSIZE; i++) {
4683 4688 bucket = &ipss->ipsec_ipsid_buckets[i];
4684 4689 ASSERT(bucket->ipsif_head == NULL);
4685 4690 mutex_destroy(&bucket->ipsif_lock);
4686 4691 }
4687 4692 }
4688 4693
4689 4694 /*
4690 4695 * Update the minimum and maximum supported key sizes for the specified
4691 4696 * algorithm, which is either a member of a netstack alg array or about to
4692 4697 * be; this function must therefore be called holding ipsec_alg_lock for write.
4693 4698 */
4694 4699 void
4695 4700 ipsec_alg_fix_min_max(ipsec_alginfo_t *alg, ipsec_algtype_t alg_type,
4696 4701 netstack_t *ns)
4697 4702 {
4698 4703 size_t crypto_min = (size_t)-1, crypto_max = 0;
4699 4704 size_t cur_crypto_min, cur_crypto_max;
4700 4705 boolean_t is_valid;
4701 4706 crypto_mechanism_info_t *mech_infos;
4702 4707 uint_t nmech_infos;
4703 4708 int crypto_rc, i;
4704 4709 crypto_mech_usage_t mask;
4705 4710 ipsec_stack_t *ipss = ns->netstack_ipsec;
4706 4711
4707 4712 ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
4708 4713
4709 4714 /*
4710 4715 * Compute the min, max, and default key sizes (in number of
4711 4716 * increments to the default key size in bits) as defined
4712 4717 * by the algorithm mappings. This range of key sizes is used
4713 4718 * for policy related operations. The effective key sizes
4714 4719 * supported by the framework could be more limited than
4715 4720 * those defined for an algorithm.
4716 4721 */
4717 4722 alg->alg_default_bits = alg->alg_key_sizes[0];
4718 4723 alg->alg_default = 0;
4719 4724 if (alg->alg_increment != 0) {
4720 4725 /* key sizes are defined by range & increment */
4721 4726 alg->alg_minbits = alg->alg_key_sizes[1];
4722 4727 alg->alg_maxbits = alg->alg_key_sizes[2];
4723 4728 } else if (alg->alg_nkey_sizes == 0) {
4724 4729 /* no specified key size for algorithm */
4725 4730 alg->alg_minbits = alg->alg_maxbits = 0;
4726 4731 } else {
4727 4732 /* key sizes are defined by enumeration */
4728 4733 alg->alg_minbits = (uint16_t)-1;
4729 4734 alg->alg_maxbits = 0;
4730 4735
4731 4736 for (i = 0; i < alg->alg_nkey_sizes; i++) {
4732 4737 if (alg->alg_key_sizes[i] < alg->alg_minbits)
4733 4738 alg->alg_minbits = alg->alg_key_sizes[i];
4734 4739 if (alg->alg_key_sizes[i] > alg->alg_maxbits)
4735 4740 alg->alg_maxbits = alg->alg_key_sizes[i];
4736 4741 }
4737 4742 }
4738 4743
4739 4744 if (!(alg->alg_flags & ALG_FLAG_VALID))
4740 4745 return;
4741 4746
4742 4747 /*
4743 4748 * Mechanisms do not apply to the NULL encryption
4744 4749 * algorithm, so simply return for this case.
4745 4750 */
4746 4751 if (alg->alg_id == SADB_EALG_NULL)
4747 4752 return;
4748 4753
4749 4754 /*
4750 4755 * Find the min and max key sizes supported by the cryptographic
4751 4756 * framework providers.
4752 4757 */
4753 4758
4754 4759 /* get the key sizes supported by the framework */
4755 4760 crypto_rc = crypto_get_all_mech_info(alg->alg_mech_type,
4756 4761 &mech_infos, &nmech_infos, KM_SLEEP);
4757 4762 if (crypto_rc != CRYPTO_SUCCESS || nmech_infos == 0) {
4758 4763 alg->alg_flags &= ~ALG_FLAG_VALID;
4759 4764 return;
4760 4765 }
4761 4766
4762 4767 /* min and max key sizes supported by framework */
4763 4768 for (i = 0, is_valid = B_FALSE; i < nmech_infos; i++) {
4764 4769 int unit_bits;
4765 4770
4766 4771 /*
4767 4772 * Ignore entries that do not support the operations
4768 4773 * needed for the algorithm type.
4769 4774 */
4770 4775 if (alg_type == IPSEC_ALG_AUTH) {
4771 4776 mask = CRYPTO_MECH_USAGE_MAC;
4772 4777 } else {
4773 4778 mask = CRYPTO_MECH_USAGE_ENCRYPT |
4774 4779 CRYPTO_MECH_USAGE_DECRYPT;
4775 4780 }
4776 4781 if ((mech_infos[i].mi_usage & mask) != mask)
4777 4782 continue;
4778 4783
4779 4784 unit_bits = (mech_infos[i].mi_keysize_unit ==
4780 4785 CRYPTO_KEYSIZE_UNIT_IN_BYTES) ? 8 : 1;
4781 4786 /* adjust min/max supported by framework */
4782 4787 cur_crypto_min = mech_infos[i].mi_min_key_size * unit_bits;
4783 4788 cur_crypto_max = mech_infos[i].mi_max_key_size * unit_bits;
4784 4789
4785 4790 if (cur_crypto_min < crypto_min)
4786 4791 crypto_min = cur_crypto_min;
4787 4792
4788 4793 /*
4789 4794 * CRYPTO_EFFECTIVELY_INFINITE is a special value of
4790 4795 * the crypto framework which means "no upper limit".
4791 4796 */
4792 4797 if (mech_infos[i].mi_max_key_size ==
4793 4798 CRYPTO_EFFECTIVELY_INFINITE) {
4794 4799 crypto_max = (size_t)-1;
4795 4800 } else if (cur_crypto_max > crypto_max) {
4796 4801 crypto_max = cur_crypto_max;
4797 4802 }
4798 4803
4799 4804 is_valid = B_TRUE;
4800 4805 }
4801 4806
4802 4807 kmem_free(mech_infos, sizeof (crypto_mechanism_info_t) *
4803 4808 nmech_infos);
4804 4809
4805 4810 if (!is_valid) {
4806 4811 /* no key sizes supported by framework */
4807 4812 alg->alg_flags &= ~ALG_FLAG_VALID;
4808 4813 return;
4809 4814 }
4810 4815
4811 4816 /*
4812 4817 * Determine min and max key sizes from the alg_key_sizes[]
4813 4818 * defined for the algorithm entry. Adjust key sizes based on
4814 4819 * those supported by the framework.
4815 4820 */
4816 4821 alg->alg_ef_default_bits = alg->alg_key_sizes[0];
4817 4822
4818 4823 /*
4819 4824 * For backwards compatibility, assume that the IV length
4820 4825 * is the same as the data length.
4821 4826 */
4822 4827 alg->alg_ivlen = alg->alg_datalen;
4823 4828
4824 4829 /*
4825 4830 * Copy any algorithm parameters (if provided) into dedicated
4826 4831 * elements in the ipsec_alginfo_t structure.
4827 4832 * There may be a better place to put this code.
4828 4833 */
4829 4834 for (i = 0; i < alg->alg_nparams; i++) {
4830 4835 switch (i) {
4831 4836 case 0:
4832 4837 /* Initialisation Vector length (bytes) */
4833 4838 alg->alg_ivlen = alg->alg_params[0];
4834 4839 break;
4835 4840 case 1:
4836 4841 /* Integrity Check Vector length (bytes) */
4837 4842 alg->alg_icvlen = alg->alg_params[1];
4838 4843 break;
4839 4844 case 2:
4840 4845 /* Salt length (bytes) */
4841 4846 alg->alg_saltlen = (uint8_t)alg->alg_params[2];
4842 4847 break;
4843 4848 default:
4844 4849 break;
4845 4850 }
4846 4851 }
4847 4852
4848 4853 /* Default if the IV length is not specified. */
4849 4854 if (alg_type == IPSEC_ALG_ENCR && alg->alg_ivlen == 0)
4850 4855 alg->alg_ivlen = alg->alg_datalen;
4851 4856
4852 4857 alg_flag_check(alg);
4853 4858
4854 4859 if (alg->alg_increment != 0) {
4855 4860 /* supported key sizes are defined by range & increment */
4856 4861 crypto_min = ALGBITS_ROUND_UP(crypto_min, alg->alg_increment);
4857 4862 crypto_max = ALGBITS_ROUND_DOWN(crypto_max, alg->alg_increment);
4858 4863
4859 4864 alg->alg_ef_minbits = MAX(alg->alg_minbits,
4860 4865 (uint16_t)crypto_min);
4861 4866 alg->alg_ef_maxbits = MIN(alg->alg_maxbits,
4862 4867 (uint16_t)crypto_max);
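		/*
		 * Illustrative example (hypothetical numbers): if the
		 * algorithm mapping declares 128..512 bits in increments of
		 * 64 and the providers report 40..448-bit support, crypto_min
		 * rounds up to 64 and crypto_max rounds down to 448, so the
		 * effective range becomes MAX(128, 64)..MIN(512, 448), i.e.
		 * 128..448 bits.
		 */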
4863 4868
4864 4869 /*
4865 4870 * If the sizes supported by the framework are outside
4866 4871 * the range of sizes defined by the algorithm mappings,
4867 4872 * the algorithm cannot be used. Check for this
4868 4873 * condition here.
4869 4874 */
4870 4875 if (alg->alg_ef_minbits > alg->alg_ef_maxbits) {
4871 4876 alg->alg_flags &= ~ALG_FLAG_VALID;
4872 4877 return;
4873 4878 }
4874 4879 if (alg->alg_ef_default_bits < alg->alg_ef_minbits)
4875 4880 alg->alg_ef_default_bits = alg->alg_ef_minbits;
4876 4881 if (alg->alg_ef_default_bits > alg->alg_ef_maxbits)
4877 4882 alg->alg_ef_default_bits = alg->alg_ef_maxbits;
4878 4883 } else if (alg->alg_nkey_sizes == 0) {
4879 4884 /* no specified key size for algorithm */
4880 4885 alg->alg_ef_minbits = alg->alg_ef_maxbits = 0;
4881 4886 } else {
4882 4887 /* supported key sizes are defined by enumeration */
4883 4888 alg->alg_ef_minbits = (uint16_t)-1;
4884 4889 alg->alg_ef_maxbits = 0;
4885 4890
4886 4891 for (i = 0, is_valid = B_FALSE; i < alg->alg_nkey_sizes; i++) {
4887 4892 /*
4888 4893 * Ignore the current key size if it is not in the
4889 4894 * range of sizes supported by the framework.
4890 4895 */
4891 4896 if (alg->alg_key_sizes[i] < crypto_min ||
4892 4897 alg->alg_key_sizes[i] > crypto_max)
4893 4898 continue;
4894 4899 if (alg->alg_key_sizes[i] < alg->alg_ef_minbits)
4895 4900 alg->alg_ef_minbits = alg->alg_key_sizes[i];
4896 4901 if (alg->alg_key_sizes[i] > alg->alg_ef_maxbits)
4897 4902 alg->alg_ef_maxbits = alg->alg_key_sizes[i];
4898 4903 is_valid = B_TRUE;
4899 4904 }
4900 4905
4901 4906 if (!is_valid) {
4902 4907 alg->alg_flags &= ~ALG_FLAG_VALID;
4903 4908 return;
4904 4909 }
4905 4910 alg->alg_ef_default = 0;
4906 4911 }
4907 4912 }
4908 4913
4909 4914 /*
4910 4915  * Sanity check parameters provided by ipsecalgs(1m). Assume that
4911 4916  * the algorithm is marked as valid; there is a check at the top
4912 4917 * of this function. If any of the checks below fail, the algorithm
4913 4918 * entry is invalid.
4914 4919 */
4915 4920 void
4916 4921 alg_flag_check(ipsec_alginfo_t *alg)
4917 4922 {
4918 4923 alg->alg_flags &= ~ALG_FLAG_VALID;
4919 4924
4920 4925 /*
4921 4926 	 * An algorithm can't be marked as both CCM and GCM.
4922 4927 	 * For CCM and GCM, check that both the ALG_FLAG_COMBINED and
4923 4928 	 * ALG_FLAG_COUNTERMODE flags are set.
4924 4929 */
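	/*
	 * For illustration (not a statement about any particular entry): a
	 * counter-mode AEAD such as AES-GCM would be expected to carry
	 * ALG_FLAG_GCM, ALG_FLAG_COUNTERMODE and ALG_FLAG_COMBINED together
	 * with a non-zero alg_icvlen in order to pass the checks below.
	 */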
4925 4930 if ((alg->alg_flags & (ALG_FLAG_CCM|ALG_FLAG_GCM)) ==
4926 4931 (ALG_FLAG_CCM|ALG_FLAG_GCM))
4927 4932 return;
4928 4933 if (alg->alg_flags & (ALG_FLAG_CCM|ALG_FLAG_GCM)) {
4929 4934 if (!(alg->alg_flags & ALG_FLAG_COUNTERMODE))
4930 4935 return;
4931 4936 if (!(alg->alg_flags & ALG_FLAG_COMBINED))
4932 4937 return;
4933 4938 }
4934 4939
4935 4940 /*
4936 4941 * For ALG_FLAG_COUNTERMODE, check the parameters
4937 4942 * fit in the ipsec_nonce_t structure.
4938 4943 */
4939 4944 if (alg->alg_flags & ALG_FLAG_COUNTERMODE) {
4940 4945 if (alg->alg_ivlen != sizeof (((ipsec_nonce_t *)NULL)->iv))
4941 4946 return;
4942 4947 if (alg->alg_saltlen > sizeof (((ipsec_nonce_t *)NULL)->salt))
4943 4948 return;
4944 4949 }
4945 4950 if ((alg->alg_flags & ALG_FLAG_COMBINED) &&
4946 4951 (alg->alg_icvlen == 0))
4947 4952 return;
4948 4953
4949 4954 /* all is well. */
4950 4955 alg->alg_flags |= ALG_FLAG_VALID;
4951 4956 }
4952 4957
4953 4958 /*
4954 4959 * Free the memory used by the specified algorithm.
4955 4960 */
4956 4961 void
4957 4962 ipsec_alg_free(ipsec_alginfo_t *alg)
4958 4963 {
4959 4964 if (alg == NULL)
4960 4965 return;
4961 4966
4962 4967 if (alg->alg_key_sizes != NULL) {
4963 4968 kmem_free(alg->alg_key_sizes,
4964 4969 (alg->alg_nkey_sizes + 1) * sizeof (uint16_t));
4965 4970 alg->alg_key_sizes = NULL;
4966 4971 }
4967 4972 if (alg->alg_block_sizes != NULL) {
4968 4973 kmem_free(alg->alg_block_sizes,
4969 4974 (alg->alg_nblock_sizes + 1) * sizeof (uint16_t));
4970 4975 alg->alg_block_sizes = NULL;
4971 4976 }
4972 4977 if (alg->alg_params != NULL) {
4973 4978 kmem_free(alg->alg_params,
4974 4979 (alg->alg_nparams + 1) * sizeof (uint16_t));
4975 4980 alg->alg_params = NULL;
4976 4981 }
4977 4982 kmem_free(alg, sizeof (*alg));
4978 4983 }
4979 4984
4980 4985 /*
4981 4986 * Check the validity of the specified key size for an algorithm.
4982 4987 * Returns B_TRUE if key size is valid, B_FALSE otherwise.
4983 4988 */
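/*
 * Example (hypothetical values): an algorithm that enumerates key sizes
 * {128, 192, 256} with alg_increment == 0 will reject a request for 160
 * bits even though 160 lies inside [alg_ef_minbits, alg_ef_maxbits].
 */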
4984 4989 boolean_t
4985 4990 ipsec_valid_key_size(uint16_t key_size, ipsec_alginfo_t *alg)
4986 4991 {
4987 4992 if (key_size < alg->alg_ef_minbits || key_size > alg->alg_ef_maxbits)
4988 4993 return (B_FALSE);
4989 4994
4990 4995 if (alg->alg_increment == 0 && alg->alg_nkey_sizes != 0) {
4991 4996 /*
4992 4997 * If the key sizes are defined by enumeration, the new
4993 4998 * key size must be equal to one of the supported values.
4994 4999 */
4995 5000 int i;
4996 5001
4997 5002 for (i = 0; i < alg->alg_nkey_sizes; i++)
4998 5003 if (key_size == alg->alg_key_sizes[i])
4999 5004 break;
5000 5005 if (i == alg->alg_nkey_sizes)
5001 5006 return (B_FALSE);
5002 5007 }
5003 5008
5004 5009 return (B_TRUE);
5005 5010 }
5006 5011
5007 5012 /*
5008 5013 * Callback function invoked by the crypto framework when a provider
5009 5014  * registers or unregisters. This callback updates the algorithm
5010 5015 * tables when a crypto algorithm is no longer available or becomes
5011 5016 * available, and triggers the freeing/creation of context templates
5012 5017 * associated with existing SAs, if needed.
5013 5018 *
5014 5019 * Need to walk all stack instances since the callback is global
5015 5020  * for all instances.
5016 5021 */
5017 5022 void
5018 5023 ipsec_prov_update_callback(uint32_t event, void *event_arg)
5019 5024 {
5020 5025 netstack_handle_t nh;
5021 5026 netstack_t *ns;
5022 5027
5023 5028 netstack_next_init(&nh);
5024 5029 while ((ns = netstack_next(&nh)) != NULL) {
5025 5030 ipsec_prov_update_callback_stack(event, event_arg, ns);
5026 5031 netstack_rele(ns);
5027 5032 }
5028 5033 netstack_next_fini(&nh);
5029 5034 }
5030 5035
5031 5036 static void
5032 5037 ipsec_prov_update_callback_stack(uint32_t event, void *event_arg,
5033 5038 netstack_t *ns)
5034 5039 {
5035 5040 crypto_notify_event_change_t *prov_change =
5036 5041 (crypto_notify_event_change_t *)event_arg;
5037 5042 uint_t algidx, algid, algtype, mech_count, mech_idx;
5038 5043 ipsec_alginfo_t *alg;
5039 5044 ipsec_alginfo_t oalg;
5040 5045 crypto_mech_name_t *mechs;
5041 5046 boolean_t alg_changed = B_FALSE;
5042 5047 ipsec_stack_t *ipss = ns->netstack_ipsec;
5043 5048
5044 5049 /* ignore events for which we didn't register */
5045 5050 if (event != CRYPTO_EVENT_MECHS_CHANGED) {
5046 5051 ip1dbg(("ipsec_prov_update_callback: unexpected event 0x%x "
5047 5052 		    "received from crypto framework\n", event));
5048 5053 return;
5049 5054 }
5050 5055
5051 5056 mechs = crypto_get_mech_list(&mech_count, KM_SLEEP);
5052 5057 if (mechs == NULL)
5053 5058 return;
5054 5059
5055 5060 /*
5056 5061 	 * Walk the list of currently defined IPsec algorithms. Update
5057 5062 * the algorithm valid flag and trigger an update of the
5058 5063 * SAs that depend on that algorithm.
5059 5064 */
5060 5065 rw_enter(&ipss->ipsec_alg_lock, RW_WRITER);
5061 5066 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
5062 5067 for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
5063 5068 algidx++) {
5064 5069
5065 5070 algid = ipss->ipsec_sortlist[algtype][algidx];
5066 5071 alg = ipss->ipsec_alglists[algtype][algid];
5067 5072 ASSERT(alg != NULL);
5068 5073
5069 5074 /*
5070 5075 * Skip the algorithms which do not map to the
5071 5076 * crypto framework provider being added or removed.
5072 5077 */
5073 5078 if (strncmp(alg->alg_mech_name,
5074 5079 prov_change->ec_mech_name,
5075 5080 CRYPTO_MAX_MECH_NAME) != 0)
5076 5081 continue;
5077 5082
5078 5083 /*
5079 5084 * Determine if the mechanism is valid. If it
5080 5085 * is not, mark the algorithm as being invalid. If
5081 5086 * it is, mark the algorithm as being valid.
5082 5087 */
5083 5088 for (mech_idx = 0; mech_idx < mech_count; mech_idx++)
5084 5089 if (strncmp(alg->alg_mech_name,
5085 5090 mechs[mech_idx], CRYPTO_MAX_MECH_NAME) == 0)
5086 5091 break;
5087 5092 if (mech_idx == mech_count &&
5088 5093 alg->alg_flags & ALG_FLAG_VALID) {
5089 5094 alg->alg_flags &= ~ALG_FLAG_VALID;
5090 5095 alg_changed = B_TRUE;
5091 5096 } else if (mech_idx < mech_count &&
5092 5097 !(alg->alg_flags & ALG_FLAG_VALID)) {
5093 5098 alg->alg_flags |= ALG_FLAG_VALID;
5094 5099 alg_changed = B_TRUE;
5095 5100 }
5096 5101
5097 5102 /*
5098 5103 * Update the supported key sizes, regardless
5099 5104 * of whether a crypto provider was added or
5100 5105 * removed.
5101 5106 */
5102 5107 oalg = *alg;
5103 5108 ipsec_alg_fix_min_max(alg, algtype, ns);
5104 5109 if (!alg_changed &&
5105 5110 			    (alg->alg_ef_minbits != oalg.alg_ef_minbits ||
5106 5111 			    alg->alg_ef_maxbits != oalg.alg_ef_maxbits ||
5107 5112 			    alg->alg_ef_default != oalg.alg_ef_default ||
5108 5113 			    alg->alg_ef_default_bits !=
5109 5114 			    oalg.alg_ef_default_bits))
5110 5115 alg_changed = B_TRUE;
5111 5116
5112 5117 /*
5113 5118 * Update the affected SAs if a software provider is
5114 5119 * being added or removed.
5115 5120 */
5116 5121 if (prov_change->ec_provider_type ==
5117 5122 CRYPTO_SW_PROVIDER)
5118 5123 sadb_alg_update(algtype, alg->alg_id,
5119 5124 prov_change->ec_change ==
5120 5125 CRYPTO_MECH_ADDED, ns);
5121 5126 }
5122 5127 }
5123 5128 rw_exit(&ipss->ipsec_alg_lock);
5124 5129 crypto_free_mech_list(mechs, mech_count);
5125 5130
5126 5131 if (alg_changed) {
5127 5132 /*
5128 5133 * An algorithm has changed, i.e. it became valid or
5129 5134 		 * invalid, or its supported key sizes have changed.
5130 5135 * Notify ipsecah and ipsecesp of this change so
5131 5136 * that they can send a SADB_REGISTER to their consumers.
5132 5137 */
5133 5138 ipsecah_algs_changed(ns);
5134 5139 ipsecesp_algs_changed(ns);
5135 5140 }
5136 5141 }
5137 5142
5138 5143 /*
5139 5144 * Registers with the crypto framework to be notified of crypto
5140 5145  * provider changes. Used to update the algorithm tables and
5141 5146 * to free or create context templates if needed. Invoked after IPsec
5142 5147 * is loaded successfully.
5143 5148 *
5144 5149 * This is called separately for each IP instance, so we ensure we only
5145 5150 * register once.
5146 5151 */
5147 5152 void
5148 5153 ipsec_register_prov_update(void)
5149 5154 {
5150 5155 if (prov_update_handle != NULL)
5151 5156 return;
5152 5157
5153 5158 prov_update_handle = crypto_notify_events(
5154 5159 ipsec_prov_update_callback, CRYPTO_EVENT_MECHS_CHANGED);
5155 5160 }
5156 5161
5157 5162 /*
5158 5163  * Unregisters from the framework to be notified of crypto provider
5159 5164 * changes. Called from ipsec_policy_g_destroy().
5160 5165 */
5161 5166 static void
5162 5167 ipsec_unregister_prov_update(void)
5163 5168 {
5164 5169 if (prov_update_handle != NULL)
5165 5170 crypto_unnotify_events(prov_update_handle);
5166 5171 }
5167 5172
5168 5173 /*
5169 5174 * Tunnel-mode support routines.
5170 5175 */
5171 5176
5172 5177 /*
5173 5178 * Returns an mblk chain suitable for putnext() if policies match and IPsec
5174 5179  * SAs are available. If there's no per-tunnel policy, or the policy
5175 5180  * lookup comes back with no match, then still return the packet and
5176 5181  * have global policy take a crack at it in IP.
5177 5182 * This updates the ip_xmit_attr with the IPsec policy.
5178 5183 *
5179 5184 * Remember -> we can be forwarding packets. Keep that in mind w.r.t.
5180 5185 * inner-packet contents.
5181 5186 */
5182 5187 mblk_t *
5183 5188 ipsec_tun_outbound(mblk_t *mp, iptun_t *iptun, ipha_t *inner_ipv4,
5184 5189 ip6_t *inner_ipv6, ipha_t *outer_ipv4, ip6_t *outer_ipv6, int outer_hdr_len,
5185 5190 ip_xmit_attr_t *ixa)
5186 5191 {
5187 5192 ipsec_policy_head_t *polhead;
5188 5193 ipsec_selector_t sel;
5189 5194 mblk_t *nmp;
5190 5195 boolean_t is_fragment;
5191 5196 ipsec_policy_t *pol;
5192 5197 ipsec_tun_pol_t *itp = iptun->iptun_itp;
5193 5198 netstack_t *ns = iptun->iptun_ns;
5194 5199 ipsec_stack_t *ipss = ns->netstack_ipsec;
5195 5200
5196 5201 ASSERT(outer_ipv6 != NULL && outer_ipv4 == NULL ||
5197 5202 outer_ipv4 != NULL && outer_ipv6 == NULL);
5198 5203 /* We take care of inners in a bit. */
5199 5204
5200 5205 /* Are the IPsec fields initialized at all? */
5201 5206 if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE)) {
5202 5207 ASSERT(ixa->ixa_ipsec_policy == NULL);
5203 5208 ASSERT(ixa->ixa_ipsec_latch == NULL);
5204 5209 ASSERT(ixa->ixa_ipsec_action == NULL);
5205 5210 ASSERT(ixa->ixa_ipsec_ah_sa == NULL);
5206 5211 ASSERT(ixa->ixa_ipsec_esp_sa == NULL);
5207 5212 }
5208 5213
5209 5214 ASSERT(itp != NULL && (itp->itp_flags & ITPF_P_ACTIVE));
5210 5215 polhead = itp->itp_policy;
5211 5216
5212 5217 bzero(&sel, sizeof (sel));
5213 5218 if (inner_ipv4 != NULL) {
5214 5219 ASSERT(inner_ipv6 == NULL);
5215 5220 sel.ips_isv4 = B_TRUE;
5216 5221 sel.ips_local_addr_v4 = inner_ipv4->ipha_src;
5217 5222 sel.ips_remote_addr_v4 = inner_ipv4->ipha_dst;
5218 5223 sel.ips_protocol = (uint8_t)inner_ipv4->ipha_protocol;
5219 5224 } else {
5220 5225 ASSERT(inner_ipv6 != NULL);
5221 5226 sel.ips_isv4 = B_FALSE;
5222 5227 sel.ips_local_addr_v6 = inner_ipv6->ip6_src;
5223 5228 /*
5224 5229 * We don't care about routing-header dests in the
5225 5230 * forwarding/tunnel path, so just grab ip6_dst.
5226 5231 */
5227 5232 sel.ips_remote_addr_v6 = inner_ipv6->ip6_dst;
5228 5233 }
5229 5234
5230 5235 if (itp->itp_flags & ITPF_P_PER_PORT_SECURITY) {
5231 5236 /*
5232 5237 * Caller can prepend the outer header, which means
5233 5238 * inner_ipv[46] may be stuck in the middle. Pullup the whole
5234 5239 * mess now if need-be, for easier processing later. Don't
5235 5240 * forget to rewire the outer header too.
5236 5241 */
5237 5242 if (mp->b_cont != NULL) {
5238 5243 nmp = msgpullup(mp, -1);
5239 5244 if (nmp == NULL) {
5240 5245 ip_drop_packet(mp, B_FALSE, NULL,
5241 5246 DROPPER(ipss, ipds_spd_nomem),
5242 5247 &ipss->ipsec_spd_dropper);
5243 5248 return (NULL);
5244 5249 }
5245 5250 freemsg(mp);
5246 5251 mp = nmp;
5247 5252 if (outer_ipv4 != NULL)
5248 5253 outer_ipv4 = (ipha_t *)mp->b_rptr;
5249 5254 else
5250 5255 outer_ipv6 = (ip6_t *)mp->b_rptr;
5251 5256 if (inner_ipv4 != NULL) {
5252 5257 inner_ipv4 =
5253 5258 (ipha_t *)(mp->b_rptr + outer_hdr_len);
5254 5259 } else {
5255 5260 inner_ipv6 =
5256 5261 (ip6_t *)(mp->b_rptr + outer_hdr_len);
5257 5262 }
5258 5263 }
5259 5264 if (inner_ipv4 != NULL) {
5260 5265 is_fragment = IS_V4_FRAGMENT(
5261 5266 inner_ipv4->ipha_fragment_offset_and_flags);
5262 5267 } else {
5263 5268 sel.ips_remote_addr_v6 = ip_get_dst_v6(inner_ipv6, mp,
5264 5269 &is_fragment);
5265 5270 }
5266 5271
5267 5272 if (is_fragment) {
5268 5273 ipha_t *oiph;
5269 5274 ipha_t *iph = NULL;
5270 5275 ip6_t *ip6h = NULL;
5271 5276 int hdr_len;
5272 5277 uint16_t ip6_hdr_length;
5273 5278 uint8_t v6_proto;
5274 5279 uint8_t *v6_proto_p;
5275 5280
5276 5281 /*
5277 5282 * We have a fragment we need to track!
5278 5283 */
5279 5284 mp = ipsec_fragcache_add(&itp->itp_fragcache, NULL, mp,
5280 5285 outer_hdr_len, ipss);
5281 5286 if (mp == NULL)
5282 5287 return (NULL);
5283 5288 ASSERT(mp->b_cont == NULL);
5284 5289
5285 5290 /*
5286 5291 * If we get here, we have a full fragment chain
5287 5292 */
5288 5293
5289 5294 oiph = (ipha_t *)mp->b_rptr;
5290 5295 if (IPH_HDR_VERSION(oiph) == IPV4_VERSION) {
5291 5296 hdr_len = ((outer_hdr_len != 0) ?
5292 5297 IPH_HDR_LENGTH(oiph) : 0);
5293 5298 iph = (ipha_t *)(mp->b_rptr + hdr_len);
5294 5299 } else {
5295 5300 ASSERT(IPH_HDR_VERSION(oiph) == IPV6_VERSION);
5296 5301 ip6h = (ip6_t *)mp->b_rptr;
5297 5302 if (!ip_hdr_length_nexthdr_v6(mp, ip6h,
5298 5303 &ip6_hdr_length, &v6_proto_p)) {
5299 5304 ip_drop_packet_chain(mp, B_FALSE, NULL,
5300 5305 DROPPER(ipss,
5301 5306 ipds_spd_malformed_packet),
5302 5307 &ipss->ipsec_spd_dropper);
5303 5308 return (NULL);
5304 5309 }
5305 5310 hdr_len = ip6_hdr_length;
5306 5311 }
5307 5312 outer_hdr_len = hdr_len;
5308 5313
5309 5314 if (sel.ips_isv4) {
5310 5315 if (iph == NULL) {
5311 5316 /* Was v6 outer */
5312 5317 iph = (ipha_t *)(mp->b_rptr + hdr_len);
5313 5318 }
5314 5319 inner_ipv4 = iph;
5315 5320 sel.ips_local_addr_v4 = inner_ipv4->ipha_src;
5316 5321 sel.ips_remote_addr_v4 = inner_ipv4->ipha_dst;
5317 5322 sel.ips_protocol =
5318 5323 (uint8_t)inner_ipv4->ipha_protocol;
5319 5324 } else {
5320 5325 inner_ipv6 = (ip6_t *)(mp->b_rptr +
5321 5326 hdr_len);
5322 5327 sel.ips_local_addr_v6 = inner_ipv6->ip6_src;
5323 5328 sel.ips_remote_addr_v6 = inner_ipv6->ip6_dst;
5324 5329 if (!ip_hdr_length_nexthdr_v6(mp,
5325 5330 inner_ipv6, &ip6_hdr_length, &v6_proto_p)) {
5326 5331 ip_drop_packet_chain(mp, B_FALSE, NULL,
5327 5332 DROPPER(ipss,
5328 5333 ipds_spd_malformed_frag),
5329 5334 &ipss->ipsec_spd_dropper);
5330 5335 return (NULL);
5331 5336 }
5332 5337 v6_proto = *v6_proto_p;
5333 5338 sel.ips_protocol = v6_proto;
5334 5339 #ifdef FRAGCACHE_DEBUG
5335 5340 cmn_err(CE_WARN, "v6_sel.ips_protocol = %d\n",
5336 5341 sel.ips_protocol);
5337 5342 #endif
5338 5343 }
5339 5344 /* Ports are extracted below */
5340 5345 }
5341 5346
5342 5347 /* Get ports... */
5343 5348 if (!ipsec_init_outbound_ports(&sel, mp,
5344 5349 inner_ipv4, inner_ipv6, outer_hdr_len, ipss)) {
5345 5350 /* callee did ip_drop_packet_chain() on mp. */
5346 5351 return (NULL);
5347 5352 }
5348 5353 #ifdef FRAGCACHE_DEBUG
5349 5354 if (inner_ipv4 != NULL)
5350 5355 cmn_err(CE_WARN,
5351 5356 "(v4) sel.ips_protocol = %d, "
5352 5357 "sel.ips_local_port = %d, "
5353 5358 "sel.ips_remote_port = %d\n",
5354 5359 sel.ips_protocol, ntohs(sel.ips_local_port),
5355 5360 ntohs(sel.ips_remote_port));
5356 5361 if (inner_ipv6 != NULL)
5357 5362 cmn_err(CE_WARN,
5358 5363 "(v6) sel.ips_protocol = %d, "
5359 5364 "sel.ips_local_port = %d, "
5360 5365 "sel.ips_remote_port = %d\n",
5361 5366 sel.ips_protocol, ntohs(sel.ips_local_port),
5362 5367 ntohs(sel.ips_remote_port));
5363 5368 #endif
5364 5369 /* Success so far! */
5365 5370 }
5366 5371 rw_enter(&polhead->iph_lock, RW_READER);
5367 5372 pol = ipsec_find_policy_head(NULL, polhead, IPSEC_TYPE_OUTBOUND, &sel);
5368 5373 rw_exit(&polhead->iph_lock);
5369 5374 if (pol == NULL) {
5370 5375 /*
5371 5376 * No matching policy on this tunnel, drop the packet.
5372 5377 *
5373 5378 * NOTE: Tunnel-mode tunnels are different from the
5374 5379 * IP global transport mode policy head. For a tunnel-mode
5375 5380 		 * tunnel, we drop the packet instead of passing it
5376 5381 		 * along as accepted, the way a global-policy miss would.
5377 5382 *
5378 5383 * NOTE2: "negotiate transport" tunnels should match ALL
5379 5384 * inbound packets, but we do not uncomment the ASSERT()
5380 5385 * below because if/when we open PF_POLICY, a user can
5381 5386 		 * shoot themselves in the foot with a 0 priority.
5382 5387 */
5383 5388
5384 5389 /* ASSERT(itp->itp_flags & ITPF_P_TUNNEL); */
5385 5390 #ifdef FRAGCACHE_DEBUG
5386 5391 cmn_err(CE_WARN, "ipsec_tun_outbound(): No matching tunnel "
5387 5392 "per-port policy\n");
5388 5393 #endif
5389 5394 ip_drop_packet_chain(mp, B_FALSE, NULL,
5390 5395 DROPPER(ipss, ipds_spd_explicit),
5391 5396 &ipss->ipsec_spd_dropper);
5392 5397 return (NULL);
5393 5398 }
5394 5399
5395 5400 #ifdef FRAGCACHE_DEBUG
5396 5401 	cmn_err(CE_WARN, "Found matching tunnel per-port policy\n");
5397 5402 #endif
5398 5403
5399 5404 /*
5400 5405 	 * NOTE: ixa_cleanup() will release the pol reference.
5401 5406 */
5402 5407 ixa->ixa_ipsec_policy = pol;
5403 5408 /*
5404 5409 * NOTE: There is a subtle difference between iptun_zoneid and
5405 5410 * iptun_connp->conn_zoneid explained in iptun_conn_create(). When
5406 5411 * interacting with the ip module, we must use conn_zoneid.
5407 5412 */
5408 5413 ixa->ixa_zoneid = iptun->iptun_connp->conn_zoneid;
5409 5414
5410 5415 ASSERT((outer_ipv4 != NULL) ? (ixa->ixa_flags & IXAF_IS_IPV4) :
5411 5416 !(ixa->ixa_flags & IXAF_IS_IPV4));
5412 5417 ASSERT(ixa->ixa_ipsec_policy != NULL);
5413 5418 ixa->ixa_flags |= IXAF_IPSEC_SECURE;
5414 5419
5415 5420 if (!(itp->itp_flags & ITPF_P_TUNNEL)) {
5416 5421 /* Set up transport mode for tunnelled packets. */
5417 5422 ixa->ixa_ipsec_proto = (inner_ipv4 != NULL) ? IPPROTO_ENCAP :
5418 5423 IPPROTO_IPV6;
5419 5424 return (mp);
5420 5425 }
5421 5426
5422 5427 /* Fill in tunnel-mode goodies here. */
5423 5428 ixa->ixa_flags |= IXAF_IPSEC_TUNNEL;
5424 5429 /* XXX Do I need to fill in all of the goodies here? */
5425 5430 if (inner_ipv4) {
5426 5431 ixa->ixa_ipsec_inaf = AF_INET;
5427 5432 ixa->ixa_ipsec_insrc[0] =
5428 5433 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v4;
5429 5434 ixa->ixa_ipsec_indst[0] =
5430 5435 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v4;
5431 5436 } else {
5432 5437 ixa->ixa_ipsec_inaf = AF_INET6;
5433 5438 ixa->ixa_ipsec_insrc[0] =
5434 5439 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[0];
5435 5440 ixa->ixa_ipsec_insrc[1] =
5436 5441 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[1];
5437 5442 ixa->ixa_ipsec_insrc[2] =
5438 5443 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[2];
5439 5444 ixa->ixa_ipsec_insrc[3] =
5440 5445 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[3];
5441 5446 ixa->ixa_ipsec_indst[0] =
5442 5447 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[0];
5443 5448 ixa->ixa_ipsec_indst[1] =
5444 5449 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[1];
5445 5450 ixa->ixa_ipsec_indst[2] =
5446 5451 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[2];
5447 5452 ixa->ixa_ipsec_indst[3] =
5448 5453 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[3];
5449 5454 }
5450 5455 ixa->ixa_ipsec_insrcpfx = pol->ipsp_sel->ipsl_key.ipsl_local_pfxlen;
5451 5456 ixa->ixa_ipsec_indstpfx = pol->ipsp_sel->ipsl_key.ipsl_remote_pfxlen;
5452 5457 /* NOTE: These are used for transport mode too. */
5453 5458 ixa->ixa_ipsec_src_port = pol->ipsp_sel->ipsl_key.ipsl_lport;
5454 5459 ixa->ixa_ipsec_dst_port = pol->ipsp_sel->ipsl_key.ipsl_rport;
5455 5460 ixa->ixa_ipsec_proto = pol->ipsp_sel->ipsl_key.ipsl_proto;
5456 5461
5457 5462 return (mp);
5458 5463 }
5459 5464
5460 5465 /*
5461 5466 * NOTE: The following releases pol's reference and
5462 5467 * calls ip_drop_packet() for me on NULL returns.
5463 5468 */
5464 5469 mblk_t *
5465 5470 ipsec_check_ipsecin_policy_reasm(mblk_t *attr_mp, ipsec_policy_t *pol,
5466 5471 ipha_t *inner_ipv4, ip6_t *inner_ipv6, uint64_t pkt_unique, netstack_t *ns)
5467 5472 {
5468 5473 	/* Assume attr_mp is a chain of b_next-linked ip_recv_attr mblks. */
5469 5474 mblk_t *data_chain = NULL, *data_tail = NULL;
5470 5475 mblk_t *next;
5471 5476 mblk_t *data_mp;
5472 5477 ip_recv_attr_t iras;
5473 5478
5474 5479 while (attr_mp != NULL) {
5475 5480 ASSERT(ip_recv_attr_is_mblk(attr_mp));
5476 5481 next = attr_mp->b_next;
5477 5482 attr_mp->b_next = NULL; /* No tripping asserts. */
5478 5483
5479 5484 data_mp = attr_mp->b_cont;
5480 5485 attr_mp->b_cont = NULL;
5481 5486 if (!ip_recv_attr_from_mblk(attr_mp, &iras)) {
5482 5487 /* The ill or ip_stack_t disappeared on us */
5483 5488 freemsg(data_mp); /* ip_drop_packet?? */
5484 5489 ira_cleanup(&iras, B_TRUE);
5485 5490 goto fail;
5486 5491 }
5487 5492
5488 5493 /*
5489 5494 * Need IPPOL_REFHOLD(pol) for extras because
5490 5495 		 * ipsec_check_ipsecin_policy() does the refrele.
5491 5496 */
5492 5497 IPPOL_REFHOLD(pol);
5493 5498
5494 5499 data_mp = ipsec_check_ipsecin_policy(data_mp, pol, inner_ipv4,
5495 5500 inner_ipv6, pkt_unique, &iras, ns);
5496 5501 ira_cleanup(&iras, B_TRUE);
5497 5502
5498 5503 if (data_mp == NULL)
5499 5504 goto fail;
5500 5505
5501 5506 if (data_tail == NULL) {
5502 5507 /* First one */
5503 5508 data_chain = data_tail = data_mp;
5504 5509 } else {
5505 5510 data_tail->b_next = data_mp;
5506 5511 data_tail = data_mp;
5507 5512 }
5508 5513 attr_mp = next;
5509 5514 }
5510 5515 /*
5511 5516 * One last release because either the loop bumped it up, or we never
5512 5517 * called ipsec_check_ipsecin_policy().
5513 5518 */
5514 5519 IPPOL_REFRELE(pol);
5515 5520
5516 5521 /* data_chain is ready for return to tun module. */
5517 5522 return (data_chain);
5518 5523
5519 5524 fail:
5520 5525 /*
5521 5526 * Need to get rid of any extra pol
5522 5527 * references, and any remaining bits as well.
5523 5528 */
5524 5529 IPPOL_REFRELE(pol);
5525 5530 ipsec_freemsg_chain(data_chain);
5526 5531 ipsec_freemsg_chain(next); /* ipdrop stats? */
5527 5532 return (NULL);
5528 5533 }
5529 5534
5530 5535 /*
5531 5536 * Return a message if the inbound packet passed an IPsec policy check. Returns
5532 5537 * NULL if it failed or if it is a fragment needing its friends before a
5533 5538 * policy check can be performed.
5534 5539 *
5535 5540 * Expects a non-NULL data_mp, and a non-NULL polhead.
5536 5541 * The returned mblk may be a b_next chain of packets if fragments
5537 5542  * needed to be collected for a proper policy check.
5538 5543 *
5539 5544 * This function calls ip_drop_packet() on data_mp if need be.
5540 5545 *
5541 5546 * NOTE: outer_hdr_len is signed. If it's a negative value, the caller
5542 5547 * is inspecting an ICMP packet.
5543 5548 */
5544 5549 mblk_t *
5545 5550 ipsec_tun_inbound(ip_recv_attr_t *ira, mblk_t *data_mp, ipsec_tun_pol_t *itp,
5546 5551 ipha_t *inner_ipv4, ip6_t *inner_ipv6, ipha_t *outer_ipv4,
5547 5552 ip6_t *outer_ipv6, int outer_hdr_len, netstack_t *ns)
5548 5553 {
5549 5554 ipsec_policy_head_t *polhead;
5550 5555 ipsec_selector_t sel;
5551 5556 ipsec_policy_t *pol;
5552 5557 uint16_t tmpport;
5553 5558 selret_t rc;
5554 5559 boolean_t port_policy_present, is_icmp, global_present;
5555 5560 in6_addr_t tmpaddr;
5556 5561 ipaddr_t tmp4;
5557 5562 uint8_t flags, *inner_hdr;
5558 5563 ipsec_stack_t *ipss = ns->netstack_ipsec;
5559 5564
5560 5565 sel.ips_is_icmp_inv_acq = 0;
5561 5566
5562 5567 if (outer_ipv4 != NULL) {
5563 5568 ASSERT(outer_ipv6 == NULL);
5564 5569 global_present = ipss->ipsec_inbound_v4_policy_present;
5565 5570 } else {
5566 5571 ASSERT(outer_ipv6 != NULL);
5567 5572 global_present = ipss->ipsec_inbound_v6_policy_present;
5568 5573 }
5569 5574
5570 5575 ASSERT(inner_ipv4 != NULL && inner_ipv6 == NULL ||
5571 5576 inner_ipv4 == NULL && inner_ipv6 != NULL);
5572 5577
5573 5578 if (outer_hdr_len < 0) {
5574 5579 outer_hdr_len = (-outer_hdr_len);
5575 5580 is_icmp = B_TRUE;
5576 5581 } else {
5577 5582 is_icmp = B_FALSE;
5578 5583 }
5579 5584
5580 5585 if (itp != NULL && (itp->itp_flags & ITPF_P_ACTIVE)) {
5581 5586 mblk_t *mp = data_mp;
5582 5587
5583 5588 polhead = itp->itp_policy;
5584 5589 /*
5585 5590 * We need to perform full Tunnel-Mode enforcement,
5586 5591 * and we need to have inner-header data for such enforcement.
5587 5592 *
5588 5593 * See ipsec_init_inbound_sel() for the 0x80000000 on inbound
5589 5594 * and on return.
5590 5595 */
5591 5596
5592 5597 port_policy_present = ((itp->itp_flags &
5593 5598 ITPF_P_PER_PORT_SECURITY) ? B_TRUE : B_FALSE);
5594 5599 /*
5595 5600 * NOTE: Even if our policy is transport mode, set the
5596 5601 * SEL_TUNNEL_MODE flag so ipsec_init_inbound_sel() can
5597 5602 * do the right thing w.r.t. outer headers.
5598 5603 */
5599 5604 flags = ((port_policy_present ? SEL_PORT_POLICY : SEL_NONE) |
5600 5605 (is_icmp ? SEL_IS_ICMP : SEL_NONE) | SEL_TUNNEL_MODE);
5601 5606
5602 5607 rc = ipsec_init_inbound_sel(&sel, data_mp, inner_ipv4,
5603 5608 inner_ipv6, flags);
5604 5609
5605 5610 switch (rc) {
5606 5611 case SELRET_NOMEM:
5607 5612 ip_drop_packet(data_mp, B_TRUE, NULL,
5608 5613 DROPPER(ipss, ipds_spd_nomem),
5609 5614 &ipss->ipsec_spd_dropper);
5610 5615 return (NULL);
5611 5616 case SELRET_TUNFRAG:
5612 5617 /*
5613 5618 * At this point, if we're cleartext, we don't want
5614 5619 * to go there.
5615 5620 */
5616 5621 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
5617 5622 ip_drop_packet(data_mp, B_TRUE, NULL,
5618 5623 DROPPER(ipss, ipds_spd_got_clear),
5619 5624 &ipss->ipsec_spd_dropper);
5620 5625 return (NULL);
5621 5626 }
5622 5627
5623 5628 /*
5624 5629 * Inner and outer headers may not be contiguous.
5625 5630 * Pullup the data_mp now to satisfy assumptions of
5626 5631 * ipsec_fragcache_add()
5627 5632 */
5628 5633 if (data_mp->b_cont != NULL) {
5629 5634 mblk_t *nmp;
5630 5635
5631 5636 nmp = msgpullup(data_mp, -1);
5632 5637 if (nmp == NULL) {
5633 5638 ip_drop_packet(data_mp, B_TRUE, NULL,
5634 5639 DROPPER(ipss, ipds_spd_nomem),
5635 5640 &ipss->ipsec_spd_dropper);
5636 5641 return (NULL);
5637 5642 }
5638 5643 freemsg(data_mp);
5639 5644 data_mp = nmp;
5640 5645 if (outer_ipv4 != NULL)
5641 5646 outer_ipv4 =
5642 5647 (ipha_t *)data_mp->b_rptr;
5643 5648 else
5644 5649 outer_ipv6 =
5645 5650 (ip6_t *)data_mp->b_rptr;
5646 5651 if (inner_ipv4 != NULL) {
5647 5652 inner_ipv4 =
5648 5653 (ipha_t *)(data_mp->b_rptr +
5649 5654 outer_hdr_len);
5650 5655 } else {
5651 5656 inner_ipv6 =
5652 5657 (ip6_t *)(data_mp->b_rptr +
5653 5658 outer_hdr_len);
5654 5659 }
5655 5660 }
5656 5661
5657 5662 /*
5658 5663 * If we need to queue the packet. First we
5659 5664 			 * We need to queue the packet. First get an mblk
5660 5665 			 * with the attributes; ipsec_fragcache_add() will
5661 5666 			 * prepend that to the queued data and return a list
5662 5667 			 * of b_next messages, each of which starts with the
5663 5668 			 * attribute mblk.
5664 5669 mp = ip_recv_attr_to_mblk(ira);
5665 5670 if (mp == NULL) {
5666 5671 ip_drop_packet(data_mp, B_TRUE, NULL,
5667 5672 DROPPER(ipss, ipds_spd_nomem),
5668 5673 &ipss->ipsec_spd_dropper);
5669 5674 return (NULL);
5670 5675 }
5671 5676
5672 5677 mp = ipsec_fragcache_add(&itp->itp_fragcache,
5673 5678 mp, data_mp, outer_hdr_len, ipss);
5674 5679
5675 5680 if (mp == NULL) {
5676 5681 /*
5677 5682 * Data is cached, fragment chain is not
5678 5683 * complete.
5679 5684 */
5680 5685 return (NULL);
5681 5686 }
5682 5687
5683 5688 /*
5684 5689 * If we get here, we have a full fragment chain.
5685 5690 * Reacquire headers and selectors from first fragment.
5686 5691 */
5687 5692 ASSERT(ip_recv_attr_is_mblk(mp));
5688 5693 data_mp = mp->b_cont;
5689 5694 inner_hdr = data_mp->b_rptr;
5690 5695 if (outer_ipv4 != NULL) {
5691 5696 inner_hdr += IPH_HDR_LENGTH(
5692 5697 (ipha_t *)data_mp->b_rptr);
5693 5698 } else {
5694 5699 inner_hdr += ip_hdr_length_v6(data_mp,
5695 5700 (ip6_t *)data_mp->b_rptr);
5696 5701 }
5697 5702 ASSERT(inner_hdr <= data_mp->b_wptr);
5698 5703
5699 5704 if (inner_ipv4 != NULL) {
5700 5705 inner_ipv4 = (ipha_t *)inner_hdr;
5701 5706 inner_ipv6 = NULL;
5702 5707 } else {
5703 5708 inner_ipv6 = (ip6_t *)inner_hdr;
5704 5709 inner_ipv4 = NULL;
5705 5710 }
5706 5711
5707 5712 /*
5708 5713 * Use SEL_TUNNEL_MODE to take into account the outer
5709 5714 * header. Use SEL_POST_FRAG so we always get ports.
5710 5715 */
5711 5716 rc = ipsec_init_inbound_sel(&sel, data_mp,
5712 5717 inner_ipv4, inner_ipv6,
5713 5718 SEL_TUNNEL_MODE | SEL_POST_FRAG);
5714 5719 switch (rc) {
5715 5720 case SELRET_SUCCESS:
5716 5721 /*
5717 5722 * Get to same place as first caller's
5718 5723 * SELRET_SUCCESS case.
5719 5724 */
5720 5725 break;
5721 5726 case SELRET_NOMEM:
5722 5727 ip_drop_packet_chain(mp, B_TRUE, NULL,
5723 5728 DROPPER(ipss, ipds_spd_nomem),
5724 5729 &ipss->ipsec_spd_dropper);
5725 5730 return (NULL);
5726 5731 case SELRET_BADPKT:
5727 5732 ip_drop_packet_chain(mp, B_TRUE, NULL,
5728 5733 DROPPER(ipss, ipds_spd_malformed_frag),
5729 5734 &ipss->ipsec_spd_dropper);
5730 5735 return (NULL);
5731 5736 case SELRET_TUNFRAG:
5732 5737 cmn_err(CE_WARN, "(TUNFRAG on 2nd call...)");
5733 5738 /* FALLTHRU */
5734 5739 default:
5735 5740 cmn_err(CE_WARN, "ipsec_init_inbound_sel(mark2)"
5736 5741 " returns bizarro 0x%x", rc);
5737 5742 /* Guaranteed panic! */
5738 5743 ASSERT(rc == SELRET_NOMEM);
5739 5744 return (NULL);
5740 5745 }
5741 5746 /* FALLTHRU */
5742 5747 case SELRET_SUCCESS:
5743 5748 /*
5744 5749 * Common case:
5745 5750 * No per-port policy or a non-fragment. Keep going.
5746 5751 */
5747 5752 break;
5748 5753 case SELRET_BADPKT:
5749 5754 /*
5750 5755 * We may receive ICMP (with IPv6 inner) packets that
5751 5756 * trigger this return value. Send 'em in for
5752 5757 * enforcement checking.
5753 5758 */
5754 5759 cmn_err(CE_NOTE, "ipsec_tun_inbound(): "
5755 5760 "sending 'bad packet' in for enforcement");
5756 5761 break;
5757 5762 default:
5758 5763 cmn_err(CE_WARN,
5759 5764 "ipsec_init_inbound_sel() returns bizarro 0x%x",
5760 5765 rc);
5761 5766 ASSERT(rc == SELRET_NOMEM); /* Guaranteed panic! */
5762 5767 return (NULL);
5763 5768 }
5764 5769
5765 5770 if (is_icmp) {
5766 5771 /*
5767 5772 * Swap local/remote because this is an ICMP packet.
5768 5773 */
5769 5774 tmpaddr = sel.ips_local_addr_v6;
5770 5775 sel.ips_local_addr_v6 = sel.ips_remote_addr_v6;
5771 5776 sel.ips_remote_addr_v6 = tmpaddr;
5772 5777 tmpport = sel.ips_local_port;
5773 5778 sel.ips_local_port = sel.ips_remote_port;
5774 5779 sel.ips_remote_port = tmpport;
5775 5780 }
5776 5781
5777 5782 /* find_policy_head() */
5778 5783 rw_enter(&polhead->iph_lock, RW_READER);
5779 5784 pol = ipsec_find_policy_head(NULL, polhead, IPSEC_TYPE_INBOUND,
5780 5785 &sel);
5781 5786 rw_exit(&polhead->iph_lock);
5782 5787 if (pol != NULL) {
5783 5788 uint64_t pkt_unique;
5784 5789
5785 5790 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
5786 5791 if (!pol->ipsp_act->ipa_allow_clear) {
5787 5792 /*
5788 5793 * XXX should never get here with
5789 5794 * tunnel reassembled fragments?
5790 5795 */
5791 5796 ASSERT(mp == data_mp);
5792 5797 ip_drop_packet(data_mp, B_TRUE, NULL,
5793 5798 DROPPER(ipss, ipds_spd_got_clear),
5794 5799 &ipss->ipsec_spd_dropper);
5795 5800 IPPOL_REFRELE(pol);
5796 5801 return (NULL);
5797 5802 } else {
5798 5803 IPPOL_REFRELE(pol);
5799 5804 return (mp);
5800 5805 }
5801 5806 }
5802 5807 pkt_unique = SA_UNIQUE_ID(sel.ips_remote_port,
5803 5808 sel.ips_local_port,
5804 5809 (inner_ipv4 == NULL) ? IPPROTO_IPV6 :
5805 5810 IPPROTO_ENCAP, sel.ips_protocol);
5806 5811
5807 5812 /*
5808 5813 * NOTE: The following releases pol's reference and
5809 5814 * calls ip_drop_packet() for me on NULL returns.
5810 5815 *
5811 5816 * "sel" is still good here, so let's use it!
5812 5817 */
5813 5818 if (data_mp == mp) {
5814 5819 /* A single packet without attributes */
5815 5820 data_mp = ipsec_check_ipsecin_policy(data_mp,
5816 5821 pol, inner_ipv4, inner_ipv6, pkt_unique,
5817 5822 ira, ns);
5818 5823 } else {
5819 5824 /*
5820 5825 * We pass in the b_next chain of attr_mp's
5821 5826 * and get back a b_next chain of data_mp's.
5822 5827 */
5823 5828 data_mp = ipsec_check_ipsecin_policy_reasm(mp,
5824 5829 pol, inner_ipv4, inner_ipv6, pkt_unique,
5825 5830 ns);
5826 5831 }
5827 5832 return (data_mp);
5828 5833 }
5829 5834
5830 5835 /*
5831 5836 * Else fallthru and check the global policy on the outer
5832 5837 * header(s) if this tunnel is an old-style transport-mode
5833 5838 * one. Drop the packet explicitly (no policy entry) for
5834 5839 * a new-style tunnel-mode tunnel.
5835 5840 */
5836 5841 if ((itp->itp_flags & ITPF_P_TUNNEL) && !is_icmp) {
5837 5842 ip_drop_packet_chain(data_mp, B_TRUE, NULL,
5838 5843 DROPPER(ipss, ipds_spd_explicit),
5839 5844 &ipss->ipsec_spd_dropper);
5840 5845 return (NULL);
5841 5846 }
5842 5847 }
5843 5848
5844 5849 /*
5845 5850 * NOTE: If we reach here, we will not have packet chains from
5846 5851 * fragcache_add(), because the only way I get chains is on a
5847 5852 * tunnel-mode tunnel, which either returns with a pass, or gets
5848 5853 * hit by the ip_drop_packet_chain() call right above here.
5849 5854 */
5850 5855 ASSERT(data_mp->b_next == NULL);
5851 5856
5852 5857 /* If no per-tunnel security, check global policy now. */
5853 5858 if ((ira->ira_flags & IRAF_IPSEC_SECURE) && !global_present) {
5854 5859 if (ira->ira_flags & IRAF_TRUSTED_ICMP) {
5855 5860 /*
5856 5861 			 * This is an ICMP message that was generated locally.
5857 5862 * We should accept it.
5858 5863 */
5859 5864 return (data_mp);
5860 5865 }
5861 5866
5862 5867 ip_drop_packet(data_mp, B_TRUE, NULL,
5863 5868 DROPPER(ipss, ipds_spd_got_secure),
5864 5869 &ipss->ipsec_spd_dropper);
5865 5870 return (NULL);
5866 5871 }
5867 5872
5868 5873 if (is_icmp) {
5869 5874 /*
5870 5875 * For ICMP packets, "outer_ipvN" is set to the outer header
5871 5876 * that is *INSIDE* the ICMP payload. For global policy
5872 5877 * checking, we need to reverse src/dst on the payload in
5873 5878 * order to construct selectors appropriately. See "ripha"
5874 5879 * constructions in ip.c. To avoid a bug like 6478464 (see
5875 5880 * earlier in this file), we will actually exchange src/dst
5876 5881 	 * in the packet, and reverse it after the call to
5877 5882 * ipsec_check_global_policy().
5878 5883 */
5879 5884 if (outer_ipv4 != NULL) {
5880 5885 tmp4 = outer_ipv4->ipha_src;
5881 5886 outer_ipv4->ipha_src = outer_ipv4->ipha_dst;
5882 5887 outer_ipv4->ipha_dst = tmp4;
5883 5888 } else {
5884 5889 ASSERT(outer_ipv6 != NULL);
5885 5890 tmpaddr = outer_ipv6->ip6_src;
5886 5891 outer_ipv6->ip6_src = outer_ipv6->ip6_dst;
5887 5892 outer_ipv6->ip6_dst = tmpaddr;
5888 5893 }
5889 5894 }
5890 5895
5891 5896 data_mp = ipsec_check_global_policy(data_mp, NULL, outer_ipv4,
5892 5897 outer_ipv6, ira, ns);
5893 5898 if (data_mp == NULL)
5894 5899 return (NULL);
5895 5900
5896 5901 if (is_icmp) {
5897 5902 /* Set things back to normal. */
5898 5903 if (outer_ipv4 != NULL) {
5899 5904 tmp4 = outer_ipv4->ipha_src;
5900 5905 outer_ipv4->ipha_src = outer_ipv4->ipha_dst;
5901 5906 outer_ipv4->ipha_dst = tmp4;
5902 5907 } else {
5903 5908 /* No need for ASSERT()s now. */
5904 5909 tmpaddr = outer_ipv6->ip6_src;
5905 5910 outer_ipv6->ip6_src = outer_ipv6->ip6_dst;
5906 5911 outer_ipv6->ip6_dst = tmpaddr;
5907 5912 }
5908 5913 }
5909 5914
5910 5915 /*
5911 5916 * At this point, we pretend it's a cleartext accepted
5912 5917 * packet.
5913 5918 */
5914 5919 return (data_mp);
5915 5920 }
5916 5921
5917 5922 /*
5918 5923 * AVL comparison routine for our list of tunnel polheads.
5919 5924 */
5920 5925 static int
5921 5926 tunnel_compare(const void *arg1, const void *arg2)
5922 5927 {
5923 5928 ipsec_tun_pol_t *left, *right;
5924 5929 int rc;
5925 5930
5926 5931 left = (ipsec_tun_pol_t *)arg1;
5927 5932 right = (ipsec_tun_pol_t *)arg2;
5928 5933
5929 5934 rc = strncmp(left->itp_name, right->itp_name, LIFNAMSIZ);
5930 5935 return (rc == 0 ? rc : (rc > 0 ? 1 : -1));
5931 5936 }
5932 5937
5933 5938 /*
5934 5939 * Free a tunnel policy node.
5935 5940 */
5936 5941 void
5937 5942 itp_free(ipsec_tun_pol_t *node, netstack_t *ns)
5938 5943 {
5939 5944 if (node->itp_policy != NULL) {
5940 5945 IPPH_REFRELE(node->itp_policy, ns);
5941 5946 node->itp_policy = NULL;
5942 5947 }
5943 5948 if (node->itp_inactive != NULL) {
5944 5949 IPPH_REFRELE(node->itp_inactive, ns);
5945 5950 node->itp_inactive = NULL;
5946 5951 }
5947 5952 mutex_destroy(&node->itp_lock);
5948 5953 kmem_free(node, sizeof (*node));
5949 5954 }
5950 5955
5951 5956 void
5952 5957 itp_unlink(ipsec_tun_pol_t *node, netstack_t *ns)
5953 5958 {
5954 5959 ipsec_stack_t *ipss = ns->netstack_ipsec;
5955 5960
5956 5961 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_WRITER);
5957 5962 ipss->ipsec_tunnel_policy_gen++;
5958 5963 ipsec_fragcache_uninit(&node->itp_fragcache, ipss);
5959 5964 avl_remove(&ipss->ipsec_tunnel_policies, node);
5960 5965 rw_exit(&ipss->ipsec_tunnel_policy_lock);
5961 5966 ITP_REFRELE(node, ns);
5962 5967 }
5963 5968
5964 5969 /*
5965 5970 * Public interface to look up a tunnel security policy by name. Used by
5966 5971 * spdsock mostly. Returns "node" with a bumped refcnt.
5967 5972 */
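/*
 * Sketch of the expected caller pattern (illustrative only): look the
 * node up and release the reference taken here when done, e.g.
 *
 *	itp = get_tunnel_policy(name, ns);
 *	if (itp != NULL) {
 *		... use itp ...
 *		ITP_REFRELE(itp, ns);
 *	}
 */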
5968 5973 ipsec_tun_pol_t *
5969 5974 get_tunnel_policy(char *name, netstack_t *ns)
5970 5975 {
5971 5976 ipsec_tun_pol_t *node, lookup;
5972 5977 ipsec_stack_t *ipss = ns->netstack_ipsec;
5973 5978
5974 5979 (void) strncpy(lookup.itp_name, name, LIFNAMSIZ);
5975 5980
5976 5981 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
5977 5982 node = (ipsec_tun_pol_t *)avl_find(&ipss->ipsec_tunnel_policies,
5978 5983 &lookup, NULL);
5979 5984 if (node != NULL) {
5980 5985 ITP_REFHOLD(node);
5981 5986 }
5982 5987 rw_exit(&ipss->ipsec_tunnel_policy_lock);
5983 5988
5984 5989 return (node);
5985 5990 }
5986 5991
5987 5992 /*
5988 5993  * Public interface to walk all tunnel security policies. Useful for spdsock
5989 5994 * DUMP operations. iterator() will not consume a reference.
5990 5995 */
5991 5996 void
5992 5997 itp_walk(void (*iterator)(ipsec_tun_pol_t *, void *, netstack_t *),
5993 5998 void *arg, netstack_t *ns)
5994 5999 {
5995 6000 ipsec_tun_pol_t *node;
5996 6001 ipsec_stack_t *ipss = ns->netstack_ipsec;
5997 6002
5998 6003 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
5999 6004 for (node = avl_first(&ipss->ipsec_tunnel_policies); node != NULL;
6000 6005 node = AVL_NEXT(&ipss->ipsec_tunnel_policies, node)) {
6001 6006 iterator(node, arg, ns);
6002 6007 }
6003 6008 rw_exit(&ipss->ipsec_tunnel_policy_lock);
6004 6009 }
6005 6010
6006 6011 /*
6007 6012 * Initialize policy head. This can only fail if there's a memory problem.
6008 6013 */
6009 6014 static boolean_t
6010 6015 tunnel_polhead_init(ipsec_policy_head_t *iph, netstack_t *ns)
6011 6016 {
6012 6017 ipsec_stack_t *ipss = ns->netstack_ipsec;
6013 6018
6014 6019 rw_init(&iph->iph_lock, NULL, RW_DEFAULT, NULL);
6015 6020 iph->iph_refs = 1;
6016 6021 iph->iph_gen = 0;
6017 6022 if (ipsec_alloc_table(iph, ipss->ipsec_tun_spd_hashsize,
6018 6023 KM_SLEEP, B_FALSE, ns) != 0) {
6019 6024 ipsec_polhead_free_table(iph);
6020 6025 return (B_FALSE);
6021 6026 }
6022 6027 ipsec_polhead_init(iph, ipss->ipsec_tun_spd_hashsize);
6023 6028 return (B_TRUE);
6024 6029 }
6025 6030
6026 6031 /*
6027 6032  * Create a tunnel policy node with "name". Set *errno to
6028 6033  * ENOMEM if there's a memory problem, and to EEXIST if there's an
6029 6034  * existing node with the same name.
6030 6035 */
6031 6036 ipsec_tun_pol_t *
6032 6037 create_tunnel_policy(char *name, int *errno, uint64_t *gen, netstack_t *ns)
6033 6038 {
6034 6039 ipsec_tun_pol_t *newbie, *existing;
6035 6040 avl_index_t where;
6036 6041 ipsec_stack_t *ipss = ns->netstack_ipsec;
6037 6042
6038 6043 newbie = kmem_zalloc(sizeof (*newbie), KM_NOSLEEP);
6039 6044 if (newbie == NULL) {
6040 6045 *errno = ENOMEM;
6041 6046 return (NULL);
6042 6047 }
6043 6048 if (!ipsec_fragcache_init(&newbie->itp_fragcache)) {
6044 6049 kmem_free(newbie, sizeof (*newbie));
6045 6050 *errno = ENOMEM;
6046 6051 return (NULL);
6047 6052 }
6048 6053
6049 6054 (void) strncpy(newbie->itp_name, name, LIFNAMSIZ);
6050 6055
6051 6056 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_WRITER);
6052 6057 existing = (ipsec_tun_pol_t *)avl_find(&ipss->ipsec_tunnel_policies,
6053 6058 newbie, &where);
6054 6059 if (existing != NULL) {
6055 6060 itp_free(newbie, ns);
6056 6061 *errno = EEXIST;
6057 6062 rw_exit(&ipss->ipsec_tunnel_policy_lock);
6058 6063 return (NULL);
6059 6064 }
6060 6065 ipss->ipsec_tunnel_policy_gen++;
6061 6066 *gen = ipss->ipsec_tunnel_policy_gen;
6062 6067 newbie->itp_refcnt = 2; /* One for the caller, one for the tree. */
6063 6068 newbie->itp_next_policy_index = 1;
6064 6069 avl_insert(&ipss->ipsec_tunnel_policies, newbie, where);
6065 6070 mutex_init(&newbie->itp_lock, NULL, MUTEX_DEFAULT, NULL);
6066 6071 newbie->itp_policy = kmem_zalloc(sizeof (ipsec_policy_head_t),
6067 6072 KM_NOSLEEP);
6068 6073 if (newbie->itp_policy == NULL)
6069 6074 goto nomem;
6070 6075 newbie->itp_inactive = kmem_zalloc(sizeof (ipsec_policy_head_t),
6071 6076 KM_NOSLEEP);
6072 6077 if (newbie->itp_inactive == NULL) {
6073 6078 kmem_free(newbie->itp_policy, sizeof (ipsec_policy_head_t));
6074 6079 goto nomem;
6075 6080 }
6076 6081
6077 6082 if (!tunnel_polhead_init(newbie->itp_policy, ns)) {
6078 6083 kmem_free(newbie->itp_policy, sizeof (ipsec_policy_head_t));
6079 6084 kmem_free(newbie->itp_inactive, sizeof (ipsec_policy_head_t));
6080 6085 goto nomem;
6081 6086 } else if (!tunnel_polhead_init(newbie->itp_inactive, ns)) {
6082 6087 IPPH_REFRELE(newbie->itp_policy, ns);
6083 6088 kmem_free(newbie->itp_inactive, sizeof (ipsec_policy_head_t));
6084 6089 goto nomem;
6085 6090 }
6086 6091 rw_exit(&ipss->ipsec_tunnel_policy_lock);
6087 6092
6088 6093 return (newbie);
6089 6094 nomem:
6090 6095 *errno = ENOMEM;
6091 6096 kmem_free(newbie, sizeof (*newbie));
6092 6097 return (NULL);
6093 6098 }
6094 6099
6095 6100 /*
6096 6101 * Given two addresses, find a tunnel instance's IPsec policy heads.
6097 6102 * Returns NULL on failure.
6098 6103 */
6099 6104 ipsec_tun_pol_t *
6100 6105 itp_get_byaddr(uint32_t *laddr, uint32_t *faddr, int af, ip_stack_t *ipst)
6101 6106 {
6102 6107 conn_t *connp;
6103 6108 iptun_t *iptun;
6104 6109 ipsec_tun_pol_t *itp = NULL;
6105 6110
6106 6111 	/* Classifiers expect "src" to be the foreign address. */
6107 6112 if (af == AF_INET) {
6108 6113 connp = ipcl_iptun_classify_v4((ipaddr_t *)faddr,
6109 6114 (ipaddr_t *)laddr, ipst);
6110 6115 } else {
6111 6116 ASSERT(af == AF_INET6);
6112 6117 ASSERT(!IN6_IS_ADDR_V4MAPPED((in6_addr_t *)laddr));
6113 6118 ASSERT(!IN6_IS_ADDR_V4MAPPED((in6_addr_t *)faddr));
6114 6119 connp = ipcl_iptun_classify_v6((in6_addr_t *)faddr,
6115 6120 (in6_addr_t *)laddr, ipst);
6116 6121 }
6117 6122
6118 6123 if (connp == NULL)
6119 6124 return (NULL);
6120 6125
6121 6126 if (IPCL_IS_IPTUN(connp)) {
6122 6127 iptun = connp->conn_iptun;
6123 6128 if (iptun != NULL) {
6124 6129 itp = iptun->iptun_itp;
6125 6130 if (itp != NULL) {
6126 6131 /* Braces due to the macro's nature... */
6127 6132 ITP_REFHOLD(itp);
6128 6133 }
6129 6134 } /* Else itp is already NULL. */
6130 6135 }
6131 6136
6132 6137 CONN_DEC_REF(connp);
6133 6138 return (itp);
6134 6139 }
6135 6140
6136 6141 /*
6137 6142 * Frag cache code, based on SunScreen 3.2 source
6138 6143 * screen/kernel/common/screen_fragcache.c
6139 6144 */
6140 6145
6141 6146 #define IPSEC_FRAG_TTL_MAX 5
6142 6147 /*
6143 6148 * Note that the following parameters create 256 hash buckets
6144 6149  * with 1024 free entries to be distributed. Entries are cleaned
6145 6150  * periodically, and cleaning is also attempted when there is no
6146 6151  * free space, but this system errs on the side of dropping packets
6147 6152  * over exhausting memory. We may decide to make the hash
6148 6153  * factor a tunable if this proves to be a bad decision.
6149 6154 */
6150 6155 #define IPSEC_FRAG_HASH_SLOTS (1<<8)
6151 6156 #define IPSEC_FRAG_HASH_FACTOR 4
6152 6157 #define IPSEC_FRAG_HASH_SIZE (IPSEC_FRAG_HASH_SLOTS * IPSEC_FRAG_HASH_FACTOR)
6153 6158
6154 6159 #define IPSEC_FRAG_HASH_MASK (IPSEC_FRAG_HASH_SLOTS - 1)
6155 6160 #define IPSEC_FRAG_HASH_FUNC(id) (((id) & IPSEC_FRAG_HASH_MASK) ^ \
6156 6161 (((id) / \
6157 6162 (ushort_t)IPSEC_FRAG_HASH_SLOTS) & \
6158 6163 IPSEC_FRAG_HASH_MASK))
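/*
 * Worked example (hypothetical ident): with 256 slots, an identification
 * value of 0x1234 selects bucket (0x1234 & 0xff) ^ ((0x1234 / 256) & 0xff),
 * i.e. 0x34 ^ 0x12 == 0x26, folding the upper byte into the lower one.
 */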
6159 6164
6160 6165 /* Maximum fragments per packet. 48 bytes payload x 1366 fragments > 64KB */
6161 6166 #define IPSEC_MAX_FRAGS 1366
6162 6167
6163 6168 #define V4_FRAG_OFFSET(ipha) ((ntohs(ipha->ipha_fragment_offset_and_flags) & \
6164 6169 IPH_OFFSET) << 3)
6165 6170 #define V4_MORE_FRAGS(ipha) (ntohs(ipha->ipha_fragment_offset_and_flags) & \
6166 6171 IPH_MF)
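/*
 * For example (hypothetical header): a fragment whose offset field holds
 * 185 (in 8-byte units) starts at byte 185 << 3 == 1480 of the original
 * datagram, and V4_MORE_FRAGS() is non-zero while IPH_MF is set.
 */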
6167 6172
6168 6173 /*
6169 6174 * Initialize an ipsec fragcache instance.
6170 6175 * Returns B_FALSE if memory allocation fails.
6171 6176 */
6172 6177 boolean_t
6173 6178 ipsec_fragcache_init(ipsec_fragcache_t *frag)
6174 6179 {
6175 6180 ipsec_fragcache_entry_t *ftemp;
6176 6181 int i;
6177 6182
6178 6183 mutex_init(&frag->itpf_lock, NULL, MUTEX_DEFAULT, NULL);
6179 6184 frag->itpf_ptr = (ipsec_fragcache_entry_t **)
6180 6185 kmem_zalloc(sizeof (ipsec_fragcache_entry_t *) *
6181 6186 IPSEC_FRAG_HASH_SLOTS, KM_NOSLEEP);
6182 6187 if (frag->itpf_ptr == NULL)
6183 6188 return (B_FALSE);
6184 6189
6185 6190 ftemp = (ipsec_fragcache_entry_t *)
6186 6191 kmem_zalloc(sizeof (ipsec_fragcache_entry_t) *
6187 6192 IPSEC_FRAG_HASH_SIZE, KM_NOSLEEP);
6188 6193 if (ftemp == NULL) {
6189 6194 kmem_free(frag->itpf_ptr, sizeof (ipsec_fragcache_entry_t *) *
6190 6195 IPSEC_FRAG_HASH_SLOTS);
6191 6196 return (B_FALSE);
6192 6197 }
6193 6198
6194 6199 frag->itpf_freelist = NULL;
6195 6200
6196 6201 for (i = 0; i < IPSEC_FRAG_HASH_SIZE; i++) {
6197 6202 ftemp->itpfe_next = frag->itpf_freelist;
6198 6203 frag->itpf_freelist = ftemp;
6199 6204 ftemp++;
6200 6205 }
6201 6206
6202 6207 frag->itpf_expire_hint = 0;
6203 6208
6204 6209 return (B_TRUE);
6205 6210 }
6206 6211
6207 6212 void
6208 6213 ipsec_fragcache_uninit(ipsec_fragcache_t *frag, ipsec_stack_t *ipss)
6209 6214 {
6210 6215 ipsec_fragcache_entry_t *fep;
6211 6216 int i;
6212 6217
6213 6218 mutex_enter(&frag->itpf_lock);
6214 6219 if (frag->itpf_ptr) {
6215 6220 /* Delete any existing fragcache entry chains */
6216 6221 for (i = 0; i < IPSEC_FRAG_HASH_SLOTS; i++) {
6217 6222 fep = (frag->itpf_ptr)[i];
6218 6223 while (fep != NULL) {
6219 6224 /* Returned fep is next in chain or NULL */
6220 6225 fep = fragcache_delentry(i, fep, frag, ipss);
6221 6226 }
6222 6227 }
6223 6228 /*
6224 6229 * Chase the pointers back to the beginning
6225 6230 * of the memory allocation and then
6226 6231 * get rid of the allocated freelist
6227 6232 */
6228 6233 while (frag->itpf_freelist->itpfe_next != NULL)
6229 6234 frag->itpf_freelist = frag->itpf_freelist->itpfe_next;
6230 6235 /*
6231 6236 * XXX - If we ever dynamically grow the freelist
6232 6237 * then we'll have to free entries individually
6233 6238 * or determine how many entries or chunks we have
6234 6239 * grown since the initial allocation.
6235 6240 */
6236 6241 kmem_free(frag->itpf_freelist,
6237 6242 sizeof (ipsec_fragcache_entry_t) *
6238 6243 IPSEC_FRAG_HASH_SIZE);
6239 6244 /* Free the fragcache structure */
6240 6245 kmem_free(frag->itpf_ptr,
6241 6246 sizeof (ipsec_fragcache_entry_t *) *
6242 6247 IPSEC_FRAG_HASH_SLOTS);
6243 6248 }
6244 6249 mutex_exit(&frag->itpf_lock);
6245 6250 mutex_destroy(&frag->itpf_lock);
6246 6251 }
6247 6252
6248 6253 /*
6249 6254 * Add a fragment to the fragment cache. Consumes mp if NULL is returned.
6250 6255  * Returns mp if a whole fragment chain has been assembled; NULL otherwise.
6251 6256 * The returned mp could be a b_next chain of fragments.
6252 6257 *
6253 6258 * The iramp argument is set on inbound; NULL if outbound.
6254 6259 */
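/*
 * In this file the outbound path (ipsec_tun_outbound()) calls this with
 * iramp == NULL, while the inbound path (ipsec_tun_inbound()) first wraps
 * its attributes with ip_recv_attr_to_mblk() and passes the result as
 * iramp.
 */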
6255 6260 mblk_t *
6256 6261 ipsec_fragcache_add(ipsec_fragcache_t *frag, mblk_t *iramp, mblk_t *mp,
6257 6262 int outer_hdr_len, ipsec_stack_t *ipss)
6258 6263 {
6259 6264 boolean_t is_v4;
6260 6265 time_t itpf_time;
6261 6266 ipha_t *iph;
6262 6267 ipha_t *oiph;
6263 6268 ip6_t *ip6h = NULL;
6264 6269 uint8_t v6_proto;
6265 6270 uint8_t *v6_proto_p;
6266 6271 uint16_t ip6_hdr_length;
6267 6272 ip_pkt_t ipp;
6268 6273 ip6_frag_t *fraghdr;
6269 6274 ipsec_fragcache_entry_t *fep;
6270 6275 int i;
6271 6276 mblk_t *nmp, *prevmp;
6272 6277 int firstbyte, lastbyte;
6273 6278 int offset;
6274 6279 int last;
6275 6280 boolean_t inbound = (iramp != NULL);
6276 6281
6277 6282 #ifdef FRAGCACHE_DEBUG
6278 6283 cmn_err(CE_WARN, "Fragcache: %s\n", inbound ? "INBOUND" : "OUTBOUND");
6279 6284 #endif
6280 6285 /*
6281 6286 * You're on the slow path, so insure that every packet in the
6282 6287 	 * You're on the slow path, so ensure that every packet in the
6283 6288 */
6284 6289 if (mp->b_cont != NULL) {
6285 6290 nmp = msgpullup(mp, -1);
6286 6291 if (nmp == NULL) {
6287 6292 ip_drop_packet(mp, inbound, NULL,
6288 6293 DROPPER(ipss, ipds_spd_nomem),
6289 6294 &ipss->ipsec_spd_dropper);
6290 6295 if (inbound)
6291 6296 (void) ip_recv_attr_free_mblk(iramp);
6292 6297 return (NULL);
6293 6298 }
6294 6299 freemsg(mp);
6295 6300 mp = nmp;
6296 6301 }
6297 6302
6298 6303 mutex_enter(&frag->itpf_lock);
6299 6304
6300 6305 oiph = (ipha_t *)mp->b_rptr;
6301 6306 iph = (ipha_t *)(mp->b_rptr + outer_hdr_len);
6302 6307
6303 6308 if (IPH_HDR_VERSION(iph) == IPV4_VERSION) {
6304 6309 is_v4 = B_TRUE;
6305 6310 } else {
6306 6311 ASSERT(IPH_HDR_VERSION(iph) == IPV6_VERSION);
6307 6312 ip6h = (ip6_t *)(mp->b_rptr + outer_hdr_len);
6308 6313
6309 6314 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &ip6_hdr_length,
6310 6315 &v6_proto_p)) {
6311 6316 /*
6312 6317 			 * Find the upper-layer protocol.
6313 6318 			 * If that fails, we have a malformed packet.
6314 6319 */
6315 6320 mutex_exit(&frag->itpf_lock);
6316 6321 ip_drop_packet(mp, inbound, NULL,
6317 6322 DROPPER(ipss, ipds_spd_malformed_packet),
6318 6323 &ipss->ipsec_spd_dropper);
6319 6324 if (inbound)
6320 6325 (void) ip_recv_attr_free_mblk(iramp);
6321 6326 return (NULL);
6322 6327 } else {
6323 6328 v6_proto = *v6_proto_p;
6324 6329 }
6325 6330
6326 6331
6327 6332 bzero(&ipp, sizeof (ipp));
6328 6333 (void) ip_find_hdr_v6(mp, ip6h, B_FALSE, &ipp, NULL);
6329 6334 if (!(ipp.ipp_fields & IPPF_FRAGHDR)) {
6330 6335 /*
6331 6336 * We think this is a fragment, but didn't find
6332 6337 * a fragment header. Something is wrong.
6333 6338 */
6334 6339 mutex_exit(&frag->itpf_lock);
6335 6340 ip_drop_packet(mp, inbound, NULL,
6336 6341 DROPPER(ipss, ipds_spd_malformed_frag),
6337 6342 &ipss->ipsec_spd_dropper);
6338 6343 if (inbound)
6339 6344 (void) ip_recv_attr_free_mblk(iramp);
6340 6345 return (NULL);
6341 6346 }
6342 6347 fraghdr = ipp.ipp_fraghdr;
6343 6348 is_v4 = B_FALSE;
6344 6349 }
6345 6350
6346 6351 /* Anything to cleanup? */
6347 6352
6348 6353 /*
6349 6354 * This cleanup call could be put in a timer loop
6350 6355 * but it may actually be just as reasonable a decision to
6351 6356 * leave it here. The disadvantage is this only gets called when
6352 6357 * frags are added. The advantage is that it is not
6353 6358 * susceptible to race conditions like a time-based cleanup
6354 6359 * may be.
6355 6360 */
6356 6361 itpf_time = gethrestime_sec();
6357 6362 if (itpf_time >= frag->itpf_expire_hint)
6358 6363 ipsec_fragcache_clean(frag, ipss);
6359 6364
6360 6365 /* Lookup to see if there is an existing entry */
6361 6366
6362 6367 if (is_v4)
6363 6368 i = IPSEC_FRAG_HASH_FUNC(iph->ipha_ident);
6364 6369 else
6365 6370 i = IPSEC_FRAG_HASH_FUNC(fraghdr->ip6f_ident);
6366 6371
6367 6372 for (fep = (frag->itpf_ptr)[i]; fep; fep = fep->itpfe_next) {
6368 6373 if (is_v4) {
6369 6374 ASSERT(iph != NULL);
6370 6375 if ((fep->itpfe_id == iph->ipha_ident) &&
6371 6376 (fep->itpfe_src == iph->ipha_src) &&
6372 6377 (fep->itpfe_dst == iph->ipha_dst) &&
6373 6378 (fep->itpfe_proto == iph->ipha_protocol))
6374 6379 break;
6375 6380 } else {
6376 6381 ASSERT(fraghdr != NULL);
6377 6382 ASSERT(fep != NULL);
6378 6383 if ((fep->itpfe_id == fraghdr->ip6f_ident) &&
6379 6384 IN6_ARE_ADDR_EQUAL(&fep->itpfe_src6,
6380 6385 &ip6h->ip6_src) &&
6381 6386 IN6_ARE_ADDR_EQUAL(&fep->itpfe_dst6,
6382 6387 &ip6h->ip6_dst) && (fep->itpfe_proto == v6_proto))
6383 6388 break;
6384 6389 }
6385 6390 }
6386 6391
6387 6392 if (is_v4) {
6388 6393 firstbyte = V4_FRAG_OFFSET(iph);
6389 6394 lastbyte = firstbyte + ntohs(iph->ipha_length) -
6390 6395 IPH_HDR_LENGTH(iph);
6391 6396 last = (V4_MORE_FRAGS(iph) == 0);
6392 6397 #ifdef FRAGCACHE_DEBUG
6393 6398 cmn_err(CE_WARN, "V4 fragcache: firstbyte = %d, lastbyte = %d, "
6394 6399 "is_last_frag = %d, id = %d, mp = %p\n", firstbyte,
6395 6400 lastbyte, last, iph->ipha_ident, mp);
6396 6401 #endif
6397 6402 } else {
6398 6403 firstbyte = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
6399 6404 lastbyte = firstbyte + ntohs(ip6h->ip6_plen) +
6400 6405 sizeof (ip6_t) - ip6_hdr_length;
6401 6406 last = (fraghdr->ip6f_offlg & IP6F_MORE_FRAG) == 0;
6402 6407 #ifdef FRAGCACHE_DEBUG
6403 6408 cmn_err(CE_WARN, "V6 fragcache: firstbyte = %d, lastbyte = %d, "
6404 6409 "is_last_frag = %d, id = %d, fraghdr = %p, mp = %p\n",
6405 6410 firstbyte, lastbyte, last, fraghdr->ip6f_ident, fraghdr,
6406 6411 mp);
6407 6412 #endif
6408 6413 }
6409 6414
6410 6415 /* check for bogus fragments and delete the entry */
6411 6416 if (firstbyte > 0 && firstbyte <= 8) {
6412 6417 if (fep != NULL)
6413 6418 (void) fragcache_delentry(i, fep, frag, ipss);
6414 6419 mutex_exit(&frag->itpf_lock);
6415 6420 ip_drop_packet(mp, inbound, NULL,
6416 6421 DROPPER(ipss, ipds_spd_malformed_frag),
6417 6422 &ipss->ipsec_spd_dropper);
6418 6423 if (inbound)
6419 6424 (void) ip_recv_attr_free_mblk(iramp);
6420 6425 return (NULL);
6421 6426 }
6422 6427
6423 6428 /* Not found, allocate a new entry */
6424 6429 if (fep == NULL) {
6425 6430 if (frag->itpf_freelist == NULL) {
6426 6431 /* see if there is some space */
6427 6432 ipsec_fragcache_clean(frag, ipss);
6428 6433 if (frag->itpf_freelist == NULL) {
6429 6434 mutex_exit(&frag->itpf_lock);
6430 6435 ip_drop_packet(mp, inbound, NULL,
6431 6436 DROPPER(ipss, ipds_spd_nomem),
6432 6437 &ipss->ipsec_spd_dropper);
6433 6438 if (inbound)
6434 6439 (void) ip_recv_attr_free_mblk(iramp);
6435 6440 return (NULL);
6436 6441 }
6437 6442 }
6438 6443
6439 6444 fep = frag->itpf_freelist;
6440 6445 frag->itpf_freelist = fep->itpfe_next;
6441 6446
6442 6447 if (is_v4) {
6443 6448 bcopy((caddr_t)&iph->ipha_src, (caddr_t)&fep->itpfe_src,
6444 6449 sizeof (struct in_addr));
6445 6450 bcopy((caddr_t)&iph->ipha_dst, (caddr_t)&fep->itpfe_dst,
6446 6451 sizeof (struct in_addr));
6447 6452 fep->itpfe_id = iph->ipha_ident;
6448 6453 fep->itpfe_proto = iph->ipha_protocol;
6449 6454 i = IPSEC_FRAG_HASH_FUNC(fep->itpfe_id);
6450 6455 } else {
6451 6456 bcopy((in6_addr_t *)&ip6h->ip6_src,
6452 6457 (in6_addr_t *)&fep->itpfe_src6,
6453 6458 sizeof (struct in6_addr));
6454 6459 bcopy((in6_addr_t *)&ip6h->ip6_dst,
6455 6460 (in6_addr_t *)&fep->itpfe_dst6,
6456 6461 sizeof (struct in6_addr));
6457 6462 fep->itpfe_id = fraghdr->ip6f_ident;
6458 6463 fep->itpfe_proto = v6_proto;
6459 6464 i = IPSEC_FRAG_HASH_FUNC(fep->itpfe_id);
6460 6465 }
6461 6466 itpf_time = gethrestime_sec();
6462 6467 fep->itpfe_exp = itpf_time + IPSEC_FRAG_TTL_MAX + 1;
6463 6468 fep->itpfe_last = 0;
6464 6469 fep->itpfe_fraglist = NULL;
6465 6470 fep->itpfe_depth = 0;
6466 6471 fep->itpfe_next = (frag->itpf_ptr)[i];
6467 6472 (frag->itpf_ptr)[i] = fep;
6468 6473
6469 6474 if (frag->itpf_expire_hint > fep->itpfe_exp)
6470 6475 frag->itpf_expire_hint = fep->itpfe_exp;
6471 6476
6472 6477 }
6473 6478
6474 6479 /* Insert it in the frag list */
6475 6480 /* List is in order by starting offset of fragments */
6476 6481
6477 6482 prevmp = NULL;
6478 6483 for (nmp = fep->itpfe_fraglist; nmp; nmp = nmp->b_next) {
6479 6484 ipha_t *niph;
6480 6485 ipha_t *oniph;
6481 6486 ip6_t *nip6h;
6482 6487 ip_pkt_t nipp;
6483 6488 ip6_frag_t *nfraghdr;
6484 6489 uint16_t nip6_hdr_length;
6485 6490 uint8_t *nv6_proto_p;
6486 6491 int nfirstbyte, nlastbyte;
6487 6492 char *data, *ndata;
6488 6493 mblk_t *ndata_mp = (inbound ? nmp->b_cont : nmp);
6489 6494 int hdr_len;
6490 6495
6491 6496 oniph = (ipha_t *)mp->b_rptr;
6492 6497 nip6h = NULL;
6493 6498 niph = NULL;
6494 6499
6495 6500 /*
6496 6501 * Determine outer header type and length and set
6497 6502 * pointers appropriately
6498 6503 */
6499 6504
6500 6505 if (IPH_HDR_VERSION(oniph) == IPV4_VERSION) {
6501 6506 hdr_len = ((outer_hdr_len != 0) ?
6502 6507 IPH_HDR_LENGTH(oiph) : 0);
6503 6508 niph = (ipha_t *)(ndata_mp->b_rptr + hdr_len);
6504 6509 } else {
6505 6510 ASSERT(IPH_HDR_VERSION(oniph) == IPV6_VERSION);
6506 6511 ASSERT(ndata_mp->b_cont == NULL);
6507 6512 nip6h = (ip6_t *)ndata_mp->b_rptr;
6508 6513 (void) ip_hdr_length_nexthdr_v6(ndata_mp, nip6h,
6509 6514 &nip6_hdr_length, &v6_proto_p);
6510 6515 hdr_len = ((outer_hdr_len != 0) ? nip6_hdr_length : 0);
6511 6516 }
6512 6517
6513 6518 /*
6514 6519 * Determine inner header type and length and set
6515 6520 * pointers appropriately
6516 6521 */
6517 6522
6518 6523 if (is_v4) {
6519 6524 if (niph == NULL) {
6520 6525 /* Was v6 outer */
6521 6526 niph = (ipha_t *)(ndata_mp->b_rptr + hdr_len);
6522 6527 }
6523 6528 nfirstbyte = V4_FRAG_OFFSET(niph);
6524 6529 nlastbyte = nfirstbyte + ntohs(niph->ipha_length) -
6525 6530 IPH_HDR_LENGTH(niph);
6526 6531 } else {
6527 6532 ASSERT(ndata_mp->b_cont == NULL);
6528 6533 nip6h = (ip6_t *)(ndata_mp->b_rptr + hdr_len);
6529 6534 if (!ip_hdr_length_nexthdr_v6(ndata_mp, nip6h,
6530 6535 &nip6_hdr_length, &nv6_proto_p)) {
6531 6536 mutex_exit(&frag->itpf_lock);
6532 6537 ip_drop_packet_chain(nmp, inbound, NULL,
6533 6538 DROPPER(ipss, ipds_spd_malformed_frag),
6534 6539 &ipss->ipsec_spd_dropper);
6535 6540 ipsec_freemsg_chain(ndata_mp);
6536 6541 if (inbound)
6537 6542 (void) ip_recv_attr_free_mblk(iramp);
6538 6543 return (NULL);
6539 6544 }
6540 6545 bzero(&nipp, sizeof (nipp));
6541 6546 (void) ip_find_hdr_v6(ndata_mp, nip6h, B_FALSE, &nipp,
6542 6547 NULL);
6543 6548 nfraghdr = nipp.ipp_fraghdr;
6544 6549 nfirstbyte = ntohs(nfraghdr->ip6f_offlg &
6545 6550 IP6F_OFF_MASK);
6546 6551 nlastbyte = nfirstbyte + ntohs(nip6h->ip6_plen) +
6547 6552 sizeof (ip6_t) - nip6_hdr_length;
6548 6553 }
6549 6554
6550 6555 /* Check for overlapping fragments */
6551 6556 if (firstbyte >= nfirstbyte && firstbyte < nlastbyte) {
6552 6557 /*
6553 6558 * Overlap Check:
6554 6559 * ~~~~--------- # Check if the newly
6555 6560 * ~ ndata_mp| # received fragment
6556 6561 * ~~~~--------- # overlaps with the
6557 6562 * ---------~~~~~~ # current fragment.
6558 6563 * | mp ~
6559 6564 * ---------~~~~~~
6560 6565 */
6561 6566 if (is_v4) {
6562 6567 data = (char *)iph + IPH_HDR_LENGTH(iph) +
6563 6568 firstbyte - nfirstbyte;
6564 6569 ndata = (char *)niph + IPH_HDR_LENGTH(niph);
6565 6570 } else {
6566 6571 data = (char *)ip6h +
6567 6572 nip6_hdr_length + firstbyte -
6568 6573 nfirstbyte;
6569 6574 ndata = (char *)nip6h + nip6_hdr_length;
6570 6575 }
6571 6576 if (bcmp(data, ndata, MIN(lastbyte, nlastbyte) -
6572 6577 firstbyte)) {
6573 6578 /* Overlapping data does not match */
6574 6579 (void) fragcache_delentry(i, fep, frag, ipss);
6575 6580 mutex_exit(&frag->itpf_lock);
6576 6581 ip_drop_packet(mp, inbound, NULL,
6577 6582 DROPPER(ipss, ipds_spd_overlap_frag),
6578 6583 &ipss->ipsec_spd_dropper);
6579 6584 if (inbound)
6580 6585 (void) ip_recv_attr_free_mblk(iramp);
6581 6586 return (NULL);
6582 6587 }
6583 6588 /* Part of defense for jolt2.c fragmentation attack */
6584 6589 if (firstbyte >= nfirstbyte && lastbyte <= nlastbyte) {
6585 6590 /*
6586 6591 * Check for identical or subset fragments:
6587 6592 * ---------- ~~~~--------~~~~~
6588 6593 * | nmp | or ~ nmp ~
6589 6594 * ---------- ~~~~--------~~~~~
6590 6595 * ---------- ------
6591 6596 * | mp | | mp |
6592 6597 * ---------- ------
6593 6598 */
6594 6599 mutex_exit(&frag->itpf_lock);
6595 6600 ip_drop_packet(mp, inbound, NULL,
6596 6601 DROPPER(ipss, ipds_spd_evil_frag),
6597 6602 &ipss->ipsec_spd_dropper);
6598 6603 if (inbound)
6599 6604 (void) ip_recv_attr_free_mblk(iramp);
6600 6605 return (NULL);
6601 6606 }
6602 6607
6603 6608 }
6604 6609
6605 6610 /* Correct location for this fragment? */
6606 6611 if (firstbyte <= nfirstbyte) {
6607 6612 /*
6608 6613 * Check if the tail end of the new fragment overlaps
6609 6614 * with the head of the current fragment.
6610 6615 * --------~~~~~~~
6611 6616 * | nmp ~
6612 6617 * --------~~~~~~~
6613 6618 * ~~~~~--------
6614 6619 * ~ mp |
6615 6620 * ~~~~~--------
6616 6621 */
6617 6622 if (lastbyte > nfirstbyte) {
6618 6623 /* Fragments overlap */
6619 6624 data = (char *)iph + IPH_HDR_LENGTH(iph) +
6620 6625 firstbyte - nfirstbyte;
6621 6626 ndata = (char *)niph + IPH_HDR_LENGTH(niph);
6622 6627 if (is_v4) {
6623 6628 data = (char *)iph +
6624 6629 IPH_HDR_LENGTH(iph) + firstbyte -
6625 6630 nfirstbyte;
6626 6631 ndata = (char *)niph +
6627 6632 IPH_HDR_LENGTH(niph);
6628 6633 } else {
6629 6634 data = (char *)ip6h +
6630 6635 nip6_hdr_length + firstbyte -
6631 6636 nfirstbyte;
6632 6637 ndata = (char *)nip6h + nip6_hdr_length;
6633 6638 }
6634 6639 if (bcmp(data, ndata, MIN(lastbyte, nlastbyte)
6635 6640 - nfirstbyte)) {
6636 6641 /* Overlap mismatch */
6637 6642 (void) fragcache_delentry(i, fep, frag,
6638 6643 ipss);
6639 6644 mutex_exit(&frag->itpf_lock);
6640 6645 ip_drop_packet(mp, inbound, NULL,
6641 6646 DROPPER(ipss,
6642 6647 ipds_spd_overlap_frag),
6643 6648 &ipss->ipsec_spd_dropper);
6644 6649 if (inbound) {
6645 6650 (void) ip_recv_attr_free_mblk(
6646 6651 iramp);
6647 6652 }
6648 6653 return (NULL);
6649 6654 }
6650 6655 }
6651 6656
6652 6657 /*
6653 6658 * Fragment does not illegally overlap and can now
6654 6659 * be inserted into the chain
6655 6660 */
6656 6661 break;
6657 6662 }
6658 6663
6659 6664 prevmp = nmp;
6660 6665 }
6661 6666 /* Prepend the attributes before we link it in */
6662 6667 if (iramp != NULL) {
6663 6668 ASSERT(iramp->b_cont == NULL);
6664 6669 iramp->b_cont = mp;
6665 6670 mp = iramp;
6666 6671 iramp = NULL;
6667 6672 }
6668 6673 mp->b_next = nmp;
6669 6674
6670 6675 if (prevmp == NULL) {
6671 6676 fep->itpfe_fraglist = mp;
6672 6677 } else {
6673 6678 prevmp->b_next = mp;
6674 6679 }
6675 6680 if (last)
6676 6681 fep->itpfe_last = 1;
6677 6682
6678 6683 /* Part of defense for jolt2.c fragmentation attack */
6679 6684 if (++(fep->itpfe_depth) > IPSEC_MAX_FRAGS) {
6680 6685 (void) fragcache_delentry(i, fep, frag, ipss);
6681 6686 mutex_exit(&frag->itpf_lock);
6682 6687 if (inbound)
6683 6688 mp = ip_recv_attr_free_mblk(mp);
6684 6689
6685 6690 ip_drop_packet(mp, inbound, NULL,
6686 6691 DROPPER(ipss, ipds_spd_max_frags),
6687 6692 &ipss->ipsec_spd_dropper);
6688 6693 return (NULL);
6689 6694 }
6690 6695
6691 6696 /* Check for complete packet */
6692 6697
6693 6698 if (!fep->itpfe_last) {
6694 6699 mutex_exit(&frag->itpf_lock);
6695 6700 #ifdef FRAGCACHE_DEBUG
6696 6701 cmn_err(CE_WARN, "Fragment cached, last not yet seen.\n");
6697 6702 #endif
6698 6703 return (NULL);
6699 6704 }
6700 6705
6701 6706 offset = 0;
6702 6707 for (mp = fep->itpfe_fraglist; mp; mp = mp->b_next) {
6703 6708 mblk_t *data_mp = (inbound ? mp->b_cont : mp);
6704 6709 int hdr_len;
6705 6710
6706 6711 oiph = (ipha_t *)data_mp->b_rptr;
6707 6712 ip6h = NULL;
6708 6713 iph = NULL;
6709 6714
6710 6715 if (IPH_HDR_VERSION(oiph) == IPV4_VERSION) {
6711 6716 hdr_len = ((outer_hdr_len != 0) ?
6712 6717 IPH_HDR_LENGTH(oiph) : 0);
6713 6718 iph = (ipha_t *)(data_mp->b_rptr + hdr_len);
6714 6719 } else {
6715 6720 ASSERT(IPH_HDR_VERSION(oiph) == IPV6_VERSION);
6716 6721 ASSERT(data_mp->b_cont == NULL);
6717 6722 ip6h = (ip6_t *)data_mp->b_rptr;
6718 6723 (void) ip_hdr_length_nexthdr_v6(data_mp, ip6h,
6719 6724 &ip6_hdr_length, &v6_proto_p);
6720 6725 hdr_len = ((outer_hdr_len != 0) ? ip6_hdr_length : 0);
6721 6726 }
6722 6727
6723 6728 /* Calculate current fragment start/end */
6724 6729 if (is_v4) {
6725 6730 if (iph == NULL) {
6726 6731 /* Was v6 outer */
6727 6732 iph = (ipha_t *)(data_mp->b_rptr + hdr_len);
6728 6733 }
6729 6734 firstbyte = V4_FRAG_OFFSET(iph);
6730 6735 lastbyte = firstbyte + ntohs(iph->ipha_length) -
6731 6736 IPH_HDR_LENGTH(iph);
6732 6737 } else {
6733 6738 ASSERT(data_mp->b_cont == NULL);
6734 6739 ip6h = (ip6_t *)(data_mp->b_rptr + hdr_len);
6735 6740 if (!ip_hdr_length_nexthdr_v6(data_mp, ip6h,
6736 6741 &ip6_hdr_length, &v6_proto_p)) {
6737 6742 mutex_exit(&frag->itpf_lock);
6738 6743 ip_drop_packet_chain(mp, inbound, NULL,
6739 6744 DROPPER(ipss, ipds_spd_malformed_frag),
6740 6745 &ipss->ipsec_spd_dropper);
6741 6746 return (NULL);
6742 6747 }
6743 6748 v6_proto = *v6_proto_p;
6744 6749 bzero(&ipp, sizeof (ipp));
6745 6750 (void) ip_find_hdr_v6(data_mp, ip6h, B_FALSE, &ipp,
6746 6751 NULL);
6747 6752 fraghdr = ipp.ipp_fraghdr;
6748 6753 firstbyte = ntohs(fraghdr->ip6f_offlg &
6749 6754 IP6F_OFF_MASK);
6750 6755 lastbyte = firstbyte + ntohs(ip6h->ip6_plen) +
6751 6756 sizeof (ip6_t) - ip6_hdr_length;
6752 6757 }
6753 6758
6754 6759 /*
6755 6760 * If this fragment starts beyond the current offset,
6756 6761 * a fragment is missing, so return NULL.
6757 6762 */
6758 6763 if (firstbyte > offset) {
6759 6764 mutex_exit(&frag->itpf_lock);
6760 6765 #ifdef FRAGCACHE_DEBUG
6761 6766 /*
6762 6767 * Note, this can happen when the last frag
6763 6768 * gets sent through because it is smaller
6764 6769 * than the MTU. It is not necessarily an
6765 6770 * error condition.
6766 6771 */
6767 6772 cmn_err(CE_WARN, "Frag greater than offset! : "
6768 6773 "missing fragment: firstbyte = %d, offset = %d, "
6769 6774 "mp = %p\n", firstbyte, offset, mp);
6770 6775 #endif
6771 6776 return (NULL);
6772 6777 }
6773 6778 #ifdef FRAGCACHE_DEBUG
6774 6779 cmn_err(CE_WARN, "Frag offsets : "
6775 6780 "firstbyte = %d, offset = %d, mp = %p\n",
6776 6781 firstbyte, offset, mp);
6777 6782 #endif
6778 6783
6779 6784 /*
6780 6785 * If we are at the last fragment, we have the complete
6781 6786 * packet, so rechain things and return it to caller
6782 6787 * for processing
6783 6788 */
6784 6789
6785 6790 if ((is_v4 && !V4_MORE_FRAGS(iph)) ||
6786 6791 (!is_v4 && !(fraghdr->ip6f_offlg & IP6F_MORE_FRAG))) {
6787 6792 mp = fep->itpfe_fraglist;
6788 6793 fep->itpfe_fraglist = NULL;
6789 6794 (void) fragcache_delentry(i, fep, frag, ipss);
6790 6795 mutex_exit(&frag->itpf_lock);
6791 6796
6792 6797 if ((is_v4 && (firstbyte + ntohs(iph->ipha_length) >
6793 6798 65535)) || (!is_v4 && (firstbyte +
6794 6799 ntohs(ip6h->ip6_plen) > 65535))) {
6795 6800 /* It is an invalid "ping-o-death" packet */
6796 6801 /* Discard it */
6797 6802 ip_drop_packet_chain(mp, inbound, NULL,
6798 6803 DROPPER(ipss, ipds_spd_evil_frag),
6799 6804 &ipss->ipsec_spd_dropper);
6800 6805 return (NULL);
6801 6806 }
6802 6807 #ifdef FRAGCACHE_DEBUG
6803 6808 cmn_err(CE_WARN, "Fragcache returning mp = %p, "
6804 6809 "mp->b_next = %p", mp, mp->b_next);
6805 6810 #endif
6806 6811 /*
6807 6812 * For inbound case, mp has attrmp b_next'd chain
6808 6813 * For outbound case, it is just data mp chain
6809 6814 */
6810 6815 return (mp);
6811 6816 }
6812 6817
6813 6818 /*
6814 6819 * Update new ending offset if this
6815 6820 * fragment extends the packet
6816 6821 */
6817 6822 if (offset < lastbyte)
6818 6823 offset = lastbyte;
6819 6824 }
6820 6825
6821 6826 mutex_exit(&frag->itpf_lock);
6822 6827
6823 6828 /* Didn't find last fragment, so return NULL */
6824 6829 return (NULL);
6825 6830 }
6826 6831
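The reassembly loop just above walks the offset-sorted fragment list, bails out as soon as it finds a hole (a fragment that starts beyond the bytes covered so far), and only hands the chain back to the caller once it reaches a fragment with MORE_FRAGS clear and no holes before it. The following stand-alone, user-space sketch illustrates that completeness check; the types and names are hypothetical and are not part of spd.c.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical, simplified stand-in for one cached fragment. */
typedef struct frag {
	int		f_first;	/* first byte offset of payload */
	int		f_last;		/* one past the last payload byte */
	int		f_more;		/* nonzero if MORE_FRAGS was set */
	struct frag	*f_next;	/* next fragment, sorted by f_first */
} frag_t;

/*
 * Return 1 if the offset-sorted list forms a complete datagram:
 * no gaps, and the final fragment has MORE_FRAGS clear.
 */
static int
frags_complete(const frag_t *list)
{
	int offset = 0;

	for (const frag_t *f = list; f != NULL; f = f->f_next) {
		if (f->f_first > offset)
			return (0);	/* hole: a fragment is missing */
		if (!f->f_more)
			return (1);	/* last fragment reached, no holes */
		if (f->f_last > offset)
			offset = f->f_last;	/* extend covered range */
	}
	return (0);		/* last fragment not yet seen */
}

int
main(void)
{
	frag_t c = { 2960, 3000, 0, NULL };
	frag_t b = { 1480, 2960, 1, &c };
	frag_t a = { 0, 1480, 1, &b };

	printf("complete = %d\n", frags_complete(&a));	/* prints 1 */
	b.f_last = 2000;				/* introduce a hole */
	printf("complete = %d\n", frags_complete(&a));	/* prints 0 */
	return (0);
}
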
6827 6832 static void
6828 6833 ipsec_fragcache_clean(ipsec_fragcache_t *frag, ipsec_stack_t *ipss)
6829 6834 {
6830 6835 ipsec_fragcache_entry_t *fep;
6831 6836 int i;
6832 6837 ipsec_fragcache_entry_t *earlyfep = NULL;
6833 6838 time_t itpf_time;
6834 6839 int earlyexp;
6835 6840 int earlyi = 0;
6836 6841
6837 6842 ASSERT(MUTEX_HELD(&frag->itpf_lock));
6838 6843
6839 6844 itpf_time = gethrestime_sec();
6840 6845 earlyexp = itpf_time + 10000;
6841 6846
6842 6847 for (i = 0; i < IPSEC_FRAG_HASH_SLOTS; i++) {
6843 6848 fep = (frag->itpf_ptr)[i];
6844 6849 while (fep) {
6845 6850 if (fep->itpfe_exp < itpf_time) {
6846 6851 /* found */
6847 6852 fep = fragcache_delentry(i, fep, frag, ipss);
6848 6853 } else {
6849 6854 if (fep->itpfe_exp < earlyexp) {
6850 6855 earlyfep = fep;
6851 6856 earlyexp = fep->itpfe_exp;
6852 6857 earlyi = i;
6853 6858 }
6854 6859 fep = fep->itpfe_next;
6855 6860 }
6856 6861 }
6857 6862 }
6858 6863
6859 6864 frag->itpf_expire_hint = earlyexp;
6860 6865
6861 6866 /* If nothing was freed above, evict the earliest-expiring entry */
6862 6867 if (frag->itpf_freelist == NULL)
6863 6868 (void) fragcache_delentry(earlyi, earlyfep, frag, ipss);
6864 6869 }
6865 6870
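The expire-hint bookkeeping above is what lets the add path skip a full sweep most of the time: the cache remembers the earliest expiration among its entries, and the cleaner is only invoked once the clock reaches that point. Below is a compact, stand-alone illustration of that idea under the same TTL/hint scheme; the names and miniature cache are hypothetical, not the kernel code.

#include <stdio.h>
#include <time.h>

/* Hypothetical miniature cache: just expiration times and a hint. */
#define	CACHE_SLOTS	4

static time_t	slot_exp[CACHE_SLOTS];	/* 0 means "slot unused" */
static time_t	expire_hint;		/* earliest expiration of any slot */

/* Sweep the cache, drop expired slots, and recompute the hint. */
static void
cache_clean(time_t now)
{
	time_t earliest = now + 10000;	/* same sentinel idea as above */

	for (int i = 0; i < CACHE_SLOTS; i++) {
		if (slot_exp[i] != 0 && slot_exp[i] < now)
			slot_exp[i] = 0;		/* expired: free it */
		else if (slot_exp[i] != 0 && slot_exp[i] < earliest)
			earliest = slot_exp[i];		/* earliest survivor */
	}
	expire_hint = earliest;
}

/* Called on every insertion; the sweep only runs when it can matter. */
static void
cache_add(time_t now, int slot, time_t ttl)
{
	if (now >= expire_hint)
		cache_clean(now);

	slot_exp[slot] = now + ttl;
	if (expire_hint > slot_exp[slot])
		expire_hint = slot_exp[slot];
}

int
main(void)
{
	time_t now = time(NULL);

	cache_clean(now);		/* initialize the hint */
	cache_add(now, 0, 30);
	cache_add(now, 1, 60);
	printf("hint is %ld seconds out\n", (long)(expire_hint - now)); /* 30 */
	return (0);
}
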
6866 6871 static ipsec_fragcache_entry_t *
6867 6872 fragcache_delentry(int slot, ipsec_fragcache_entry_t *fep,
6868 6873 ipsec_fragcache_t *frag, ipsec_stack_t *ipss)
6869 6874 {
6870 6875 ipsec_fragcache_entry_t *targp;
6871 6876 ipsec_fragcache_entry_t *nextp = fep->itpfe_next;
6872 6877
6873 6878 ASSERT(MUTEX_HELD(&frag->itpf_lock));
6874 6879
6875 6880 /* Free up any fragment list still in cache entry */
6876 6881 if (fep->itpfe_fraglist != NULL) {
6877 6882 ip_drop_packet_chain(fep->itpfe_fraglist,
6878 6883 ip_recv_attr_is_mblk(fep->itpfe_fraglist), NULL,
6879 6884 DROPPER(ipss, ipds_spd_expired_frags),
6880 6885 &ipss->ipsec_spd_dropper);
6881 6886 }
6882 6887 fep->itpfe_fraglist = NULL;
6883 6888
6884 6889 targp = (frag->itpf_ptr)[slot];
6885 6890 ASSERT(targp != 0);
6886 6891
6887 6892 if (targp == fep) {
6888 6893 /* unlink from head of hash chain */
6889 6894 (frag->itpf_ptr)[slot] = nextp;
6890 6895 /* link into free list */
6891 6896 fep->itpfe_next = frag->itpf_freelist;
6892 6897 frag->itpf_freelist = fep;
6893 6898 return (nextp);
6894 6899 }
6895 6900
6896 6901 /* maybe should use a doubly linked list to make updates faster */
6897 6902 /* must be past front of chain */
6898 6903 while (targp) {
6899 6904 if (targp->itpfe_next == fep) {
6900 6905 /* unlink from hash chain */
6901 6906 targp->itpfe_next = nextp;
6902 6907 /* link into free list */
6903 6908 fep->itpfe_next = frag->itpf_freelist;
6904 6909 frag->itpf_freelist = fep;
6905 6910 return (nextp);
6906 6911 }
6907 6912 targp = targp->itpfe_next;
6908 6913 ASSERT(targp != 0);
6909 6914 }
6910 6915 /* NOTREACHED */
6911 6916 return (NULL);
6912 6917 }
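Because the hash chains are singly linked, fragcache_delentry() walks from the slot head to find the predecessor before splicing the entry onto the freelist, and it returns the successor so ipsec_fragcache_clean() can continue its walk. A minimal stand-alone version of that unlink-to-freelist pattern follows; the node type and names are hypothetical and not part of spd.c.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical hash-chain node. */
typedef struct node {
	int		n_id;
	struct node	*n_next;
} node_t;

/*
 * Unlink 'victim' from the singly linked chain rooted at *headp,
 * push it onto the freelist rooted at *freep, and return the node
 * that followed it so an in-progress walk can continue.
 */
static node_t *
chain_delentry(node_t **headp, node_t **freep, node_t *victim)
{
	node_t *next = victim->n_next;
	node_t *scan = *headp;

	if (scan == victim) {
		*headp = next;			/* unlink from chain head */
	} else {
		while (scan->n_next != victim)	/* find the predecessor */
			scan = scan->n_next;
		scan->n_next = next;		/* splice around victim */
	}
	victim->n_next = *freep;		/* push onto freelist */
	*freep = victim;
	return (next);
}

int
main(void)
{
	node_t c = { 3, NULL };
	node_t b = { 2, &c };
	node_t a = { 1, &b };
	node_t *head = &a, *freelist = NULL;

	node_t *next = chain_delentry(&head, &freelist, &b);

	printf("next id = %d, chain head = %d, freelist head = %d\n",
	    next->n_id, head->n_id, freelist->n_id);	/* 3, 1, 2 */
	return (0);
}
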
| 2777 lines elided |