Bayard's initial drop; needs finishing, or at least testing.
--- old/usr/src/uts/common/inet/ip/spd.c
+++ new/usr/src/uts/common/inet/ip/spd.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 + * Copyright (c) 2012 Nexenta Systems, Inc. All rights reserved.
24 25 * Copyright (c) 2016 by Delphix. All rights reserved.
26 + * Copyright (c) 2017, Joyent, Inc.
25 27 */
26 28
27 29 /*
28 30 * IPsec Security Policy Database.
29 31 *
30 32 * This module maintains the SPD and provides routines used by ip and ip6
31 33 * to apply IPsec policy to inbound and outbound datagrams.
32 34 */
33 35
34 36 #include <sys/types.h>
35 37 #include <sys/stream.h>
36 38 #include <sys/stropts.h>
37 39 #include <sys/sysmacros.h>
38 40 #include <sys/strsubr.h>
39 41 #include <sys/strsun.h>
40 42 #include <sys/strlog.h>
41 43 #include <sys/strsun.h>
42 44 #include <sys/cmn_err.h>
43 45 #include <sys/zone.h>
44 46
45 47 #include <sys/systm.h>
46 48 #include <sys/param.h>
47 49 #include <sys/kmem.h>
48 50 #include <sys/ddi.h>
49 51
50 52 #include <sys/crypto/api.h>
51 53
52 54 #include <inet/common.h>
53 55 #include <inet/mi.h>
54 56
55 57 #include <netinet/ip6.h>
56 58 #include <netinet/icmp6.h>
57 59 #include <netinet/udp.h>
58 60
59 61 #include <inet/ip.h>
60 62 #include <inet/ip6.h>
61 63
62 64 #include <net/pfkeyv2.h>
63 65 #include <net/pfpolicy.h>
64 66 #include <inet/sadb.h>
65 67 #include <inet/ipsec_impl.h>
66 68
67 69 #include <inet/ip_impl.h> /* For IP_MOD_ID */
68 70
69 71 #include <inet/ipsecah.h>
70 72 #include <inet/ipsecesp.h>
71 73 #include <inet/ipdrop.h>
72 74 #include <inet/ipclassifier.h>
73 75 #include <inet/iptun.h>
74 76 #include <inet/iptun/iptun_impl.h>
75 77
76 78 static void ipsec_update_present_flags(ipsec_stack_t *);
77 79 static ipsec_act_t *ipsec_act_wildcard_expand(ipsec_act_t *, uint_t *,
78 80 netstack_t *);
79 81 static mblk_t *ipsec_check_ipsecin_policy(mblk_t *, ipsec_policy_t *,
80 82 ipha_t *, ip6_t *, uint64_t, ip_recv_attr_t *, netstack_t *);
81 83 static void ipsec_action_free_table(ipsec_action_t *);
82 84 static void ipsec_action_reclaim(void *);
83 85 static void ipsec_action_reclaim_stack(ipsec_stack_t *);
84 86 static void ipsid_init(netstack_t *);
85 87 static void ipsid_fini(netstack_t *);
86 88
87 89 /* sel_flags values for ipsec_init_inbound_sel(). */
88 90 #define SEL_NONE 0x0000
89 91 #define SEL_PORT_POLICY 0x0001
90 92 #define SEL_IS_ICMP 0x0002
91 93 #define SEL_TUNNEL_MODE 0x0004
92 94 #define SEL_POST_FRAG 0x0008
93 95
94 96 /* Return values for ipsec_init_inbound_sel(). */
95 97 typedef enum { SELRET_NOMEM, SELRET_BADPKT, SELRET_SUCCESS, SELRET_TUNFRAG }
96 98 selret_t;
97 99
98 100 static selret_t ipsec_init_inbound_sel(ipsec_selector_t *, mblk_t *,
99 101 ipha_t *, ip6_t *, uint8_t);
100 102
101 103 static boolean_t ipsec_check_ipsecin_action(ip_recv_attr_t *, mblk_t *,
102 104 struct ipsec_action_s *, ipha_t *ipha, ip6_t *ip6h, const char **,
103 105 kstat_named_t **, netstack_t *);
104 106 static void ipsec_unregister_prov_update(void);
105 107 static void ipsec_prov_update_callback_stack(uint32_t, void *, netstack_t *);
106 108 static boolean_t ipsec_compare_action(ipsec_policy_t *, ipsec_policy_t *);
107 109 static uint32_t selector_hash(ipsec_selector_t *, ipsec_policy_root_t *);
108 110 static boolean_t ipsec_kstat_init(ipsec_stack_t *);
109 111 static void ipsec_kstat_destroy(ipsec_stack_t *);
110 112 static int ipsec_free_tables(ipsec_stack_t *);
111 113 static int tunnel_compare(const void *, const void *);
112 114 static void ipsec_freemsg_chain(mblk_t *);
113 115 static void ip_drop_packet_chain(mblk_t *, boolean_t, ill_t *,
114 116 struct kstat_named *, ipdropper_t *);
120 122
121 123 /*
122 124 * Selector hash table is statically sized at module load time.
123 125 * We default to 251 buckets, which is the largest prime number under 255.
124 126 */
125 127
126 128 #define IPSEC_SPDHASH_DEFAULT 251
127 129
128 130 /* SPD hash-size tunable per tunnel. */
129 131 #define TUN_SPDHASH_DEFAULT 5
130 132
131 133 uint32_t ipsec_spd_hashsize;
132 134 uint32_t tun_spd_hashsize;
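Both globals above are patchable before IPsec initializes. As an illustration only (hypothetical values, assuming the usual /etc/system mechanism for patching globals in the ip module; this is not part of the webrev), an administrator could pick different sizes:

	* /etc/system -- hypothetical SPD hash sizing overrides
	set ip:ipsec_spd_hashsize = 499
	set ip:tun_spd_hashsize = 11

ipsec_stack_init() below reads these once per stack instance, so an override takes effect at the next boot.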
133 135
134 136 #define IPSEC_SEL_NOHASH ((uint32_t)(~0))
135 137
136 138 /*
137 139 * Handle global across all stack instances
138 140 */
139 141 static crypto_notify_handle_t prov_update_handle = NULL;
140 142
141 143 static kmem_cache_t *ipsec_action_cache;
142 144 static kmem_cache_t *ipsec_sel_cache;
143 145 static kmem_cache_t *ipsec_pol_cache;
144 146
145 147 /* Frag cache prototypes */
146 148 static void ipsec_fragcache_clean(ipsec_fragcache_t *, ipsec_stack_t *);
147 149 static ipsec_fragcache_entry_t *fragcache_delentry(int,
148 150 ipsec_fragcache_entry_t *, ipsec_fragcache_t *, ipsec_stack_t *);
149 151 boolean_t ipsec_fragcache_init(ipsec_fragcache_t *);
150 152 void ipsec_fragcache_uninit(ipsec_fragcache_t *, ipsec_stack_t *ipss);
151 153 mblk_t *ipsec_fragcache_add(ipsec_fragcache_t *, mblk_t *, mblk_t *,
152 154 int, ipsec_stack_t *);
153 155
154 156 int ipsec_hdr_pullup_needed = 0;
155 157 int ipsec_weird_null_inbound_policy = 0;
156 158
157 159 #define ALGBITS_ROUND_DOWN(x, align) (((x)/(align))*(align))
158 160 #define ALGBITS_ROUND_UP(x, align) ALGBITS_ROUND_DOWN((x)+(align)-1, align)
159 161
160 162 /*
161 163 * Inbound traffic should have matching identities for both SAs.
162 164 */
163 165
164 166 #define SA_IDS_MATCH(sa1, sa2) \
165 167 (((sa1) == NULL) || ((sa2) == NULL) || \
166 168 (((sa1)->ipsa_src_cid == (sa2)->ipsa_src_cid) && \
167 169 (((sa1)->ipsa_dst_cid == (sa2)->ipsa_dst_cid))))
168 170
169 171 /*
170 172 * IPv6 Fragments
171 173 */
172 174 #define IS_V6_FRAGMENT(ipp) (ipp.ipp_fields & IPPF_FRAGHDR)
173 175
174 176 /*
175 177 * Policy failure messages.
176 178 */
177 179 static char *ipsec_policy_failure_msgs[] = {
178 180
179 181 /* IPSEC_POLICY_NOT_NEEDED */
180 182 "%s: Dropping the datagram because the incoming packet "
181 183 "is %s, but the recipient expects clear; Source %s, "
182 184 "Destination %s.\n",
183 185
184 186 /* IPSEC_POLICY_MISMATCH */
185 187 "%s: Policy Failure for the incoming packet (%s); Source %s, "
186 188 "Destination %s.\n",
187 189
188 190 /* IPSEC_POLICY_AUTH_NOT_NEEDED */
189 191 "%s: Authentication present while not expected in the "
190 192 "incoming %s packet; Source %s, Destination %s.\n",
191 193
192 194 /* IPSEC_POLICY_ENCR_NOT_NEEDED */
193 195 "%s: Encryption present while not expected in the "
194 196 "incoming %s packet; Source %s, Destination %s.\n",
195 197
196 198 /* IPSEC_POLICY_SE_NOT_NEEDED */
197 199 "%s: Self-Encapsulation present while not expected in the "
198 200 "incoming %s packet; Source %s, Destination %s.\n",
199 201 };
200 202
201 203 /*
202 204 * General overviews:
203 205 *
204 206 * Locking:
205 207 *
206 208 * All of the system policy structures are protected by a single
207 209 * rwlock. These structures are threaded in a
208 210 * fairly complex fashion and are not expected to change on a
209 211 * regular basis, so this should not cause scaling/contention
210 212 * problems. As a result, policy checks should (hopefully) be MT-hot.
211 213 *
212 214 * Allocation policy:
213 215 *
214 216 * We use custom kmem cache types for the various
215 217 * bits & pieces of the policy data structures. All allocations
216 218 * use KM_NOSLEEP instead of KM_SLEEP for policy allocation. The
217 219 * policy table is of potentially unbounded size, so we don't
218 220 * want to provide a way to hog all system memory with policy
219 221 * entries..
220 222 */
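To make the locking discipline just described concrete, here is a minimal reader-side sketch (illustrative only; walk_rules_example() is hypothetical, though iph_lock and iph_rulebyid are the real fields):

	static void
	walk_rules_example(ipsec_policy_head_t *iph)
	{
		ipsec_policy_t *p;

		rw_enter(&iph->iph_lock, RW_READER);
		for (p = avl_first(&iph->iph_rulebyid); p != NULL;
		    p = AVL_NEXT(&iph->iph_rulebyid, p)) {
			/* Examine rule p; no allocation or blocking here. */
		}
		rw_exit(&iph->iph_lock);
	}

Updaters take the same lock as RW_WRITER and, per the allocation policy above, allocate with KM_NOSLEEP, so they must be prepared for allocation failure.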
221 223
222 224 /* Convenient functions for freeing or dropping a b_next linked mblk chain */
223 225
224 226 /* Free all messages in an mblk chain */
225 227 static void
226 228 ipsec_freemsg_chain(mblk_t *mp)
227 229 {
228 230 mblk_t *mpnext;
229 231 while (mp != NULL) {
230 232 ASSERT(mp->b_prev == NULL);
231 233 mpnext = mp->b_next;
232 234 mp->b_next = NULL;
233 235 freemsg(mp);
234 236 mp = mpnext;
235 237 }
236 238 }
237 239
238 240 /*
239 241 * ip_drop all messages in an mblk chain
240 242 * Can handle a b_next chain of ip_recv_attr_t mblks, or just a b_next chain
241 243 * of data.
242 244 */
243 245 static void
244 246 ip_drop_packet_chain(mblk_t *mp, boolean_t inbound, ill_t *ill,
245 247 struct kstat_named *counter, ipdropper_t *who_called)
246 248 {
247 249 mblk_t *mpnext;
248 250 while (mp != NULL) {
249 251 ASSERT(mp->b_prev == NULL);
250 252 mpnext = mp->b_next;
251 253 mp->b_next = NULL;
252 254 if (ip_recv_attr_is_mblk(mp))
253 255 mp = ip_recv_attr_free_mblk(mp);
254 256 ip_drop_packet(mp, inbound, ill, counter, who_called);
255 257 mp = mpnext;
256 258 }
257 259 }
258 260
259 261 /*
260 262 * AVL tree comparison function.
261 263 * The in-kernel avl assumes unique keys for all objects.
262 264 * Since sometimes policy will duplicate rules, we may insert
263 265 * multiple rules with the same rule id, so we need a tie-breaker.
264 266 */
265 267 static int
266 268 ipsec_policy_cmpbyid(const void *a, const void *b)
267 269 {
268 270 const ipsec_policy_t *ipa, *ipb;
269 271 uint64_t idxa, idxb;
270 272
271 273 ipa = (const ipsec_policy_t *)a;
272 274 ipb = (const ipsec_policy_t *)b;
273 275 idxa = ipa->ipsp_index;
274 276 idxb = ipb->ipsp_index;
275 277
276 278 if (idxa < idxb)
277 279 return (-1);
278 280 if (idxa > idxb)
279 281 return (1);
280 282 /*
281 283 * Tie-breaker #1: All installed policy rules have a non-NULL
282 284 * ipsl_sel (selector set), so an entry with a NULL ipsp_sel is not
283 285 * actually in-tree but rather a template node being used in
284 286 * an avl_find query; see ipsec_policy_delete(). This gives us
285 287 * a placeholder in the ordering just before the first entry with
286 288 * a key >= the one we're looking for, so we can walk forward from
287 289 * that point to get the remaining entries with the same id.
288 290 */
289 291 if ((ipa->ipsp_sel == NULL) && (ipb->ipsp_sel != NULL))
290 292 return (-1);
291 293 if ((ipb->ipsp_sel == NULL) && (ipa->ipsp_sel != NULL))
292 294 return (1);
293 295 /*
294 296 * At most one of the arguments to the comparison should have a
295 297 * NULL selector pointer; if not, the tree is broken.
296 298 */
297 299 ASSERT(ipa->ipsp_sel != NULL);
298 300 ASSERT(ipb->ipsp_sel != NULL);
299 301 /*
300 302 * Tie-breaker #2: use the virtual address of the policy node
301 303 * to arbitrarily break ties. Since we use the new tree node in
302 304 * the avl_find() in ipsec_insert_always, the new node will be
303 305 * inserted into the tree in the right place in the sequence.
304 306 */
305 307 if (ipa < ipb)
306 308 return (-1);
307 309 if (ipa > ipb)
308 310 return (1);
309 311 return (0);
310 312 }
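The placeholder behavior in tie-breaker #1 is easiest to see in a sketch. The following hypothetical helper (visit_rules_by_index() is not in this file; it is modeled on the template-node trick the comment attributes to ipsec_policy_delete()) uses a NULL-selector template to visit every installed rule with a given index:

	static void
	visit_rules_by_index(ipsec_policy_head_t *iph, uint64_t index)
	{
		ipsec_policy_t tmpl, *p;
		avl_index_t where;

		bzero(&tmpl, sizeof (tmpl));
		tmpl.ipsp_index = index; /* ipsp_sel stays NULL: sorts first */

		(void) avl_find(&iph->iph_rulebyid, &tmpl, &where);
		for (p = avl_nearest(&iph->iph_rulebyid, where, AVL_AFTER);
		    p != NULL && p->ipsp_index == index;
		    p = AVL_NEXT(&iph->iph_rulebyid, p)) {
			/* p is an installed rule with ipsp_index == index. */
		}
	}

The template compares less than every real entry carrying the same index, so avl_find() fails but leaves `where' just before the first match, and the forward walk collects the rest.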
311 313
312 314 /*
313 315 * Free what ipsec_alloc_table allocated.
314 316 */
315 317 void
316 318 ipsec_polhead_free_table(ipsec_policy_head_t *iph)
317 319 {
318 320 int dir;
319 321 int i;
320 322
321 323 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
322 324 ipsec_policy_root_t *ipr = &iph->iph_root[dir];
323 325
324 326 if (ipr->ipr_hash == NULL)
325 327 continue;
326 328
327 329 for (i = 0; i < ipr->ipr_nchains; i++) {
328 330 ASSERT(ipr->ipr_hash[i].hash_head == NULL);
329 331 }
330 332 kmem_free(ipr->ipr_hash, ipr->ipr_nchains *
331 333 sizeof (ipsec_policy_hash_t));
332 334 ipr->ipr_hash = NULL;
333 335 }
334 336 }
335 337
336 338 void
337 339 ipsec_polhead_destroy(ipsec_policy_head_t *iph)
338 340 {
339 341 int dir;
340 342
341 343 avl_destroy(&iph->iph_rulebyid);
342 344 rw_destroy(&iph->iph_lock);
343 345
344 346 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
345 347 ipsec_policy_root_t *ipr = &iph->iph_root[dir];
346 348 int chain;
347 349
348 350 for (chain = 0; chain < ipr->ipr_nchains; chain++)
349 351 mutex_destroy(&(ipr->ipr_hash[chain].hash_lock));
350 352
351 353 }
352 354 ipsec_polhead_free_table(iph);
353 355 }
354 356
355 357 /*
356 358 * Free the IPsec stack instance.
357 359 */
358 360 /* ARGSUSED */
359 361 static void
360 362 ipsec_stack_fini(netstackid_t stackid, void *arg)
361 363 {
362 364 ipsec_stack_t *ipss = (ipsec_stack_t *)arg;
363 365 void *cookie;
364 366 ipsec_tun_pol_t *node;
365 367 netstack_t *ns = ipss->ipsec_netstack;
366 368 int i;
367 369 ipsec_algtype_t algtype;
368 370
369 371 ipsec_loader_destroy(ipss);
370 372
371 373 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_WRITER);
372 374 /*
373 375 * It's possible we can just ASSERT() the tree is empty. After all,
374 376 * we aren't called until IP is ready to unload (and presumably all
375 377 * tunnels have been unplumbed). But we'll play it safe for now; the
376 378 * loop will just exit immediately if it's empty.
377 379 */
378 380 cookie = NULL;
379 381 while ((node = (ipsec_tun_pol_t *)
380 382 avl_destroy_nodes(&ipss->ipsec_tunnel_policies,
381 383 &cookie)) != NULL) {
382 384 ITP_REFRELE(node, ns);
383 385 }
384 386 avl_destroy(&ipss->ipsec_tunnel_policies);
385 387 rw_exit(&ipss->ipsec_tunnel_policy_lock);
386 388 rw_destroy(&ipss->ipsec_tunnel_policy_lock);
387 389
388 390 ipsec_config_flush(ns);
389 391
390 392 ipsec_kstat_destroy(ipss);
391 393
392 394 ip_drop_unregister(&ipss->ipsec_dropper);
393 395
394 396 ip_drop_unregister(&ipss->ipsec_spd_dropper);
395 397 ip_drop_destroy(ipss);
396 398 /*
397 399 * Globals start with ref == 1 to prevent IPPH_REFRELE() from
398 400 * attempting to free them, hence they should have 1 now.
399 401 */
400 402 ipsec_polhead_destroy(&ipss->ipsec_system_policy);
401 403 ASSERT(ipss->ipsec_system_policy.iph_refs == 1);
402 404 ipsec_polhead_destroy(&ipss->ipsec_inactive_policy);
403 405 ASSERT(ipss->ipsec_inactive_policy.iph_refs == 1);
404 406
405 407 for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++) {
406 408 ipsec_action_free_table(ipss->ipsec_action_hash[i].hash_head);
407 409 ipss->ipsec_action_hash[i].hash_head = NULL;
408 410 mutex_destroy(&(ipss->ipsec_action_hash[i].hash_lock));
409 411 }
410 412
411 413 for (i = 0; i < ipss->ipsec_spd_hashsize; i++) {
412 414 ASSERT(ipss->ipsec_sel_hash[i].hash_head == NULL);
413 415 mutex_destroy(&(ipss->ipsec_sel_hash[i].hash_lock));
414 416 }
415 417
416 - mutex_enter(&ipss->ipsec_alg_lock);
418 + rw_enter(&ipss->ipsec_alg_lock, RW_WRITER);
417 419 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype ++) {
418 420 int nalgs = ipss->ipsec_nalgs[algtype];
419 421
420 422 for (i = 0; i < nalgs; i++) {
421 423 if (ipss->ipsec_alglists[algtype][i] != NULL)
422 424 ipsec_alg_unreg(algtype, i, ns);
423 425 }
424 426 }
425 - mutex_exit(&ipss->ipsec_alg_lock);
426 - mutex_destroy(&ipss->ipsec_alg_lock);
427 + rw_exit(&ipss->ipsec_alg_lock);
428 + rw_destroy(&ipss->ipsec_alg_lock);
427 429
428 430 ipsid_gc(ns);
429 431 ipsid_fini(ns);
430 432
431 433 (void) ipsec_free_tables(ipss);
432 434 kmem_free(ipss, sizeof (*ipss));
433 435 }
434 436
435 437 void
436 438 ipsec_policy_g_destroy(void)
437 439 {
438 440 kmem_cache_destroy(ipsec_action_cache);
439 441 kmem_cache_destroy(ipsec_sel_cache);
440 442 kmem_cache_destroy(ipsec_pol_cache);
441 443
442 444 ipsec_unregister_prov_update();
443 445
444 446 netstack_unregister(NS_IPSEC);
445 447 }
446 448
447 449
448 450 /*
449 451 * Free what ipsec_alloc_tables allocated.
451 453 * Called to clean up the tables when table allocation fails.
451 453 */
452 454 static int
453 455 ipsec_free_tables(ipsec_stack_t *ipss)
454 456 {
455 457 int i;
456 458
457 459 if (ipss->ipsec_sel_hash != NULL) {
458 460 for (i = 0; i < ipss->ipsec_spd_hashsize; i++) {
459 461 ASSERT(ipss->ipsec_sel_hash[i].hash_head == NULL);
460 462 }
461 463 kmem_free(ipss->ipsec_sel_hash, ipss->ipsec_spd_hashsize *
462 464 sizeof (*ipss->ipsec_sel_hash));
463 465 ipss->ipsec_sel_hash = NULL;
464 466 ipss->ipsec_spd_hashsize = 0;
465 467 }
466 468 ipsec_polhead_free_table(&ipss->ipsec_system_policy);
467 469 ipsec_polhead_free_table(&ipss->ipsec_inactive_policy);
468 470
469 471 return (ENOMEM);
470 472 }
471 473
472 474 /*
473 475 * Attempt to allocate the tables in a single policy head.
474 476 * Return nonzero on failure after cleaning up any work in progress.
475 477 */
476 478 int
477 479 ipsec_alloc_table(ipsec_policy_head_t *iph, int nchains, int kmflag,
478 480 boolean_t global_cleanup, netstack_t *ns)
479 481 {
480 482 int dir;
481 483
482 484 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
483 485 ipsec_policy_root_t *ipr = &iph->iph_root[dir];
484 486
485 487 ipr->ipr_nchains = nchains;
486 488 ipr->ipr_hash = kmem_zalloc(nchains *
487 489 sizeof (ipsec_policy_hash_t), kmflag);
488 490 if (ipr->ipr_hash == NULL)
489 491 return (global_cleanup ?
490 492 ipsec_free_tables(ns->netstack_ipsec) :
491 493 ENOMEM);
492 494 }
493 495 return (0);
494 496 }
495 497
496 498 /*
497 499 * Attempt to allocate the various tables. Return nonzero on failure
498 500 * after cleaning up any work in progress.
499 501 */
500 502 static int
501 503 ipsec_alloc_tables(int kmflag, netstack_t *ns)
502 504 {
503 505 int error;
504 506 ipsec_stack_t *ipss = ns->netstack_ipsec;
505 507
506 508 error = ipsec_alloc_table(&ipss->ipsec_system_policy,
507 509 ipss->ipsec_spd_hashsize, kmflag, B_TRUE, ns);
508 510 if (error != 0)
509 511 return (error);
510 512
511 513 error = ipsec_alloc_table(&ipss->ipsec_inactive_policy,
512 514 ipss->ipsec_spd_hashsize, kmflag, B_TRUE, ns);
513 515 if (error != 0)
514 516 return (error);
515 517
516 518 ipss->ipsec_sel_hash = kmem_zalloc(ipss->ipsec_spd_hashsize *
517 519 sizeof (*ipss->ipsec_sel_hash), kmflag);
518 520
519 521 if (ipss->ipsec_sel_hash == NULL)
520 522 return (ipsec_free_tables(ipss));
521 523
522 524 return (0);
523 525 }
524 526
525 527 /*
526 528 * After table allocation, initialize a policy head.
527 529 */
528 530 void
529 531 ipsec_polhead_init(ipsec_policy_head_t *iph, int nchains)
530 532 {
531 533 int dir, chain;
532 534
533 535 rw_init(&iph->iph_lock, NULL, RW_DEFAULT, NULL);
534 536 avl_create(&iph->iph_rulebyid, ipsec_policy_cmpbyid,
535 537 sizeof (ipsec_policy_t), offsetof(ipsec_policy_t, ipsp_byid));
536 538
537 539 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
538 540 ipsec_policy_root_t *ipr = &iph->iph_root[dir];
539 541 ipr->ipr_nchains = nchains;
540 542
541 543 for (chain = 0; chain < nchains; chain++) {
542 544 mutex_init(&(ipr->ipr_hash[chain].hash_lock),
543 545 NULL, MUTEX_DEFAULT, NULL);
544 546 }
545 547 }
546 548 }
547 549
548 550 static boolean_t
549 551 ipsec_kstat_init(ipsec_stack_t *ipss)
550 552 {
551 553 ipss->ipsec_ksp = kstat_create_netstack("ip", 0, "ipsec_stat", "net",
552 554 KSTAT_TYPE_NAMED, sizeof (ipsec_kstats_t) / sizeof (kstat_named_t),
553 555 KSTAT_FLAG_PERSISTENT, ipss->ipsec_netstack->netstack_stackid);
554 556
555 557 if (ipss->ipsec_ksp == NULL || ipss->ipsec_ksp->ks_data == NULL)
556 558 return (B_FALSE);
557 559
558 560 ipss->ipsec_kstats = ipss->ipsec_ksp->ks_data;
559 561
560 562 #define KI(x) kstat_named_init(&ipss->ipsec_kstats->x, #x, KSTAT_DATA_UINT64)
561 563 KI(esp_stat_in_requests);
562 564 KI(esp_stat_in_discards);
563 565 KI(esp_stat_lookup_failure);
564 566 KI(ah_stat_in_requests);
565 567 KI(ah_stat_in_discards);
566 568 KI(ah_stat_lookup_failure);
567 569 KI(sadb_acquire_maxpackets);
568 570 KI(sadb_acquire_qhiwater);
569 571 #undef KI
570 572
571 573 kstat_install(ipss->ipsec_ksp);
572 574 return (B_TRUE);
573 575 }
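Once kstat_install() has run, the counters are visible from userland. For example (assuming standard kstat(1M) usage; the module "ip" and name "ipsec_stat" match the kstat_create_netstack() call above):

	# kstat -m ip -n ipsec_stat

displays esp_stat_in_requests, ah_stat_in_discards, and the other counters for the zone owning the stack instance.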
574 576
575 577 static void
576 578 ipsec_kstat_destroy(ipsec_stack_t *ipss)
577 579 {
578 580 kstat_delete_netstack(ipss->ipsec_ksp,
579 581 ipss->ipsec_netstack->netstack_stackid);
580 582 ipss->ipsec_kstats = NULL;
581 583
582 584 }
583 585
584 586 /*
585 587 * Initialize the IPsec stack instance.
586 588 */
587 589 /* ARGSUSED */
588 590 static void *
589 591 ipsec_stack_init(netstackid_t stackid, netstack_t *ns)
590 592 {
591 593 ipsec_stack_t *ipss;
592 594 int i;
593 595
594 596 ipss = (ipsec_stack_t *)kmem_zalloc(sizeof (*ipss), KM_SLEEP);
595 597 ipss->ipsec_netstack = ns;
596 598
597 599 /*
598 600 * FIXME: netstack_ipsec is used by some of the routines we call
599 601 * below, but it isn't set until this routine returns.
600 602 * Either we introduce optional xxx_stack_alloc() functions
601 603 * that will be called by the netstack framework before xxx_stack_init,
602 604 * or we switch spd.c and sadb.c to operate on ipsec_stack_t
603 605 * (latter has some include file order issues for sadb.h, but makes
604 606 * sense if we merge some of the ipsec related stack_t's together.)
605 607 */
606 608 ns->netstack_ipsec = ipss;
607 609
608 610 /*
609 611 * Make two attempts to allocate policy hash tables; try it at
610 612 * the "preferred" size (may be set in /etc/system) first,
611 613 * then fall back to the default size.
612 614 */
613 615 ipss->ipsec_spd_hashsize = (ipsec_spd_hashsize == 0) ?
614 616 IPSEC_SPDHASH_DEFAULT : ipsec_spd_hashsize;
615 617
616 618 if (ipsec_alloc_tables(KM_NOSLEEP, ns) != 0) {
617 619 cmn_err(CE_WARN,
618 620 "Unable to allocate %d entry IPsec policy hash table",
619 621 ipss->ipsec_spd_hashsize);
620 622 ipss->ipsec_spd_hashsize = IPSEC_SPDHASH_DEFAULT;
621 623 cmn_err(CE_WARN, "Falling back to %d entries",
622 624 ipss->ipsec_spd_hashsize);
623 625 (void) ipsec_alloc_tables(KM_SLEEP, ns);
624 626 }
625 627
626 628 /* Just set a default for tunnels. */
627 629 ipss->ipsec_tun_spd_hashsize = (tun_spd_hashsize == 0) ?
628 630 TUN_SPDHASH_DEFAULT : tun_spd_hashsize;
629 631
630 632 ipsid_init(ns);
631 633 /*
632 634 * Globals need ref == 1 to prevent IPPH_REFRELE() from attempting
633 635 * to free them.
634 636 */
635 637 ipss->ipsec_system_policy.iph_refs = 1;
636 638 ipss->ipsec_inactive_policy.iph_refs = 1;
637 639 ipsec_polhead_init(&ipss->ipsec_system_policy,
638 640 ipss->ipsec_spd_hashsize);
639 641 ipsec_polhead_init(&ipss->ipsec_inactive_policy,
640 642 ipss->ipsec_spd_hashsize);
641 643 rw_init(&ipss->ipsec_tunnel_policy_lock, NULL, RW_DEFAULT, NULL);
642 644 avl_create(&ipss->ipsec_tunnel_policies, tunnel_compare,
643 645 sizeof (ipsec_tun_pol_t), 0);
644 646
645 647 ipss->ipsec_next_policy_index = 1;
646 648
647 649 rw_init(&ipss->ipsec_system_policy.iph_lock, NULL, RW_DEFAULT, NULL);
648 650 rw_init(&ipss->ipsec_inactive_policy.iph_lock, NULL, RW_DEFAULT, NULL);
649 651
650 652 for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++)
651 653 mutex_init(&(ipss->ipsec_action_hash[i].hash_lock),
652 654 NULL, MUTEX_DEFAULT, NULL);
653 655
654 656 for (i = 0; i < ipss->ipsec_spd_hashsize; i++)
655 657 mutex_init(&(ipss->ipsec_sel_hash[i].hash_lock),
656 658 NULL, MUTEX_DEFAULT, NULL);
657 659
658 - mutex_init(&ipss->ipsec_alg_lock, NULL, MUTEX_DEFAULT, NULL);
660 + rw_init(&ipss->ipsec_alg_lock, NULL, RW_DEFAULT, NULL);
659 661 for (i = 0; i < IPSEC_NALGTYPES; i++) {
660 662 ipss->ipsec_nalgs[i] = 0;
661 663 }
662 664
663 665 ip_drop_init(ipss);
664 666 ip_drop_register(&ipss->ipsec_spd_dropper, "IPsec SPD");
665 667
666 668 /* IP's IPsec code calls the packet dropper */
667 669 ip_drop_register(&ipss->ipsec_dropper, "IP IPsec processing");
668 670
669 671 (void) ipsec_kstat_init(ipss);
670 672
671 673 ipsec_loader_init(ipss);
672 674 ipsec_loader_start(ipss);
673 675
674 676 return (ipss);
675 677 }
676 678
677 679 /* Global across all stack instances */
678 680 void
679 681 ipsec_policy_g_init(void)
680 682 {
681 683 ipsec_action_cache = kmem_cache_create("ipsec_actions",
682 684 sizeof (ipsec_action_t), _POINTER_ALIGNMENT, NULL, NULL,
683 685 ipsec_action_reclaim, NULL, NULL, 0);
684 686 ipsec_sel_cache = kmem_cache_create("ipsec_selectors",
685 687 sizeof (ipsec_sel_t), _POINTER_ALIGNMENT, NULL, NULL,
686 688 NULL, NULL, NULL, 0);
687 689 ipsec_pol_cache = kmem_cache_create("ipsec_policy",
688 690 sizeof (ipsec_policy_t), _POINTER_ALIGNMENT, NULL, NULL,
689 691 NULL, NULL, NULL, 0);
690 692
691 693 /*
692 694 * We want to be informed each time a stack is created or
693 695 * destroyed in the kernel, so we can maintain the
694 696 * set of ipsec_stack_t's.
695 697 */
696 698 netstack_register(NS_IPSEC, ipsec_stack_init, NULL, ipsec_stack_fini);
697 699 }
698 700
699 701 /*
700 702 * Sort algorithm lists.
701 703 *
702 704 * I may need to split this based on
703 705 * authentication/encryption, and I may wish to have an administrator
704 706 * configure this list. Hold on to some NDD variables...
705 707 *
706 708 * XXX For now, sort on minimum key size (GAG!). While minimum key size is
707 709 * not the ideal metric, it's the only quantifiable measure available.
708 710 * We need a better metric for sorting algorithms by preference.
709 711 */
710 712 static void
711 713 alg_insert_sortlist(enum ipsec_algtype at, uint8_t algid, netstack_t *ns)
712 714 {
713 715 ipsec_stack_t *ipss = ns->netstack_ipsec;
714 716 ipsec_alginfo_t *ai = ipss->ipsec_alglists[at][algid];
715 717 uint8_t holder, swap;
716 718 uint_t i;
717 719 uint_t count = ipss->ipsec_nalgs[at];
718 720 ASSERT(ai != NULL);
719 721 ASSERT(algid == ai->alg_id);
720 722
721 - ASSERT(MUTEX_HELD(&ipss->ipsec_alg_lock));
723 + ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
722 724
723 725 holder = algid;
724 726
725 727 for (i = 0; i < count - 1; i++) {
726 728 ipsec_alginfo_t *alt;
727 729
728 730 alt = ipss->ipsec_alglists[at][ipss->ipsec_sortlist[at][i]];
729 731 /*
730 732 * If you want to give precedence to newly added algs,
731 733 * add the = in the > comparison.
732 734 */
733 735 if ((holder != algid) || (ai->alg_minbits > alt->alg_minbits)) {
734 736 /* Swap sortlist[i] and holder. */
735 737 swap = ipss->ipsec_sortlist[at][i];
736 738 ipss->ipsec_sortlist[at][i] = holder;
737 739 holder = swap;
738 740 ai = alt;
739 741 } /* Else just continue. */
740 742 }
741 743
742 744 /* Store holder in last slot. */
743 745 ipss->ipsec_sortlist[at][i] = holder;
744 746 }
745 747
746 748 /*
747 749 * Remove an algorithm from a sorted algorithm list.
748 750 * This should be considerably easier, even with complex sorting.
749 751 */
750 752 static void
751 753 alg_remove_sortlist(enum ipsec_algtype at, uint8_t algid, netstack_t *ns)
752 754 {
753 755 boolean_t copyback = B_FALSE;
754 756 int i;
755 757 ipsec_stack_t *ipss = ns->netstack_ipsec;
756 758 int newcount = ipss->ipsec_nalgs[at];
757 759
758 - ASSERT(MUTEX_HELD(&ipss->ipsec_alg_lock));
760 + ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
759 761
760 762 for (i = 0; i <= newcount; i++) {
761 763 if (copyback) {
762 764 ipss->ipsec_sortlist[at][i-1] =
763 765 ipss->ipsec_sortlist[at][i];
764 766 } else if (ipss->ipsec_sortlist[at][i] == algid) {
765 767 copyback = B_TRUE;
766 768 }
767 769 }
768 770 }
769 771
770 772 /*
771 773 * Add the specified algorithm to the algorithm tables.
772 774 * Must be called while holding the algorithm table writer lock.
773 775 */
774 776 void
775 777 ipsec_alg_reg(ipsec_algtype_t algtype, ipsec_alginfo_t *alg, netstack_t *ns)
776 778 {
777 779 ipsec_stack_t *ipss = ns->netstack_ipsec;
778 780
779 - ASSERT(MUTEX_HELD(&ipss->ipsec_alg_lock));
781 + ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
780 782
781 783 ASSERT(ipss->ipsec_alglists[algtype][alg->alg_id] == NULL);
782 784 ipsec_alg_fix_min_max(alg, algtype, ns);
783 785 ipss->ipsec_alglists[algtype][alg->alg_id] = alg;
784 786
785 787 ipss->ipsec_nalgs[algtype]++;
786 788 alg_insert_sortlist(algtype, alg->alg_id, ns);
787 789 }
788 790
789 791 /*
790 792 * Remove the specified algorithm from the algorithm tables.
791 793 * Must be called while holding the algorithm table writer lock.
792 794 */
793 795 void
794 796 ipsec_alg_unreg(ipsec_algtype_t algtype, uint8_t algid, netstack_t *ns)
795 797 {
796 798 ipsec_stack_t *ipss = ns->netstack_ipsec;
797 799
798 - ASSERT(MUTEX_HELD(&ipss->ipsec_alg_lock));
800 + ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
799 801
800 802 ASSERT(ipss->ipsec_alglists[algtype][algid] != NULL);
801 803 ipsec_alg_free(ipss->ipsec_alglists[algtype][algid]);
802 804 ipss->ipsec_alglists[algtype][algid] = NULL;
803 805
804 806 ipss->ipsec_nalgs[algtype]--;
805 807 alg_remove_sortlist(algtype, algid, ns);
806 808 }
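The RW_WRITE_HELD() assertions above are the writer half of the mutex-to-rwlock conversion this webrev makes; the payoff is on the read side. A minimal sketch (alg_is_loaded_example() is hypothetical, not part of the change) of a lookup that can now run concurrently with other readers:

	static boolean_t
	alg_is_loaded_example(ipsec_algtype_t algtype, uint8_t algid,
	    netstack_t *ns)
	{
		ipsec_stack_t *ipss = ns->netstack_ipsec;
		boolean_t loaded;

		rw_enter(&ipss->ipsec_alg_lock, RW_READER);
		loaded = (ipss->ipsec_alglists[algtype][algid] != NULL);
		rw_exit(&ipss->ipsec_alg_lock);
		return (loaded);
	}

Such readers no longer serialize against one another; only ipsec_alg_reg()/ipsec_alg_unreg() and other writers exclude them.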
807 809
808 810 /*
809 811 * Hooks for spdsock to get a grip on system policy.
810 812 */
811 813
812 814 ipsec_policy_head_t *
813 815 ipsec_system_policy(netstack_t *ns)
814 816 {
815 817 ipsec_stack_t *ipss = ns->netstack_ipsec;
816 818 ipsec_policy_head_t *h = &ipss->ipsec_system_policy;
817 819
818 820 IPPH_REFHOLD(h);
819 821 return (h);
820 822 }
821 823
822 824 ipsec_policy_head_t *
823 825 ipsec_inactive_policy(netstack_t *ns)
824 826 {
825 827 ipsec_stack_t *ipss = ns->netstack_ipsec;
826 828 ipsec_policy_head_t *h = &ipss->ipsec_inactive_policy;
827 829
828 830 IPPH_REFHOLD(h);
829 831 return (h);
830 832 }
831 833
832 834 /*
833 835 * Lock inactive policy, then active policy, then exchange policy root
834 836 * pointers.
835 837 */
836 838 void
837 839 ipsec_swap_policy(ipsec_policy_head_t *active, ipsec_policy_head_t *inactive,
838 840 netstack_t *ns)
839 841 {
840 842 int af, dir;
841 843 avl_tree_t r1, r2;
842 844
843 845 rw_enter(&inactive->iph_lock, RW_WRITER);
844 846 rw_enter(&active->iph_lock, RW_WRITER);
845 847
846 848 r1 = active->iph_rulebyid;
847 849 r2 = inactive->iph_rulebyid;
848 850 active->iph_rulebyid = r2;
849 851 inactive->iph_rulebyid = r1;
850 852
851 853 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
852 854 ipsec_policy_hash_t *h1, *h2;
853 855
854 856 h1 = active->iph_root[dir].ipr_hash;
855 857 h2 = inactive->iph_root[dir].ipr_hash;
856 858 active->iph_root[dir].ipr_hash = h2;
857 859 inactive->iph_root[dir].ipr_hash = h1;
858 860
859 861 for (af = 0; af < IPSEC_NAF; af++) {
860 862 ipsec_policy_t *t1, *t2;
861 863
862 864 t1 = active->iph_root[dir].ipr_nonhash[af];
863 865 t2 = inactive->iph_root[dir].ipr_nonhash[af];
864 866 active->iph_root[dir].ipr_nonhash[af] = t2;
865 867 inactive->iph_root[dir].ipr_nonhash[af] = t1;
866 868 if (t1 != NULL) {
867 869 t1->ipsp_hash.hash_pp =
868 870 &(inactive->iph_root[dir].ipr_nonhash[af]);
869 871 }
870 872 if (t2 != NULL) {
871 873 t2->ipsp_hash.hash_pp =
872 874 &(active->iph_root[dir].ipr_nonhash[af]);
873 875 }
874 876
875 877 }
876 878 }
877 879 active->iph_gen++;
878 880 inactive->iph_gen++;
879 881 ipsec_update_present_flags(ns->netstack_ipsec);
880 882 rw_exit(&active->iph_lock);
881 883 rw_exit(&inactive->iph_lock);
882 884 }
883 885
884 886 /*
885 887 * Swap global policy primary/secondary.
886 888 */
887 889 void
888 890 ipsec_swap_global_policy(netstack_t *ns)
889 891 {
890 892 ipsec_stack_t *ipss = ns->netstack_ipsec;
891 893
892 894 ipsec_swap_policy(&ipss->ipsec_system_policy,
893 895 &ipss->ipsec_inactive_policy, ns);
894 896 }
895 897
896 898 /*
897 899 * Clone one policy rule..
898 900 */
899 901 static ipsec_policy_t *
900 902 ipsec_copy_policy(const ipsec_policy_t *src)
901 903 {
902 904 ipsec_policy_t *dst = kmem_cache_alloc(ipsec_pol_cache, KM_NOSLEEP);
903 905
904 906 if (dst == NULL)
905 907 return (NULL);
906 908
907 909 /*
908 910 * Adjust refcounts of cloned state.
909 911 */
910 912 IPACT_REFHOLD(src->ipsp_act);
911 913 src->ipsp_sel->ipsl_refs++;
912 914
913 915 HASH_NULL(dst, ipsp_hash);
914 916 dst->ipsp_netstack = src->ipsp_netstack;
915 917 dst->ipsp_refs = 1;
916 918 dst->ipsp_sel = src->ipsp_sel;
917 919 dst->ipsp_act = src->ipsp_act;
918 920 dst->ipsp_prio = src->ipsp_prio;
919 921 dst->ipsp_index = src->ipsp_index;
920 922
921 923 return (dst);
922 924 }
923 925
924 926 void
925 927 ipsec_insert_always(avl_tree_t *tree, void *new_node)
926 928 {
927 929 void *node;
928 930 avl_index_t where;
929 931
930 932 node = avl_find(tree, new_node, &where);
931 933 ASSERT(node == NULL);
932 934 avl_insert(tree, new_node, where);
933 935 }
934 936
935 937
936 938 static int
937 939 ipsec_copy_chain(ipsec_policy_head_t *dph, ipsec_policy_t *src,
938 940 ipsec_policy_t **dstp)
939 941 {
940 942 for (; src != NULL; src = src->ipsp_hash.hash_next) {
941 943 ipsec_policy_t *dst = ipsec_copy_policy(src);
942 944 if (dst == NULL)
943 945 return (ENOMEM);
944 946
945 947 HASHLIST_INSERT(dst, ipsp_hash, *dstp);
946 948 ipsec_insert_always(&dph->iph_rulebyid, dst);
947 949 }
948 950 return (0);
949 951 }
950 952
951 953
952 954
953 955 /*
954 956 * Make one policy head look exactly like another.
955 957 *
956 958 * As with ipsec_swap_policy, we lock the destination policy head first, then
957 959 * the source policy head. Note that we only need to read-lock the source
958 960 * policy head as we are not changing it.
959 961 */
960 962 int
961 963 ipsec_copy_polhead(ipsec_policy_head_t *sph, ipsec_policy_head_t *dph,
962 964 netstack_t *ns)
963 965 {
964 966 int af, dir, chain, nchains;
965 967
966 968 rw_enter(&dph->iph_lock, RW_WRITER);
967 969
968 970 ipsec_polhead_flush(dph, ns);
969 971
970 972 rw_enter(&sph->iph_lock, RW_READER);
971 973
972 974 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
973 975 ipsec_policy_root_t *dpr = &dph->iph_root[dir];
974 976 ipsec_policy_root_t *spr = &sph->iph_root[dir];
975 977 nchains = dpr->ipr_nchains;
976 978
977 979 ASSERT(dpr->ipr_nchains == spr->ipr_nchains);
978 980
979 981 for (af = 0; af < IPSEC_NAF; af++) {
980 982 if (ipsec_copy_chain(dph, spr->ipr_nonhash[af],
981 983 &dpr->ipr_nonhash[af]))
982 984 goto abort_copy;
983 985 }
984 986
985 987 for (chain = 0; chain < nchains; chain++) {
986 988 if (ipsec_copy_chain(dph,
987 989 spr->ipr_hash[chain].hash_head,
988 990 &dpr->ipr_hash[chain].hash_head))
989 991 goto abort_copy;
990 992 }
991 993 }
992 994
993 995 dph->iph_gen++;
994 996
995 997 rw_exit(&sph->iph_lock);
996 998 rw_exit(&dph->iph_lock);
997 999 return (0);
998 1000
999 1001 abort_copy:
1000 1002 ipsec_polhead_flush(dph, ns);
1001 1003 rw_exit(&sph->iph_lock);
1002 1004 rw_exit(&dph->iph_lock);
1003 1005 return (ENOMEM);
1004 1006 }
1005 1007
1006 1008 /*
1007 1009 * Clone currently active policy to the inactive policy list.
1008 1010 */
1009 1011 int
1010 1012 ipsec_clone_system_policy(netstack_t *ns)
1011 1013 {
1012 1014 ipsec_stack_t *ipss = ns->netstack_ipsec;
1013 1015
1014 1016 return (ipsec_copy_polhead(&ipss->ipsec_system_policy,
1015 1017 &ipss->ipsec_inactive_policy, ns));
1016 1018 }
1017 1019
1018 1020 /*
1019 1021 * Extract the string from ipsec_policy_failure_msgs[type] and
1020 1022 * log it.
1021 1023 *
1022 1024 */
1023 1025 void
1024 1026 ipsec_log_policy_failure(int type, char *func_name, ipha_t *ipha, ip6_t *ip6h,
1025 1027 boolean_t secure, netstack_t *ns)
1026 1028 {
1027 1029 char sbuf[INET6_ADDRSTRLEN];
1028 1030 char dbuf[INET6_ADDRSTRLEN];
1029 1031 char *s;
1030 1032 char *d;
1031 1033 ipsec_stack_t *ipss = ns->netstack_ipsec;
1032 1034
1033 1035 ASSERT((ipha == NULL && ip6h != NULL) ||
1034 1036 (ip6h == NULL && ipha != NULL));
1035 1037
1036 1038 if (ipha != NULL) {
1037 1039 s = inet_ntop(AF_INET, &ipha->ipha_src, sbuf, sizeof (sbuf));
1038 1040 d = inet_ntop(AF_INET, &ipha->ipha_dst, dbuf, sizeof (dbuf));
1039 1041 } else {
1040 1042 s = inet_ntop(AF_INET6, &ip6h->ip6_src, sbuf, sizeof (sbuf));
1041 1043 d = inet_ntop(AF_INET6, &ip6h->ip6_dst, dbuf, sizeof (dbuf));
1042 1044
1043 1045 }
1044 1046
1045 1047 /* Always bump the policy failure counter. */
1046 1048 ipss->ipsec_policy_failure_count[type]++;
1047 1049
1048 1050 ipsec_rl_strlog(ns, IP_MOD_ID, 0, 0, SL_ERROR|SL_WARN|SL_CONSOLE,
1049 1051 ipsec_policy_failure_msgs[type], func_name,
1050 1052 (secure ? "secure" : "not secure"), s, d);
1051 1053 }
1052 1054
1053 1055 /*
1054 1056 * Rate-limiting front-end to strlog() for AH and ESP. Uses the ndd variables
1055 1057 * in /dev/ip and the same rate-limiting clock so that there's a single
1056 1058 * knob to turn to throttle the rate of messages.
1057 1059 */
1058 1060 void
1059 1061 ipsec_rl_strlog(netstack_t *ns, short mid, short sid, char level, ushort_t sl,
1060 1062 char *fmt, ...)
1061 1063 {
1062 1064 va_list adx;
1063 1065 hrtime_t current = gethrtime();
1064 1066 ip_stack_t *ipst = ns->netstack_ip;
1065 1067 ipsec_stack_t *ipss = ns->netstack_ipsec;
1066 1068
1067 1069 sl |= SL_CONSOLE;
1068 1070 /*
1069 1071 * Throttle logging to stop syslog from being swamped. If variable
1070 1072 * 'ipsec_policy_log_interval' is zero, don't log any messages at
1071 1073 * all; otherwise log only one message every 'ipsec_policy_log_interval'
1072 1074 * msec. Convert interval (in msec) to hrtime (in nsec).
1073 1075 */
1074 1076
1075 1077 if (ipst->ips_ipsec_policy_log_interval) {
1076 1078 if (ipss->ipsec_policy_failure_last +
1077 1079 MSEC2NSEC(ipst->ips_ipsec_policy_log_interval) <= current) {
1078 1080 va_start(adx, fmt);
1079 1081 (void) vstrlog(mid, sid, level, sl, fmt, adx);
1080 1082 va_end(adx);
1081 1083 ipss->ipsec_policy_failure_last = current;
1082 1084 }
1083 1085 }
1084 1086 }
1085 1087
1086 1088 void
1087 1089 ipsec_config_flush(netstack_t *ns)
1088 1090 {
1089 1091 ipsec_stack_t *ipss = ns->netstack_ipsec;
1090 1092
1091 1093 rw_enter(&ipss->ipsec_system_policy.iph_lock, RW_WRITER);
1092 1094 ipsec_polhead_flush(&ipss->ipsec_system_policy, ns);
1093 1095 ipss->ipsec_next_policy_index = 1;
1094 1096 rw_exit(&ipss->ipsec_system_policy.iph_lock);
1095 1097 ipsec_action_reclaim_stack(ipss);
1096 1098 }
1097 1099
1098 1100 /*
1099 1101 * Clip a policy's min/max keybits vs. the capabilities of the
1100 1102 * algorithm.
1101 1103 */
1102 1104 static void
1103 1105 act_alg_adjust(uint_t algtype, uint_t algid,
1104 1106 uint16_t *minbits, uint16_t *maxbits, netstack_t *ns)
1105 1107 {
1106 1108 ipsec_stack_t *ipss = ns->netstack_ipsec;
1107 1109 ipsec_alginfo_t *algp = ipss->ipsec_alglists[algtype][algid];
1108 1110
1109 1111 if (algp != NULL) {
1110 1112 /*
1111 1113 * If passed-in minbits is zero, we assume the caller trusts
1112 1114 * us with setting the minimum key size. We pick the
1114 1116 * algorithm's DEFAULT key size for the minimum in this case.
1114 1116 */
1115 1117 if (*minbits == 0) {
1116 1118 *minbits = algp->alg_default_bits;
1117 1119 ASSERT(*minbits >= algp->alg_minbits);
1118 1120 } else {
1119 1121 *minbits = MAX(MIN(*minbits, algp->alg_maxbits),
1120 1122 algp->alg_minbits);
1121 1123 }
1122 1124 if (*maxbits == 0)
1123 1125 *maxbits = algp->alg_maxbits;
1124 1126 else
1125 1127 *maxbits = MIN(MAX(*maxbits, algp->alg_minbits),
1126 1128 algp->alg_maxbits);
1127 1129 ASSERT(*minbits <= *maxbits);
1128 1130 } else {
1129 1131 *minbits = 0;
1130 1132 *maxbits = 0;
1131 1133 }
1132 1134 }
1133 1135
1134 1136 /*
1135 1137 * Check an action's requested algorithms against the algorithms currently
1136 1138 * loaded in the system.
1137 1139 */
1138 1140 boolean_t
1139 1141 ipsec_check_action(ipsec_act_t *act, int *diag, netstack_t *ns)
1140 1142 {
1141 1143 ipsec_prot_t *ipp;
1142 1144 ipsec_stack_t *ipss = ns->netstack_ipsec;
1143 1145
1144 1146 ipp = &act->ipa_apply;
1145 1147
1146 1148 if (ipp->ipp_use_ah &&
1147 1149 ipss->ipsec_alglists[IPSEC_ALG_AUTH][ipp->ipp_auth_alg] == NULL) {
1148 1150 *diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
1149 1151 return (B_FALSE);
1150 1152 }
1151 1153 if (ipp->ipp_use_espa &&
1152 1154 ipss->ipsec_alglists[IPSEC_ALG_AUTH][ipp->ipp_esp_auth_alg] ==
1153 1155 NULL) {
1154 1156 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
1155 1157 return (B_FALSE);
1156 1158 }
1157 1159 if (ipp->ipp_use_esp &&
1158 1160 ipss->ipsec_alglists[IPSEC_ALG_ENCR][ipp->ipp_encr_alg] == NULL) {
1159 1161 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
1160 1162 return (B_FALSE);
1161 1163 }
1162 1164
1163 1165 act_alg_adjust(IPSEC_ALG_AUTH, ipp->ipp_auth_alg,
1164 1166 &ipp->ipp_ah_minbits, &ipp->ipp_ah_maxbits, ns);
1165 1167 act_alg_adjust(IPSEC_ALG_AUTH, ipp->ipp_esp_auth_alg,
1166 1168 &ipp->ipp_espa_minbits, &ipp->ipp_espa_maxbits, ns);
1167 1169 act_alg_adjust(IPSEC_ALG_ENCR, ipp->ipp_encr_alg,
1168 1170 &ipp->ipp_espe_minbits, &ipp->ipp_espe_maxbits, ns);
1169 1171
1170 1172 if (ipp->ipp_ah_minbits > ipp->ipp_ah_maxbits) {
1171 1173 *diag = SPD_DIAGNOSTIC_UNSUPP_AH_KEYSIZE;
1172 1174 return (B_FALSE);
1173 1175 }
1174 1176 if (ipp->ipp_espa_minbits > ipp->ipp_espa_maxbits) {
1175 1177 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_KEYSIZE;
1176 1178 return (B_FALSE);
1177 1179 }
1178 1180 if (ipp->ipp_espe_minbits > ipp->ipp_espe_maxbits) {
1179 1181 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_KEYSIZE;
1180 1182 return (B_FALSE);
1181 1183 }
1182 1184 /* TODO: sanity check lifetimes */
1183 1185 return (B_TRUE);
1184 1186 }
1185 1187
1186 1188 /*
1187 1189 * Set up a single action during wildcard expansion..
1188 1190 */
1189 1191 static void
1190 1192 ipsec_setup_act(ipsec_act_t *outact, ipsec_act_t *act,
1191 1193 uint_t auth_alg, uint_t encr_alg, uint_t eauth_alg, netstack_t *ns)
1192 1194 {
1193 1195 ipsec_prot_t *ipp;
1194 1196
1195 1197 *outact = *act;
1196 1198 ipp = &outact->ipa_apply;
1197 1199 ipp->ipp_auth_alg = (uint8_t)auth_alg;
1198 1200 ipp->ipp_encr_alg = (uint8_t)encr_alg;
1199 1201 ipp->ipp_esp_auth_alg = (uint8_t)eauth_alg;
1200 1202
1201 1203 act_alg_adjust(IPSEC_ALG_AUTH, auth_alg,
1202 1204 &ipp->ipp_ah_minbits, &ipp->ipp_ah_maxbits, ns);
1203 1205 act_alg_adjust(IPSEC_ALG_AUTH, eauth_alg,
1204 1206 &ipp->ipp_espa_minbits, &ipp->ipp_espa_maxbits, ns);
1205 1207 act_alg_adjust(IPSEC_ALG_ENCR, encr_alg,
1206 1208 &ipp->ipp_espe_minbits, &ipp->ipp_espe_maxbits, ns);
1207 1209 }
1208 1210
1209 1211 /*
1210 1212 * combinatoric expansion time: expand a wildcarded action into an
1211 1213 * array of wildcarded actions; we return the exploded action list,
1212 1214 * and return a count in *nact (output only).
1213 1215 */
1214 1216 static ipsec_act_t *
1215 1217 ipsec_act_wildcard_expand(ipsec_act_t *act, uint_t *nact, netstack_t *ns)
1216 1218 {
1217 1219 boolean_t use_ah, use_esp, use_espa;
1218 1220 boolean_t wild_auth, wild_encr, wild_eauth;
1219 1221 uint_t auth_alg, auth_idx, auth_min, auth_max;
1220 1222 uint_t eauth_alg, eauth_idx, eauth_min, eauth_max;
1221 1223 uint_t encr_alg, encr_idx, encr_min, encr_max;
1222 1224 uint_t action_count, ai;
1223 1225 ipsec_act_t *outact;
1224 1226 ipsec_stack_t *ipss = ns->netstack_ipsec;
1225 1227
1226 1228 if (act->ipa_type != IPSEC_ACT_APPLY) {
1227 1229 outact = kmem_alloc(sizeof (*act), KM_NOSLEEP);
1228 1230 *nact = 1;
1229 1231 if (outact != NULL)
1230 1232 bcopy(act, outact, sizeof (*act));
1231 1233 return (outact);
1232 1234 }
1233 1235 /*
1234 1236 * compute the combinatoric explosion..
1235 1237 *
1236 1238 * we assume a request for encr if esp_req is PREF_REQUIRED
1237 1239 * we assume a request for ah auth if ah_req is PREF_REQUIRED.
1238 1240 * we assume a request for esp auth if !ah and esp_req is PREF_REQUIRED
1239 1241 */
1240 1242
1241 1243 use_ah = act->ipa_apply.ipp_use_ah;
1242 1244 use_esp = act->ipa_apply.ipp_use_esp;
1243 1245 use_espa = act->ipa_apply.ipp_use_espa;
1244 1246 auth_alg = act->ipa_apply.ipp_auth_alg;
1245 1247 eauth_alg = act->ipa_apply.ipp_esp_auth_alg;
1246 1248 encr_alg = act->ipa_apply.ipp_encr_alg;
1247 1249
1248 1250 wild_auth = use_ah && (auth_alg == 0);
1249 1251 wild_eauth = use_espa && (eauth_alg == 0);
1250 1252 wild_encr = use_esp && (encr_alg == 0);
1251 1253
1252 1254 action_count = 1;
1253 1255 auth_min = auth_max = auth_alg;
1254 1256 eauth_min = eauth_max = eauth_alg;
1255 1257 encr_min = encr_max = encr_alg;
1256 1258
1257 1259 /*
1258 1260 * set up for explosion.. for each dimension, expand output
1259 1261 * size by the explosion factor.
1260 1262 *
1261 1263 * Don't include the "any" algorithms, if defined, as no
1262 1264 * kernel policies should be set for these algorithms.
1263 1265 */
1264 1266
1265 1267 #define SET_EXP_MINMAX(type, wild, alg, min, max, ipss) \
1266 1268 if (wild) { \
1267 1269 int nalgs = ipss->ipsec_nalgs[type]; \
1268 1270 if (ipss->ipsec_alglists[type][alg] != NULL) \
1269 1271 nalgs--; \
1270 1272 action_count *= nalgs; \
1271 1273 min = 0; \
1272 1274 max = ipss->ipsec_nalgs[type] - 1; \
1273 1275 }
1274 1276
1275 1277 SET_EXP_MINMAX(IPSEC_ALG_AUTH, wild_auth, SADB_AALG_NONE,
1276 1278 auth_min, auth_max, ipss);
1277 1279 SET_EXP_MINMAX(IPSEC_ALG_AUTH, wild_eauth, SADB_AALG_NONE,
1278 1280 eauth_min, eauth_max, ipss);
1279 1281 SET_EXP_MINMAX(IPSEC_ALG_ENCR, wild_encr, SADB_EALG_NONE,
1280 1282 encr_min, encr_max, ipss);
1281 1283
1282 1284 #undef SET_EXP_MINMAX
1283 1285
1284 1286 /*
1285 1287 * ok, allocate the whole mess..
1286 1288 */
1287 1289
1288 1290 outact = kmem_alloc(sizeof (*outact) * action_count, KM_NOSLEEP);
1289 1291 if (outact == NULL)
1290 1292 return (NULL);
1291 1293
1292 1294 /*
1293 1295 * Now compute all combinations. Note that non-wildcarded
1294 1296 * dimensions just get a single value from auth_min, while
1295 1297 * wildcarded dimensions indirect through the sortlist.
1296 1298 *
1297 1299 * We do encryption outermost since, at this time, there's
1298 1300 * greater difference in security and performance between
1299 1301 * encryption algorithms vs. authentication algorithms.
1300 1302 */
1301 1303
1302 1304 ai = 0;
1303 1305
1304 1306 #define WHICH_ALG(type, wild, idx, ipss) \
1305 1307 ((wild)?(ipss->ipsec_sortlist[type][idx]):(idx))
1306 1308
1307 1309 for (encr_idx = encr_min; encr_idx <= encr_max; encr_idx++) {
1308 1310 encr_alg = WHICH_ALG(IPSEC_ALG_ENCR, wild_encr, encr_idx, ipss);
1309 1311 if (wild_encr && encr_alg == SADB_EALG_NONE)
1310 1312 continue;
1311 1313 for (auth_idx = auth_min; auth_idx <= auth_max; auth_idx++) {
1312 1314 auth_alg = WHICH_ALG(IPSEC_ALG_AUTH, wild_auth,
1313 1315 auth_idx, ipss);
1314 1316 if (wild_auth && auth_alg == SADB_AALG_NONE)
1315 1317 continue;
1316 1318 for (eauth_idx = eauth_min; eauth_idx <= eauth_max;
1317 1319 eauth_idx++) {
1318 1320 eauth_alg = WHICH_ALG(IPSEC_ALG_AUTH,
1319 1321 wild_eauth, eauth_idx, ipss);
1320 1322 if (wild_eauth && eauth_alg == SADB_AALG_NONE)
1321 1323 continue;
1322 1324
1323 1325 ipsec_setup_act(&outact[ai], act,
1324 1326 auth_alg, encr_alg, eauth_alg, ns);
1325 1327 ai++;
1326 1328 }
1327 1329 }
1328 1330 }
1329 1331
1330 1332 #undef WHICH_ALG
1331 1333
1332 1334 ASSERT(ai == action_count);
1333 1335 *nact = action_count;
1334 1336 return (outact);
1335 1337 }
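A hypothetical worked example of the explosion factor: if an APPLY action wildcards all three dimensions and the system has, say, 4 encryption and 3 authentication algorithms loaded (after SET_EXP_MINMAX discounts a loaded "none" placeholder in each dimension), then action_count = 4 * 3 * 3 = 36, and the triple loop fills all 36 ipsec_act_t slots.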
1336 1338
1337 1339 /*
1338 1340 * Extract the parts of an ipsec_prot_t from an old-style ipsec_req_t.
1339 1341 */
1340 1342 static void
1341 1343 ipsec_prot_from_req(const ipsec_req_t *req, ipsec_prot_t *ipp)
1342 1344 {
1343 1345 bzero(ipp, sizeof (*ipp));
1344 1346 /*
1345 1347 * ipp_use_* are bitfields. Look at "!!" in the following as a
1346 1348 * "boolean canonicalization" operator.
1347 1349 */
1348 1350 ipp->ipp_use_ah = !!(req->ipsr_ah_req & IPSEC_PREF_REQUIRED);
1349 1351 ipp->ipp_use_esp = !!(req->ipsr_esp_req & IPSEC_PREF_REQUIRED);
1350 1352 ipp->ipp_use_espa = !!(req->ipsr_esp_auth_alg);
1351 1353 ipp->ipp_use_se = !!(req->ipsr_self_encap_req & IPSEC_PREF_REQUIRED);
1352 1354 ipp->ipp_use_unique = !!((req->ipsr_ah_req|req->ipsr_esp_req) &
1353 1355 IPSEC_PREF_UNIQUE);
1354 1356 ipp->ipp_encr_alg = req->ipsr_esp_alg;
1355 1357 /*
1356 1358 * SADB_AALG_ANY is a placeholder to distinguish "any" from
1357 1359 * "none" above. If auth is required, as determined above,
1358 1360 * SADB_AALG_ANY becomes 0, which is the representation
1359 1361 * of "any" and "none" in PF_KEY v2.
1360 1362 */
1361 1363 ipp->ipp_auth_alg = (req->ipsr_auth_alg != SADB_AALG_ANY) ?
1362 1364 req->ipsr_auth_alg : 0;
1363 1365 ipp->ipp_esp_auth_alg = (req->ipsr_esp_auth_alg != SADB_AALG_ANY) ?
1364 1366 req->ipsr_esp_auth_alg : 0;
1365 1367 }
1366 1368
1367 1369 /*
1368 1370 * Extract a new-style action from a request.
1369 1371 */
1370 1372 void
1371 1373 ipsec_actvec_from_req(const ipsec_req_t *req, ipsec_act_t **actp, uint_t *nactp,
1372 1374 netstack_t *ns)
1373 1375 {
1374 1376 struct ipsec_act act;
1375 1377
1376 1378 bzero(&act, sizeof (act));
1377 1379 if ((req->ipsr_ah_req & IPSEC_PREF_NEVER) &&
1378 1380 (req->ipsr_esp_req & IPSEC_PREF_NEVER)) {
1379 1381 act.ipa_type = IPSEC_ACT_BYPASS;
1380 1382 } else {
1381 1383 act.ipa_type = IPSEC_ACT_APPLY;
1382 1384 ipsec_prot_from_req(req, &act.ipa_apply);
1383 1385 }
1384 1386 *actp = ipsec_act_wildcard_expand(&act, nactp, ns);
1385 1387 }
1386 1388
1387 1389 /*
1388 1390 * Convert a new-style "prot" back to an ipsec_req_t (more backwards compat).
1389 1391 * We assume caller has already zero'ed *req for us.
1390 1392 */
1391 1393 static int
1392 1394 ipsec_req_from_prot(ipsec_prot_t *ipp, ipsec_req_t *req)
1393 1395 {
1394 1396 req->ipsr_esp_alg = ipp->ipp_encr_alg;
1395 1397 req->ipsr_auth_alg = ipp->ipp_auth_alg;
1396 1398 req->ipsr_esp_auth_alg = ipp->ipp_esp_auth_alg;
1397 1399
1398 1400 if (ipp->ipp_use_unique) {
1399 1401 req->ipsr_ah_req |= IPSEC_PREF_UNIQUE;
1400 1402 req->ipsr_esp_req |= IPSEC_PREF_UNIQUE;
1401 1403 }
1402 1404 if (ipp->ipp_use_se)
1403 1405 req->ipsr_self_encap_req |= IPSEC_PREF_REQUIRED;
1404 1406 if (ipp->ipp_use_ah)
1405 1407 req->ipsr_ah_req |= IPSEC_PREF_REQUIRED;
1406 1408 if (ipp->ipp_use_esp)
1407 1409 req->ipsr_esp_req |= IPSEC_PREF_REQUIRED;
1408 1410 return (sizeof (*req));
1409 1411 }
1410 1412
1411 1413 /*
1412 1414 * Convert a new-style action back to an ipsec_req_t (more backwards compat).
1413 1415 * We assume caller has already zero'ed *req for us.
1414 1416 */
1415 1417 static int
1416 1418 ipsec_req_from_act(ipsec_action_t *ap, ipsec_req_t *req)
1417 1419 {
1418 1420 switch (ap->ipa_act.ipa_type) {
1419 1421 case IPSEC_ACT_BYPASS:
1420 1422 req->ipsr_ah_req = IPSEC_PREF_NEVER;
1421 1423 req->ipsr_esp_req = IPSEC_PREF_NEVER;
1422 1424 return (sizeof (*req));
1423 1425 case IPSEC_ACT_APPLY:
1424 1426 return (ipsec_req_from_prot(&ap->ipa_act.ipa_apply, req));
1425 1427 }
1426 1428 return (sizeof (*req));
1427 1429 }
1428 1430
1429 1431 /*
1430 1432 * Convert a new-style action back to an ipsec_req_t (more backwards compat).
1431 1433 * We assume caller has already zero'ed *req for us.
1432 1434 */
1433 1435 int
1434 1436 ipsec_req_from_head(ipsec_policy_head_t *ph, ipsec_req_t *req, int af)
1435 1437 {
1436 1438 ipsec_policy_t *p;
1437 1439
1438 1440 /*
1439 1441 * FULL-PERSOCK: consult hash table, too?
1440 1442 */
1441 1443 for (p = ph->iph_root[IPSEC_INBOUND].ipr_nonhash[af];
1442 1444 p != NULL;
1443 1445 p = p->ipsp_hash.hash_next) {
1444 1446 if ((p->ipsp_sel->ipsl_key.ipsl_valid & IPSL_WILDCARD) == 0)
1445 1447 return (ipsec_req_from_act(p->ipsp_act, req));
1446 1448 }
1447 1449 return (sizeof (*req));
1448 1450 }
1449 1451
1450 1452 /*
1451 1453 * Based on per-socket or latched policy, convert to an appropriate
1452 1454 * IP_SEC_OPT ipsec_req_t for the socket option; return size so we can
1453 1455 * be tail-called from ip.
1454 1456 */
1455 1457 int
1456 1458 ipsec_req_from_conn(conn_t *connp, ipsec_req_t *req, int af)
1457 1459 {
1458 1460 ipsec_latch_t *ipl;
1459 1461 int rv = sizeof (ipsec_req_t);
1460 1462
1461 1463 bzero(req, sizeof (*req));
1462 1464
1463 1465 ASSERT(MUTEX_HELD(&connp->conn_lock));
1464 1466 ipl = connp->conn_latch;
1465 1467
1466 1468 /*
1467 1469 * Find appropriate policy. First choice is latched action;
1468 1470 * failing that, see latched policy; failing that,
1469 1471 * look at configured policy.
1470 1472 */
1471 1473 if (ipl != NULL) {
1472 1474 if (connp->conn_latch_in_action != NULL) {
1473 1475 rv = ipsec_req_from_act(connp->conn_latch_in_action,
1474 1476 req);
1475 1477 goto done;
1476 1478 }
1477 1479 if (connp->conn_latch_in_policy != NULL) {
1478 1480 rv = ipsec_req_from_act(
1479 1481 connp->conn_latch_in_policy->ipsp_act, req);
1480 1482 goto done;
1481 1483 }
1482 1484 }
1483 1485 if (connp->conn_policy != NULL)
1484 1486 rv = ipsec_req_from_head(connp->conn_policy, req, af);
1485 1487 done:
1486 1488 return (rv);
1487 1489 }
1488 1490
1489 1491 void
1490 1492 ipsec_actvec_free(ipsec_act_t *act, uint_t nact)
1491 1493 {
1492 1494 kmem_free(act, nact * sizeof (*act));
1493 1495 }
1494 1496
1495 1497 /*
1496 1498 * Consumes a reference to ipsp.
1497 1499 */
1498 1500 static mblk_t *
1499 1501 ipsec_check_loopback_policy(mblk_t *data_mp, ip_recv_attr_t *ira,
1500 1502 ipsec_policy_t *ipsp)
1501 1503 {
1502 1504 if (!(ira->ira_flags & IRAF_IPSEC_SECURE))
1503 1505 return (data_mp);
1504 1506
1505 1507 ASSERT(ira->ira_flags & IRAF_LOOPBACK);
1506 1508
1507 1509 IPPOL_REFRELE(ipsp);
1508 1510
1509 1511 /*
1510 1512 * We should do an actual policy check here. Revisit this
1511 1513 * when we revisit the IPsec API. (And pass a conn_t in when we
1512 1514 * get there.)
1513 1515 */
1514 1516
1515 1517 return (data_mp);
1516 1518 }
1517 1519
1518 1520 /*
1519 1521 * Check that packet's inbound ports & proto match the selectors
1520 1522 * expected by the SAs it traversed on the way in.
1521 1523 */
1522 1524 static boolean_t
1523 1525 ipsec_check_ipsecin_unique(ip_recv_attr_t *ira, const char **reason,
1524 1526 kstat_named_t **counter, uint64_t pkt_unique, netstack_t *ns)
1525 1527 {
1526 1528 uint64_t ah_mask, esp_mask;
1527 1529 ipsa_t *ah_assoc;
1528 1530 ipsa_t *esp_assoc;
1529 1531 ipsec_stack_t *ipss = ns->netstack_ipsec;
1530 1532
1531 1533 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1532 1534 ASSERT(!(ira->ira_flags & IRAF_LOOPBACK));
1533 1535
1534 1536 ah_assoc = ira->ira_ipsec_ah_sa;
1535 1537 esp_assoc = ira->ira_ipsec_esp_sa;
1536 1538 ASSERT((ah_assoc != NULL) || (esp_assoc != NULL));
1537 1539
1538 1540 ah_mask = (ah_assoc != NULL) ? ah_assoc->ipsa_unique_mask : 0;
1539 1541 esp_mask = (esp_assoc != NULL) ? esp_assoc->ipsa_unique_mask : 0;
1540 1542
1541 1543 if ((ah_mask == 0) && (esp_mask == 0))
1542 1544 return (B_TRUE);
1543 1545
1544 1546 /*
1545 1547 * The pkt_unique check will also check for tunnel mode on the SA
1546 1548 * vs. the tunneled_packet boolean. "Be liberal in what you receive"
1547 1549 * should not apply in this case. ;)
1548 1550 */
1549 1551
1550 1552 if (ah_mask != 0 &&
1551 1553 ah_assoc->ipsa_unique_id != (pkt_unique & ah_mask)) {
1552 1554 *reason = "AH inner header mismatch";
1553 1555 *counter = DROPPER(ipss, ipds_spd_ah_innermismatch);
1554 1556 return (B_FALSE);
1555 1557 }
1556 1558 if (esp_mask != 0 &&
1557 1559 esp_assoc->ipsa_unique_id != (pkt_unique & esp_mask)) {
1558 1560 *reason = "ESP inner header mismatch";
1559 1561 *counter = DROPPER(ipss, ipds_spd_esp_innermismatch);
1560 1562 return (B_FALSE);
1561 1563 }
1562 1564 return (B_TRUE);
1563 1565 }
1564 1566
1565 1567 static boolean_t
1566 1568 ipsec_check_ipsecin_action(ip_recv_attr_t *ira, mblk_t *mp, ipsec_action_t *ap,
1567 1569 ipha_t *ipha, ip6_t *ip6h, const char **reason, kstat_named_t **counter,
1568 1570 netstack_t *ns)
1569 1571 {
1570 1572 boolean_t ret = B_TRUE;
1571 1573 ipsec_prot_t *ipp;
1572 1574 ipsa_t *ah_assoc;
1573 1575 ipsa_t *esp_assoc;
1574 1576 boolean_t decaps;
1575 1577 ipsec_stack_t *ipss = ns->netstack_ipsec;
1576 1578
1577 1579 ASSERT((ipha == NULL && ip6h != NULL) ||
1578 1580 (ip6h == NULL && ipha != NULL));
1579 1581
1580 1582 if (ira->ira_flags & IRAF_LOOPBACK) {
1581 1583 /*
1582 1584 * Besides accepting pointer-equivalent actions, we also
1583 1585 * accept any ICMP errors we generated for ourselves,
1584 1586 * regardless of policy. If we do not wish to make this
1585 1587 * assumption in the future, check here, and where
1586 1588 * IXAF_TRUSTED_ICMP is initialized in ip.c and ip6.c.
1587 1589 */
1588 1590 if (ap == ira->ira_ipsec_action ||
1589 1591 (ira->ira_flags & IRAF_TRUSTED_ICMP))
1590 1592 return (B_TRUE);
1591 1593
1592 1594 /* Deep compare necessary here?? */
1593 1595 *counter = DROPPER(ipss, ipds_spd_loopback_mismatch);
1594 1596 *reason = "loopback policy mismatch";
1595 1597 return (B_FALSE);
1596 1598 }
1597 1599 ASSERT(!(ira->ira_flags & IRAF_TRUSTED_ICMP));
1598 1600 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1599 1601
1600 1602 ah_assoc = ira->ira_ipsec_ah_sa;
1601 1603 esp_assoc = ira->ira_ipsec_esp_sa;
1602 1604
1603 1605 decaps = (ira->ira_flags & IRAF_IPSEC_DECAPS);
1604 1606
1605 1607 switch (ap->ipa_act.ipa_type) {
1606 1608 case IPSEC_ACT_DISCARD:
1607 1609 case IPSEC_ACT_REJECT:
1608 1610 /* Should "fail hard" */
1609 1611 *counter = DROPPER(ipss, ipds_spd_explicit);
1610 1612 *reason = "blocked by policy";
1611 1613 return (B_FALSE);
1612 1614
1613 1615 case IPSEC_ACT_BYPASS:
1614 1616 case IPSEC_ACT_CLEAR:
1615 1617 *counter = DROPPER(ipss, ipds_spd_got_secure);
1616 1618 *reason = "expected clear, got protected";
1617 1619 return (B_FALSE);
1618 1620
1619 1621 case IPSEC_ACT_APPLY:
1620 1622 ipp = &ap->ipa_act.ipa_apply;
1621 1623 /*
1622 1624 * For now we simply check whether the datagram has
1623 1625 * satisfied the required IPsec protocol constraints.
1624 1626 * We might check more in the future, like sensitivity
1625 1627 * levels, key bits, etc. If it fails the constraints,
1626 1628 * check whether we would have accepted this packet had
1627 1629 * it come in clear.
1628 1630 */
1629 1631 if (ipp->ipp_use_ah) {
1630 1632 if (ah_assoc == NULL) {
1631 1633 ret = ipsec_inbound_accept_clear(mp, ipha,
1632 1634 ip6h);
1633 1635 *counter = DROPPER(ipss, ipds_spd_got_clear);
1634 1636 *reason = "unprotected not accepted";
1635 1637 break;
1636 1638 }
1637 1639 ASSERT(ah_assoc != NULL);
1638 1640 ASSERT(ipp->ipp_auth_alg != 0);
1639 1641
1640 1642 if (ah_assoc->ipsa_auth_alg !=
1641 1643 ipp->ipp_auth_alg) {
1642 1644 *counter = DROPPER(ipss, ipds_spd_bad_ahalg);
1643 1645 *reason = "unacceptable ah alg";
1644 1646 ret = B_FALSE;
1645 1647 break;
1646 1648 }
1647 1649 } else if (ah_assoc != NULL) {
1648 1650 /*
1649 1651 * Don't allow this. Check IPSEC NOTE above
1650 1652 * ip_fanout_proto().
1651 1653 */
1652 1654 *counter = DROPPER(ipss, ipds_spd_got_ah);
1653 1655 *reason = "unexpected AH";
1654 1656 ret = B_FALSE;
1655 1657 break;
1656 1658 }
1657 1659 if (ipp->ipp_use_esp) {
1658 1660 if (esp_assoc == NULL) {
1659 1661 ret = ipsec_inbound_accept_clear(mp, ipha,
1660 1662 ip6h);
1661 1663 *counter = DROPPER(ipss, ipds_spd_got_clear);
1662 1664 *reason = "unprotected not accepted";
1663 1665 break;
1664 1666 }
1665 1667 ASSERT(esp_assoc != NULL);
1666 1668 ASSERT(ipp->ipp_encr_alg != 0);
1667 1669
1668 1670 if (esp_assoc->ipsa_encr_alg !=
1669 1671 ipp->ipp_encr_alg) {
1670 1672 *counter = DROPPER(ipss, ipds_spd_bad_espealg);
1671 1673 *reason = "unacceptable esp alg";
1672 1674 ret = B_FALSE;
1673 1675 break;
1674 1676 }
1675 1677 /*
1676 1678 * If the client does not need authentication,
1677 1679 * we don't verify the alogrithm.
1678 1680 * we don't verify the algorithm.
1679 1681 if (ipp->ipp_use_espa) {
1680 1682 if (esp_assoc->ipsa_auth_alg !=
1681 1683 ipp->ipp_esp_auth_alg) {
1682 1684 *counter = DROPPER(ipss,
1683 1685 ipds_spd_bad_espaalg);
1684 1686 *reason = "unacceptable esp auth alg";
1685 1687 ret = B_FALSE;
1686 1688 break;
1687 1689 }
1688 1690 }
1689 1691 } else if (esp_assoc != NULL) {
1690 1692 /*
1691 1693 * Don't allow this. Check IPSEC NOTE above
1692 1694 * ip_fanout_proto().
1693 1695 */
1694 1696 *counter = DROPPER(ipss, ipds_spd_got_esp);
1695 1697 *reason = "unexpected ESP";
1696 1698 ret = B_FALSE;
1697 1699 break;
1698 1700 }
1699 1701 if (ipp->ipp_use_se) {
1700 1702 if (!decaps) {
1701 1703 ret = ipsec_inbound_accept_clear(mp, ipha,
1702 1704 ip6h);
1703 1705 if (!ret) {
1704 1706 /* XXX mutant? */
1705 1707 *counter = DROPPER(ipss,
1706 1708 ipds_spd_bad_selfencap);
1707 1709 *reason = "self encap not found";
1708 1710 break;
1709 1711 }
1710 1712 }
1711 1713 } else if (decaps) {
1712 1714 /*
1713 1715 * XXX If the packet comes in tunneled and the
1714 1716 * recipient does not expect it to be tunneled, it
1715 1717 * is okay. But we drop to be consistent with the
1716 1718 * other cases.
1717 1719 */
1718 1720 *counter = DROPPER(ipss, ipds_spd_got_selfencap);
1719 1721 *reason = "unexpected self encap";
1720 1722 ret = B_FALSE;
1721 1723 break;
1722 1724 }
1723 1725 if (ira->ira_ipsec_action != NULL) {
1724 1726 /*
1725 1727 * This can happen if we do a double policy-check on
1726 1728 * a packet
1727 1729 * XXX XXX should fix this case!
1728 1730 */
1729 1731 IPACT_REFRELE(ira->ira_ipsec_action);
1730 1732 }
1731 1733 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1732 1734 ASSERT(ira->ira_ipsec_action == NULL);
1733 1735 IPACT_REFHOLD(ap);
1734 1736 ira->ira_ipsec_action = ap;
1735 1737 break; /* from switch */
1736 1738 }
1737 1739 return (ret);
1738 1740 }
1739 1741
1740 1742 static boolean_t
1741 1743 spd_match_inbound_ids(ipsec_latch_t *ipl, ipsa_t *sa)
1742 1744 {
1743 1745 ASSERT(ipl->ipl_ids_latched == B_TRUE);
1744 1746 return (ipsid_equal(ipl->ipl_remote_cid, sa->ipsa_src_cid) &&
1745 1747 ipsid_equal(ipl->ipl_local_cid, sa->ipsa_dst_cid));
1746 1748 }
1747 1749
1748 1750 /*
1749 1751 * Takes a latched conn and an inbound packet and returns a unique_id suitable
1750 1752 * for SA comparisons. Most of the time we will copy from the conn_t, but
1751 1753 * there are cases when the conn_t is latched but it has wildcard selectors,
1752 1754 * and then we need to fall back to scooping them out of the packet.
1753 1755 *
1754 1756 * Assume we'll never have 0 with a conn_t present, so use 0 as a failure. We
1755 1757 * can get away with this because we only have non-zero ports/proto for
1756 1758 * latched conn_ts.
1757 1759 *
1758 1760 * Ideal candidate for an "inline" keyword, as we're JUST convoluted enough
1759 1761 * to not be a nice macro.
1760 1762 */
1761 1763 static uint64_t
1762 1764 conn_to_unique(conn_t *connp, mblk_t *data_mp, ipha_t *ipha, ip6_t *ip6h)
1763 1765 {
1764 1766 ipsec_selector_t sel;
1765 1767 uint8_t ulp = connp->conn_proto;
1766 1768
1767 1769 ASSERT(connp->conn_latch_in_policy != NULL);
1768 1770
1769 1771 if ((ulp == IPPROTO_TCP || ulp == IPPROTO_UDP || ulp == IPPROTO_SCTP) &&
1770 1772 (connp->conn_fport == 0 || connp->conn_lport == 0)) {
1771 1773 /* Slow path - we gotta grab from the packet. */
1772 1774 if (ipsec_init_inbound_sel(&sel, data_mp, ipha, ip6h,
1773 1775 SEL_NONE) != SELRET_SUCCESS) {
1774 1776 /* Failure -> have caller free packet with ENOMEM. */
1775 1777 return (0);
1776 1778 }
1777 1779 return (SA_UNIQUE_ID(sel.ips_remote_port, sel.ips_local_port,
1778 1780 sel.ips_protocol, 0));
1779 1781 }
1780 1782
1781 1783 #ifdef DEBUG_NOT_UNTIL_6478464
1782 1784 if (ipsec_init_inbound_sel(&sel, data_mp, ipha, ip6h, SEL_NONE) ==
1783 1785 SELRET_SUCCESS) {
1784 1786 ASSERT(sel.ips_local_port == connp->conn_lport);
1785 1787 ASSERT(sel.ips_remote_port == connp->conn_fport);
1786 1788 ASSERT(sel.ips_protocol == connp->conn_proto);
1787 1789 }
1788 1790 ASSERT(connp->conn_proto != 0);
1789 1791 #endif
1790 1792
1791 1793 return (SA_UNIQUE_ID(connp->conn_fport, connp->conn_lport, ulp, 0));
1792 1794 }
1793 1795
1794 1796 /*
1795 1797 * Called to check policy on a latched connection.
1796 1798 * Note that we don't dereference conn_latch or conn_ihere since the conn might
1797 1799 * be closing. The caller passes a held ipsec_latch_t instead.
1798 1800 */
1799 1801 static boolean_t
1800 1802 ipsec_check_ipsecin_latch(ip_recv_attr_t *ira, mblk_t *mp, ipsec_latch_t *ipl,
1801 1803 ipsec_action_t *ap, ipha_t *ipha, ip6_t *ip6h, const char **reason,
1802 1804 kstat_named_t **counter, conn_t *connp, netstack_t *ns)
1803 1805 {
1804 1806 ipsec_stack_t *ipss = ns->netstack_ipsec;
1805 1807
1806 1808 ASSERT(ipl->ipl_ids_latched == B_TRUE);
1807 1809 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1808 1810
1809 1811 if (!(ira->ira_flags & IRAF_LOOPBACK)) {
1810 1812 /*
1811 1813 * Over loopback, there aren't real security associations,
1812 1814 * so there are neither identities nor "unique" values
1813 1815 * for us to check the packet against.
1814 1816 */
1815 1817 if (ira->ira_ipsec_ah_sa != NULL) {
1816 1818 if (!spd_match_inbound_ids(ipl,
1817 1819 ira->ira_ipsec_ah_sa)) {
1818 1820 *counter = DROPPER(ipss, ipds_spd_ah_badid);
1819 1821 *reason = "AH identity mismatch";
1820 1822 return (B_FALSE);
1821 1823 }
1822 1824 }
1823 1825
1824 1826 if (ira->ira_ipsec_esp_sa != NULL) {
1825 1827 if (!spd_match_inbound_ids(ipl,
1826 1828 ira->ira_ipsec_esp_sa)) {
1827 1829 *counter = DROPPER(ipss, ipds_spd_esp_badid);
1828 1830 *reason = "ESP identity mismatch";
1829 1831 return (B_FALSE);
1830 1832 }
1831 1833 }
1832 1834
1833 1835 /*
1834 1836 * Can fudge pkt_unique from connp because we're latched.
1835 1837 * In DEBUG kernels (see conn_to_unique()'s implementation),
1836 1838 * verify this even if it REALLY slows things down.
1837 1839 */
1838 1840 if (!ipsec_check_ipsecin_unique(ira, reason, counter,
1839 1841 conn_to_unique(connp, mp, ipha, ip6h), ns)) {
1840 1842 return (B_FALSE);
1841 1843 }
1842 1844 }
1843 1845 return (ipsec_check_ipsecin_action(ira, mp, ap, ipha, ip6h, reason,
1844 1846 counter, ns));
1845 1847 }
1846 1848
1847 1849 /*
1848 1850 * Check to see whether this secured datagram meets the policy
1849 1851 * constraints specified in ipsp.
1850 1852 *
1851 1853 * Called from ipsec_check_global_policy, and ipsec_check_inbound_policy.
1852 1854 *
1853 1855 * Consumes a reference to ipsp.
1854 1856 * Returns the mblk if ok.
1855 1857 */
1856 1858 static mblk_t *
1857 1859 ipsec_check_ipsecin_policy(mblk_t *data_mp, ipsec_policy_t *ipsp,
1858 1860 ipha_t *ipha, ip6_t *ip6h, uint64_t pkt_unique, ip_recv_attr_t *ira,
1859 1861 netstack_t *ns)
1860 1862 {
1861 1863 ipsec_action_t *ap;
1862 1864 const char *reason = "no policy actions found";
1863 1865 ip_stack_t *ipst = ns->netstack_ip;
1864 1866 ipsec_stack_t *ipss = ns->netstack_ipsec;
1865 1867 kstat_named_t *counter;
1866 1868
1867 1869 counter = DROPPER(ipss, ipds_spd_got_secure);
1868 1870
1869 1871 ASSERT(ipsp != NULL);
1870 1872
1871 1873 ASSERT((ipha == NULL && ip6h != NULL) ||
1872 1874 (ip6h == NULL && ipha != NULL));
1873 1875
1874 1876 if (ira->ira_flags & IRAF_LOOPBACK)
1875 1877 return (ipsec_check_loopback_policy(data_mp, ira, ipsp));
1876 1878
1877 1879 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1878 1880
1879 1881 if (ira->ira_ipsec_action != NULL) {
1880 1882 /*
1881 1883 * this can happen if we do a double policy-check on a packet
1882 1884 * Would be nice to be able to delete this test..
1883 1885 */
1884 1886 IPACT_REFRELE(ira->ira_ipsec_action);
1885 1887 }
1886 1888 ASSERT(ira->ira_ipsec_action == NULL);
1887 1889
1888 1890 if (!SA_IDS_MATCH(ira->ira_ipsec_ah_sa, ira->ira_ipsec_esp_sa)) {
1889 1891 reason = "inbound AH and ESP identities differ";
1890 1892 counter = DROPPER(ipss, ipds_spd_ahesp_diffid);
1891 1893 goto drop;
1892 1894 }
1893 1895
1894 1896 if (!ipsec_check_ipsecin_unique(ira, &reason, &counter, pkt_unique,
1895 1897 ns))
1896 1898 goto drop;
1897 1899
1898 1900 /*
1899 1901 * Ok, now loop through the possible actions and see if any
1900 1902 * of them work for us.
1901 1903 */
1902 1904
1903 1905 for (ap = ipsp->ipsp_act; ap != NULL; ap = ap->ipa_next) {
1904 1906 if (ipsec_check_ipsecin_action(ira, data_mp, ap,
1905 1907 ipha, ip6h, &reason, &counter, ns)) {
1906 1908 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
1907 1909 IPPOL_REFRELE(ipsp);
1908 1910 return (data_mp);
1909 1911 }
1910 1912 }
1911 1913 drop:
1912 1914 ipsec_rl_strlog(ns, IP_MOD_ID, 0, 0, SL_ERROR|SL_WARN|SL_CONSOLE,
1913 1915 "ipsec inbound policy mismatch: %s, packet dropped\n",
1914 1916 reason);
1915 1917 IPPOL_REFRELE(ipsp);
1916 1918 ASSERT(ira->ira_ipsec_action == NULL);
1917 1919 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
1918 1920 ip_drop_packet(data_mp, B_TRUE, NULL, counter,
1919 1921 &ipss->ipsec_spd_dropper);
1920 1922 return (NULL);
1921 1923 }
1922 1924
1923 1925 /*
1924 1926 * sleazy prefix-length-based compare.
1925 1927 * another inlining candidate..
1926 1928 */
1927 1929 boolean_t
1928 1930 ip_addr_match(uint8_t *addr1, int pfxlen, in6_addr_t *addr2p)
1929 1931 {
1930 1932 int offset = pfxlen>>3;
1931 1933 int bitsleft = pfxlen & 7;
1932 1934 uint8_t *addr2 = (uint8_t *)addr2p;
1933 1935
1934 1936 /*
1935 1937 * and there was much evil..
1936 1938 * XXX should inline-expand the bcmp here and do this 32 bits
1937 1939 * or 64 bits at a time..
1938 1940 */
1939 1941 return ((bcmp(addr1, addr2, offset) == 0) &&
1940 1942 ((bitsleft == 0) ||
1941 1943 (((addr1[offset] ^ addr2[offset]) & (0xff<<(8-bitsleft))) == 0)));
1942 1944 }
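Because ip_addr_match() is self-contained, it ports directly to userland. The sketch below substitutes memcmp for the kernel bcmp and demonstrates the whole-byte compare followed by the partial-byte mask:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Same logic as ip_addr_match() above, memcmp in place of bcmp. */
    static int
    prefix_match(const uint8_t *addr1, int pfxlen, const uint8_t *addr2)
    {
        int offset = pfxlen >> 3;   /* whole bytes covered by the prefix */
        int bitsleft = pfxlen & 7;  /* leftover bits in the next byte */

        return (memcmp(addr1, addr2, offset) == 0 &&
            (bitsleft == 0 ||
            ((addr1[offset] ^ addr2[offset]) &
            (0xff << (8 - bitsleft))) == 0));
    }

    int
    main(void)
    {
        uint8_t net[4] = { 192, 168, 4, 0 };
        uint8_t a[4] = { 192, 168, 5, 9 };

        /* 192.168.5.9 is inside 192.168.4.0/22 but not 192.168.4.0/24. */
        printf("/22: %d\n", prefix_match(net, 22, a));
        printf("/24: %d\n", prefix_match(net, 24, a));
        return (0);
    }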
1943 1945
1944 1946 static ipsec_policy_t *
1945 1947 ipsec_find_policy_chain(ipsec_policy_t *best, ipsec_policy_t *chain,
1946 1948 ipsec_selector_t *sel, boolean_t is_icmp_inv_acq)
1947 1949 {
1948 1950 ipsec_selkey_t *isel;
1949 1951 ipsec_policy_t *p;
1950 1952 int bpri = best ? best->ipsp_prio : 0;
1951 1953
1952 1954 for (p = chain; p != NULL; p = p->ipsp_hash.hash_next) {
1953 1955 uint32_t valid;
1954 1956
1955 1957 if (p->ipsp_prio <= bpri)
1956 1958 continue;
1957 1959 isel = &p->ipsp_sel->ipsl_key;
1958 1960 valid = isel->ipsl_valid;
1959 1961
1960 1962 if ((valid & IPSL_PROTOCOL) &&
1961 1963 (isel->ipsl_proto != sel->ips_protocol))
1962 1964 continue;
1963 1965
1964 1966 if ((valid & IPSL_REMOTE_ADDR) &&
1965 1967 !ip_addr_match((uint8_t *)&isel->ipsl_remote,
1966 1968 isel->ipsl_remote_pfxlen, &sel->ips_remote_addr_v6))
1967 1969 continue;
1968 1970
1969 1971 if ((valid & IPSL_LOCAL_ADDR) &&
1970 1972 !ip_addr_match((uint8_t *)&isel->ipsl_local,
1971 1973 isel->ipsl_local_pfxlen, &sel->ips_local_addr_v6))
1972 1974 continue;
1973 1975
1974 1976 if ((valid & IPSL_REMOTE_PORT) &&
1975 1977 isel->ipsl_rport != sel->ips_remote_port)
1976 1978 continue;
1977 1979
1978 1980 if ((valid & IPSL_LOCAL_PORT) &&
1979 1981 isel->ipsl_lport != sel->ips_local_port)
1980 1982 continue;
1981 1983
1982 1984 if (!is_icmp_inv_acq) {
1983 1985 if ((valid & IPSL_ICMP_TYPE) &&
1984 1986 (isel->ipsl_icmp_type > sel->ips_icmp_type ||
1985 1987 isel->ipsl_icmp_type_end < sel->ips_icmp_type)) {
1986 1988 continue;
1987 1989 }
1988 1990
1989 1991 if ((valid & IPSL_ICMP_CODE) &&
1990 1992 (isel->ipsl_icmp_code > sel->ips_icmp_code ||
1991 1993 isel->ipsl_icmp_code_end <
1992 1994 sel->ips_icmp_code)) {
1993 1995 continue;
1994 1996 }
1995 1997 } else {
1996 1998 /*
1997 1999 * special case for icmp inverse acquire
1998 2000 * we only want policies that aren't drop/pass
1999 2001 */
2000 2002 if (p->ipsp_act->ipa_act.ipa_type != IPSEC_ACT_APPLY)
2001 2003 continue;
2002 2004 }
2003 2005
2004 2006 /* we matched all the packet-port-field selectors! */
2005 2007 best = p;
2006 2008 bpri = p->ipsp_prio;
2007 2009 }
2008 2010
2009 2011 return (best);
2010 2012 }
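Every selector test in the loop above has the same shape: if the field is marked valid and does not match, skip the policy; otherwise the policy stays in the running, and the highest priority seen wins. A stripped-down sketch of that control structure, with a hypothetical two-field selector instead of the kernel's ipsec_selkey_t:

    #include <stddef.h>
    #include <stdint.h>

    #define VALID_PROTO 0x1
    #define VALID_PORT  0x2

    /* Hypothetical, reduced policy node for illustration. */
    typedef struct pol {
        struct pol  *next;
        uint32_t    prio;
        uint32_t    valid;      /* which selector fields to test */
        uint8_t     proto;
        uint16_t    port;
    } pol_t;

    static pol_t *
    find_best(pol_t *best, pol_t *chain, uint8_t proto, uint16_t port)
    {
        uint32_t bpri = (best != NULL) ? best->prio : 0;
        pol_t *p;

        for (p = chain; p != NULL; p = p->next) {
            if (p->prio <= bpri)
                continue;   /* can't beat the current best */
            if ((p->valid & VALID_PROTO) && p->proto != proto)
                continue;
            if ((p->valid & VALID_PORT) && p->port != port)
                continue;
            best = p;       /* matched every valid selector */
            bpri = p->prio;
        }
        return (best);
    }

    int
    main(void)
    {
        pol_t p2 = { NULL, 20, VALID_PROTO | VALID_PORT, 6, 443 };
        pol_t p1 = { &p2, 10, VALID_PROTO, 6, 0 };

        /* Only p1 matches proto 6/port 80; p2 requires port 443. */
        return (find_best(NULL, &p1, 6, 80) == &p1 ? 0 : 1);
    }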
2011 2013
2012 2014 /*
2013 2015 * Try to find and return the best policy entry under a given policy
2014 2016 * root for a given set of selectors; the first parameter "best" is
2015 2017 * the current best policy so far. If "best" is non-null, we have a
2016 2018 * reference to it. We return a reference to a policy; if that policy
2017 2019 * is not the original "best", we need to release that reference
2018 2020 * before returning.
2019 2021 */
2020 2022 ipsec_policy_t *
2021 2023 ipsec_find_policy_head(ipsec_policy_t *best, ipsec_policy_head_t *head,
2022 2024 int direction, ipsec_selector_t *sel)
2023 2025 {
2024 2026 ipsec_policy_t *curbest;
2025 2027 ipsec_policy_root_t *root;
2026 2028 uint8_t is_icmp_inv_acq = sel->ips_is_icmp_inv_acq;
2027 2029 int af = sel->ips_isv4 ? IPSEC_AF_V4 : IPSEC_AF_V6;
2028 2030
2029 2031 curbest = best;
2030 2032 root = &head->iph_root[direction];
2031 2033
2032 2034 #ifdef DEBUG
2033 2035 if (is_icmp_inv_acq) {
2034 2036 if (sel->ips_isv4) {
2035 2037 if (sel->ips_protocol != IPPROTO_ICMP) {
2036 2038 cmn_err(CE_WARN, "ipsec_find_policy_head:"
2037 2039 " expecting icmp, got %d",
2038 2040 sel->ips_protocol);
2039 2041 }
2040 2042 } else {
2041 2043 if (sel->ips_protocol != IPPROTO_ICMPV6) {
2042 2044 cmn_err(CE_WARN, "ipsec_find_policy_head:"
2043 2045 " expecting icmpv6, got %d",
2044 2046 sel->ips_protocol);
2045 2047 }
2046 2048 }
2047 2049 }
2048 2050 #endif
2049 2051
2050 2052 rw_enter(&head->iph_lock, RW_READER);
2051 2053
2052 2054 if (root->ipr_nchains > 0) {
2053 2055 curbest = ipsec_find_policy_chain(curbest,
2054 2056 root->ipr_hash[selector_hash(sel, root)].hash_head, sel,
2055 2057 is_icmp_inv_acq);
2056 2058 }
2057 2059 curbest = ipsec_find_policy_chain(curbest, root->ipr_nonhash[af], sel,
2058 2060 is_icmp_inv_acq);
2059 2061
2060 2062 /*
2061 2063 * Adjust reference counts if we found anything new.
2062 2064 */
2063 2065 if (curbest != best) {
2064 2066 ASSERT(curbest != NULL);
2065 2067 IPPOL_REFHOLD(curbest);
2066 2068
2067 2069 if (best != NULL) {
2068 2070 IPPOL_REFRELE(best);
2069 2071 }
2070 2072 }
2071 2073
2072 2074 rw_exit(&head->iph_lock);
2073 2075
2074 2076 return (curbest);
2075 2077 }
2076 2078
2077 2079 /*
2078 2080 * Find the best system policy (either global or per-interface) which
2079 2081 * applies to the given selector; look in all the relevant policy roots
2080 2082 * to figure out which policy wins.
2081 2083 *
2082 2084 * Returns a reference to a policy; caller must release this
2083 2085 * reference when done.
2084 2086 */
2085 2087 ipsec_policy_t *
2086 2088 ipsec_find_policy(int direction, const conn_t *connp, ipsec_selector_t *sel,
2087 2089 netstack_t *ns)
2088 2090 {
2089 2091 ipsec_policy_t *p;
2090 2092 ipsec_stack_t *ipss = ns->netstack_ipsec;
2091 2093
2092 2094 p = ipsec_find_policy_head(NULL, &ipss->ipsec_system_policy,
2093 2095 direction, sel);
2094 2096 if ((connp != NULL) && (connp->conn_policy != NULL)) {
2095 2097 p = ipsec_find_policy_head(p, connp->conn_policy,
2096 2098 direction, sel);
2097 2099 }
2098 2100
2099 2101 return (p);
2100 2102 }
2101 2103
2102 2104 /*
2103 2105 * Check with global policy and see whether this inbound
2104 2106 * packet meets the policy constraints.
2105 2107 *
2106 2108 * Locate appropriate policy from global policy, supplemented by the
2107 2109 * conn's configured and/or cached policy if the conn is supplied.
2108 2110 *
2109 2111 * Dispatch to ipsec_check_ipsecin_policy if we have policy and an
2110 2112 * encrypted packet to see if they match.
2111 2113 *
2112 2114 * Otherwise, see if the policy allows cleartext; if not, drop it on the
2113 2115 * floor.
2114 2116 */
2115 2117 mblk_t *
2116 2118 ipsec_check_global_policy(mblk_t *data_mp, conn_t *connp,
2117 2119 ipha_t *ipha, ip6_t *ip6h, ip_recv_attr_t *ira, netstack_t *ns)
2118 2120 {
2119 2121 ipsec_policy_t *p;
2120 2122 ipsec_selector_t sel;
2121 2123 boolean_t policy_present;
2122 2124 kstat_named_t *counter;
2123 2125 uint64_t pkt_unique;
2124 2126 ip_stack_t *ipst = ns->netstack_ip;
2125 2127 ipsec_stack_t *ipss = ns->netstack_ipsec;
2126 2128
2127 2129 sel.ips_is_icmp_inv_acq = 0;
2128 2130
2129 2131 ASSERT((ipha == NULL && ip6h != NULL) ||
2130 2132 (ip6h == NULL && ipha != NULL));
2131 2133
2132 2134 if (ipha != NULL)
2133 2135 policy_present = ipss->ipsec_inbound_v4_policy_present;
2134 2136 else
2135 2137 policy_present = ipss->ipsec_inbound_v6_policy_present;
2136 2138
2137 2139 if (!policy_present && connp == NULL) {
2138 2140 /*
2139 2141 * No global policy and no per-socket policy;
2140 2142 * just pass it back (but we shouldn't get here in that case)
2141 2143 */
2142 2144 return (data_mp);
2143 2145 }
2144 2146
2145 2147 /*
2146 2148 * If we have cached policy, use it.
2147 2149 * Otherwise consult system policy.
2148 2150 */
2149 2151 if ((connp != NULL) && (connp->conn_latch != NULL)) {
2150 2152 p = connp->conn_latch_in_policy;
2151 2153 if (p != NULL) {
2152 2154 IPPOL_REFHOLD(p);
2153 2155 }
2154 2156 /*
2155 2157 * Fudge sel for UNIQUE_ID setting below.
2156 2158 */
2157 2159 pkt_unique = conn_to_unique(connp, data_mp, ipha, ip6h);
2158 2160 } else {
2159 2161 /* Initialize the ports in the selector */
2160 2162 if (ipsec_init_inbound_sel(&sel, data_mp, ipha, ip6h,
2161 2163 SEL_NONE) == SELRET_NOMEM) {
2162 2164 /*
2163 2165 * Technically not a policy mismatch, but it is
2164 2166 * an internal failure.
2165 2167 */
2166 2168 ipsec_log_policy_failure(IPSEC_POLICY_MISMATCH,
2167 2169 "ipsec_init_inbound_sel", ipha, ip6h, B_TRUE, ns);
2168 2170 counter = DROPPER(ipss, ipds_spd_nomem);
2169 2171 goto fail;
2170 2172 }
2171 2173
2172 2174 /*
2173 2175 * Find the policy which best applies.
2174 2176 *
2175 2177 * If we find global policy, we should look at both
2176 2178 * local policy and global policy and see which is
2177 2179 * stronger and match accordingly.
2178 2180 *
2179 2181 * If we don't find a global policy, check with
2180 2182 * local policy alone.
2181 2183 */
2182 2184
2183 2185 p = ipsec_find_policy(IPSEC_TYPE_INBOUND, connp, &sel, ns);
2184 2186 pkt_unique = SA_UNIQUE_ID(sel.ips_remote_port,
2185 2187 sel.ips_local_port, sel.ips_protocol, 0);
2186 2188 }
2187 2189
2188 2190 if (p == NULL) {
2189 2191 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
2190 2192 /*
2191 2193 * We have no policy; default to succeeding.
2192 2194 * XXX paranoid system design doesn't do this.
2193 2195 */
2194 2196 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2195 2197 return (data_mp);
2196 2198 } else {
2197 2199 counter = DROPPER(ipss, ipds_spd_got_secure);
2198 2200 ipsec_log_policy_failure(IPSEC_POLICY_NOT_NEEDED,
2199 2201 "ipsec_check_global_policy", ipha, ip6h, B_TRUE,
2200 2202 ns);
2201 2203 goto fail;
2202 2204 }
2203 2205 }
2204 2206 if (ira->ira_flags & IRAF_IPSEC_SECURE) {
2205 2207 return (ipsec_check_ipsecin_policy(data_mp, p, ipha, ip6h,
2206 2208 pkt_unique, ira, ns));
2207 2209 }
2208 2210 if (p->ipsp_act->ipa_allow_clear) {
2209 2211 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2210 2212 IPPOL_REFRELE(p);
2211 2213 return (data_mp);
2212 2214 }
2213 2215 IPPOL_REFRELE(p);
2214 2216 /*
2215 2217 * If we reach here, we will drop the packet because it failed the
2216 2218 * global policy check because the packet was cleartext, and it
2217 2219 * should not have been.
2218 2220 */
2219 2221 ipsec_log_policy_failure(IPSEC_POLICY_MISMATCH,
2220 2222 "ipsec_check_global_policy", ipha, ip6h, B_FALSE, ns);
2221 2223 counter = DROPPER(ipss, ipds_spd_got_clear);
2222 2224
2223 2225 fail:
2224 2226 ip_drop_packet(data_mp, B_TRUE, NULL, counter,
2225 2227 &ipss->ipsec_spd_dropper);
2226 2228 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2227 2229 return (NULL);
2228 2230 }
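Stripped of the MIB counters and logging, the tail of ipsec_check_global_policy() is a three-way verdict. A schematic sketch of that flow, with hypothetical types rather than kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { ACCEPT, CHECK_IPSECIN, DROP } verdict_t;

    /* Decision at the tail of the global policy check, schematically. */
    static verdict_t
    global_policy_verdict(bool have_policy, bool pkt_secure, bool allow_clear)
    {
        if (!have_policy)
            return (pkt_secure ? DROP : ACCEPT); /* unneeded protection */
        if (pkt_secure)
            return (CHECK_IPSECIN); /* match the SAs against the actions */
        return (allow_clear ? ACCEPT : DROP);   /* cleartext vs. policy */
    }

    int
    main(void)
    {
        printf("%d\n", global_policy_verdict(true, false, false)); /* DROP */
        printf("%d\n", global_policy_verdict(true, true, false));  /* CHECK */
        return (0);
    }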
2229 2231
2230 2232 /*
2231 2233 * We check whether an inbound datagram is one we are
2232 2234 * willing to accept in the clear. If it is secure, it is
2233 2235 * the job of IPsec to log information appropriately if it
2234 2236 * suspects that the datagram may not be genuine.
2235 2237 *
2236 2238 * It is called only while fanning out to a ULP that
2237 2239 * accepts only secure data, when the incoming datagram
2238 2240 * is clear. Usually we never accept clear datagrams in
2239 2241 * such cases; ICMP is the only exception.
2240 2242 *
2241 2243 * NOTE: We don't call this function if the client (ULP)
2242 2244 * is willing to accept things in clear.
2243 2245 */
2244 2246 boolean_t
2245 2247 ipsec_inbound_accept_clear(mblk_t *mp, ipha_t *ipha, ip6_t *ip6h)
2246 2248 {
2247 2249 ushort_t iph_hdr_length;
2248 2250 icmph_t *icmph;
2249 2251 icmp6_t *icmp6;
2250 2252 uint8_t *nexthdrp;
2251 2253
2252 2254 ASSERT((ipha != NULL && ip6h == NULL) ||
2253 2255 (ipha == NULL && ip6h != NULL));
2254 2256
2255 2257 if (ip6h != NULL) {
2256 2258 iph_hdr_length = ip_hdr_length_v6(mp, ip6h);
2257 2259 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length,
2258 2260 &nexthdrp)) {
2259 2261 return (B_FALSE);
2260 2262 }
2261 2263 if (*nexthdrp != IPPROTO_ICMPV6)
2262 2264 return (B_FALSE);
2263 2265 icmp6 = (icmp6_t *)(&mp->b_rptr[iph_hdr_length]);
2264 2266 /* Match IPv6 ICMP policy as closely to IPv4 as possible. */
2265 2267 switch (icmp6->icmp6_type) {
2266 2268 case ICMP6_PARAM_PROB:
2267 2269 /* Corresponds to port/proto unreach in IPv4. */
2268 2270 case ICMP6_ECHO_REQUEST:
2269 2271 /* Just like IPv4. */
2270 2272 return (B_FALSE);
2271 2273
2272 2274 case MLD_LISTENER_QUERY:
2273 2275 case MLD_LISTENER_REPORT:
2274 2276 case MLD_LISTENER_REDUCTION:
2275 2277 /*
2276 2278 * XXX Separate NDD in IPv4; what about here?
2277 2279 * Plus, mcast is important to ND.
2278 2280 */
2279 2281 case ICMP6_DST_UNREACH:
2280 2282 /* Corresponds to HOST/NET unreachable in IPv4. */
2281 2283 case ICMP6_PACKET_TOO_BIG:
2282 2284 case ICMP6_ECHO_REPLY:
2283 2285 /* These are trusted in IPv4. */
2284 2286 case ND_ROUTER_SOLICIT:
2285 2287 case ND_ROUTER_ADVERT:
2286 2288 case ND_NEIGHBOR_SOLICIT:
2287 2289 case ND_NEIGHBOR_ADVERT:
2288 2290 case ND_REDIRECT:
2289 2291 /* Trust ND messages for now. */
2290 2292 case ICMP6_TIME_EXCEEDED:
2291 2293 default:
2292 2294 return (B_TRUE);
2293 2295 }
2294 2296 } else {
2295 2297 /*
2296 2298 * If it is not ICMP, fail this request.
2297 2299 */
2298 2300 if (ipha->ipha_protocol != IPPROTO_ICMP) {
2299 2301 #ifdef FRAGCACHE_DEBUG
2300 2302 cmn_err(CE_WARN, "Dropping - ipha_proto = %d\n",
2301 2303 ipha->ipha_protocol);
2302 2304 #endif
2303 2305 return (B_FALSE);
2304 2306 }
2305 2307 iph_hdr_length = IPH_HDR_LENGTH(ipha);
2306 2308 icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length];
2307 2309 /*
2308 2310 * It is an insecure icmp message. Check to see whether we are
2309 2311 * willing to accept this one.
2310 2312 */
2311 2313
2312 2314 switch (icmph->icmph_type) {
2313 2315 case ICMP_ECHO_REPLY:
2314 2316 case ICMP_TIME_STAMP_REPLY:
2315 2317 case ICMP_INFO_REPLY:
2316 2318 case ICMP_ROUTER_ADVERTISEMENT:
2317 2319 /*
2318 2320 * We should not encourage clear replies if this
2319 2321 * client expects secure. If somebody is replying
2320 2322 * in clear, a malicious user watching both the
2321 2323 * request and reply can mount chosen-plaintext attacks.
2322 2324 * With global policy we might be just expecting secure
2323 2325 * but sending out clear. We don't know what the right
2324 2326 * thing is. We can't do much here, as we can't control
2325 2327 * the sender. Until we are sure of what to do,
2326 2328 * accept them.
2327 2329 */
2328 2330 return (B_TRUE);
2329 2331 case ICMP_ECHO_REQUEST:
2330 2332 case ICMP_TIME_STAMP_REQUEST:
2331 2333 case ICMP_INFO_REQUEST:
2332 2334 case ICMP_ADDRESS_MASK_REQUEST:
2333 2335 case ICMP_ROUTER_SOLICITATION:
2334 2336 case ICMP_ADDRESS_MASK_REPLY:
2335 2337 /*
2336 2338 * Don't accept this as somebody could be sending
2337 2339 * us plain text to get encrypted data. If we reply,
2338 2340 * it will lead to chosen plain text attack.
2339 2341 */
2340 2342 return (B_FALSE);
2341 2343 case ICMP_DEST_UNREACHABLE:
2342 2344 switch (icmph->icmph_code) {
2343 2345 case ICMP_FRAGMENTATION_NEEDED:
2344 2346 /*
2345 2347 * Be in sync with icmp_inbound, where we have
2346 2348 * already set dce_pmtu
2347 2349 */
2348 2350 #ifdef FRAGCACHE_DEBUG
2349 2351 cmn_err(CE_WARN, "ICMP frag needed\n");
2350 2352 #endif
2351 2353 return (B_TRUE);
2352 2354 case ICMP_HOST_UNREACHABLE:
2353 2355 case ICMP_NET_UNREACHABLE:
2354 2356 /*
2355 2357 * By accepting, we could reset a connection.
2356 2358 * How do we solve the problem of some
2357 2359 * intermediate router sending insecure ICMP
2358 2360 * messages?
2359 2361 */
2360 2362 return (B_TRUE);
2361 2363 case ICMP_PORT_UNREACHABLE:
2362 2364 case ICMP_PROTOCOL_UNREACHABLE:
2363 2365 default:
2364 2366 return (B_FALSE);
2365 2367 }
2366 2368 case ICMP_SOURCE_QUENCH:
2367 2369 /*
2368 2370 * If this is an attack, TCP will slow start
2369 2371 * because of this. Is it very harmful?
2370 2372 */
2371 2373 return (B_TRUE);
2372 2374 case ICMP_PARAM_PROBLEM:
2373 2375 return (B_FALSE);
2374 2376 case ICMP_TIME_EXCEEDED:
2375 2377 return (B_TRUE);
2376 2378 case ICMP_REDIRECT:
2377 2379 return (B_FALSE);
2378 2380 default:
2379 2381 return (B_FALSE);
2380 2382 }
2381 2383 }
2382 2384 }
2383 2385
2384 2386 void
2385 2387 ipsec_latch_ids(ipsec_latch_t *ipl, ipsid_t *local, ipsid_t *remote)
2386 2388 {
2387 2389 mutex_enter(&ipl->ipl_lock);
2388 2390
2389 2391 if (ipl->ipl_ids_latched) {
2390 2392 /* I lost, someone else got here before me */
2391 2393 mutex_exit(&ipl->ipl_lock);
2392 2394 return;
2393 2395 }
2394 2396
2395 2397 if (local != NULL)
2396 2398 IPSID_REFHOLD(local);
2397 2399 if (remote != NULL)
2398 2400 IPSID_REFHOLD(remote);
2399 2401
2400 2402 ipl->ipl_local_cid = local;
2401 2403 ipl->ipl_remote_cid = remote;
2402 2404 ipl->ipl_ids_latched = B_TRUE;
2403 2405 mutex_exit(&ipl->ipl_lock);
2404 2406 }
2405 2407
2406 2408 void
2407 2409 ipsec_latch_inbound(conn_t *connp, ip_recv_attr_t *ira)
2408 2410 {
2409 2411 ipsa_t *sa;
2410 2412 ipsec_latch_t *ipl = connp->conn_latch;
2411 2413
2412 2414 if (!ipl->ipl_ids_latched) {
2413 2415 ipsid_t *local = NULL;
2414 2416 ipsid_t *remote = NULL;
2415 2417
2416 2418 if (!(ira->ira_flags & IRAF_LOOPBACK)) {
2417 2419 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
2418 2420 if (ira->ira_ipsec_esp_sa != NULL)
2419 2421 sa = ira->ira_ipsec_esp_sa;
2420 2422 else
2421 2423 sa = ira->ira_ipsec_ah_sa;
2422 2424 ASSERT(sa != NULL);
2423 2425 local = sa->ipsa_dst_cid;
2424 2426 remote = sa->ipsa_src_cid;
2425 2427 }
2426 2428 ipsec_latch_ids(ipl, local, remote);
2427 2429 }
2428 2430 if (ira->ira_flags & IRAF_IPSEC_SECURE) {
2429 2431 if (connp->conn_latch_in_action != NULL) {
2430 2432 /*
2431 2433 * Previously cached action. This is probably
2432 2434 * harmless, but in DEBUG kernels, check for
2433 2435 * action equality.
2434 2436 *
2435 2437 * Preserve the existing action to preserve latch
2436 2438 * invariance.
2437 2439 */
2438 2440 ASSERT(connp->conn_latch_in_action ==
2439 2441 ira->ira_ipsec_action);
2440 2442 return;
2441 2443 }
2442 2444 connp->conn_latch_in_action = ira->ira_ipsec_action;
2443 2445 IPACT_REFHOLD(connp->conn_latch_in_action);
2444 2446 }
2445 2447 }
2446 2448
2447 2449 /*
2448 2450 * Check whether the policy constraints are met for an
2449 2451 * inbound datagram; called from IP in numerous places.
2450 2452 *
2451 2453 * Note that this is not a chokepoint for inbound policy checks;
2452 2454 * see also ipsec_check_ipsecin_latch() and ipsec_check_global_policy()
2453 2455 */
2454 2456 mblk_t *
2455 2457 ipsec_check_inbound_policy(mblk_t *mp, conn_t *connp,
2456 2458 ipha_t *ipha, ip6_t *ip6h, ip_recv_attr_t *ira)
2457 2459 {
2458 2460 boolean_t ret;
2459 2461 ipsec_latch_t *ipl;
2460 2462 ipsec_action_t *ap;
2461 2463 uint64_t unique_id;
2462 2464 ipsec_stack_t *ipss;
2463 2465 ip_stack_t *ipst;
2464 2466 netstack_t *ns;
2465 2467 ipsec_policy_head_t *policy_head;
2466 2468 ipsec_policy_t *p = NULL;
2467 2469
2468 2470 ASSERT(connp != NULL);
2469 2471 ns = connp->conn_netstack;
2470 2472 ipss = ns->netstack_ipsec;
2471 2473 ipst = ns->netstack_ip;
2472 2474
2473 2475 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
2474 2476 /*
2475 2477 * This is the case where the incoming datagram is
2476 2478 * cleartext and we need to see whether this client
2477 2479 * would like to receive such untrustworthy things from
2478 2480 * the wire.
2479 2481 */
2480 2482 ASSERT(mp != NULL);
2481 2483
2482 2484 mutex_enter(&connp->conn_lock);
2483 2485 if (connp->conn_state_flags & CONN_CONDEMNED) {
2484 2486 mutex_exit(&connp->conn_lock);
2485 2487 ip_drop_packet(mp, B_TRUE, NULL,
2486 2488 DROPPER(ipss, ipds_spd_got_clear),
2487 2489 &ipss->ipsec_spd_dropper);
2488 2490 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2489 2491 return (NULL);
2490 2492 }
2491 2493 if (connp->conn_latch != NULL) {
2492 2494 /* Hold a reference in case the conn is closing */
2493 2495 p = connp->conn_latch_in_policy;
2494 2496 if (p != NULL)
2495 2497 IPPOL_REFHOLD(p);
2496 2498 mutex_exit(&connp->conn_lock);
2497 2499 /*
2498 2500 * Policy is cached in the conn.
2499 2501 */
2500 2502 if (p != NULL && !p->ipsp_act->ipa_allow_clear) {
2501 2503 ret = ipsec_inbound_accept_clear(mp,
2502 2504 ipha, ip6h);
2503 2505 if (ret) {
2504 2506 BUMP_MIB(&ipst->ips_ip_mib,
2505 2507 ipsecInSucceeded);
2506 2508 IPPOL_REFRELE(p);
2507 2509 return (mp);
2508 2510 } else {
2509 2511 ipsec_log_policy_failure(
2510 2512 IPSEC_POLICY_MISMATCH,
2511 2513 "ipsec_check_inbound_policy", ipha,
2512 2514 ip6h, B_FALSE, ns);
2513 2515 ip_drop_packet(mp, B_TRUE, NULL,
2514 2516 DROPPER(ipss, ipds_spd_got_clear),
2515 2517 &ipss->ipsec_spd_dropper);
2516 2518 BUMP_MIB(&ipst->ips_ip_mib,
2517 2519 ipsecInFailed);
2518 2520 IPPOL_REFRELE(p);
2519 2521 return (NULL);
2520 2522 }
2521 2523 } else {
2522 2524 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2523 2525 if (p != NULL)
2524 2526 IPPOL_REFRELE(p);
2525 2527 return (mp);
2526 2528 }
2527 2529 } else {
2528 2530 policy_head = connp->conn_policy;
2529 2531
2530 2532 /* Hold a reference in case the conn is closing */
2531 2533 if (policy_head != NULL)
2532 2534 IPPH_REFHOLD(policy_head);
2533 2535 mutex_exit(&connp->conn_lock);
2534 2536 /*
2535 2537 * As this is a non-hardbound connection we need
2536 2538 * to look at both per-socket policy and global
2537 2539 * policy.
2538 2540 */
2539 2541 mp = ipsec_check_global_policy(mp, connp,
2540 2542 ipha, ip6h, ira, ns);
2541 2543 if (policy_head != NULL)
2542 2544 IPPH_REFRELE(policy_head, ns);
2543 2545 return (mp);
2544 2546 }
2545 2547 }
2546 2548
2547 2549 mutex_enter(&connp->conn_lock);
2548 2550 /* Connection is closing */
2549 2551 if (connp->conn_state_flags & CONN_CONDEMNED) {
2550 2552 mutex_exit(&connp->conn_lock);
2551 2553 ip_drop_packet(mp, B_TRUE, NULL,
2552 2554 DROPPER(ipss, ipds_spd_got_clear),
2553 2555 &ipss->ipsec_spd_dropper);
2554 2556 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2555 2557 return (NULL);
2556 2558 }
2557 2559
2558 2560 /*
2559 2561 * Once a connection is latched it remains so for life; the conn_latch
2560 2562 * pointer on the conn does not change. We simply initialize ipl here
2561 2563 * because the earlier initialization was done only in the cleartext case.
2562 2564 */
2563 2565 if ((ipl = connp->conn_latch) == NULL) {
2564 2566 mblk_t *retmp;
2565 2567 policy_head = connp->conn_policy;
2566 2568
2567 2569 /* Hold a reference in case the conn is closing */
2568 2570 if (policy_head != NULL)
2569 2571 IPPH_REFHOLD(policy_head);
2570 2572 mutex_exit(&connp->conn_lock);
2571 2573 /*
2572 2574 * We don't have policies cached in the conn
2573 2575 * for this stream. So, look at the global
2574 2576 * policy. It will check against conn or global
2575 2577 * depending on whichever is stronger.
2576 2578 */
2577 2579 retmp = ipsec_check_global_policy(mp, connp,
2578 2580 ipha, ip6h, ira, ns);
2579 2581 if (policy_head != NULL)
2580 2582 IPPH_REFRELE(policy_head, ns);
2581 2583 return (retmp);
2582 2584 }
2583 2585
2584 2586 IPLATCH_REFHOLD(ipl);
2585 2587 /* Hold reference on conn_latch_in_action in case conn is closing */
2586 2588 ap = connp->conn_latch_in_action;
2587 2589 if (ap != NULL)
2588 2590 IPACT_REFHOLD(ap);
2589 2591 mutex_exit(&connp->conn_lock);
2590 2592
2591 2593 if (ap != NULL) {
2592 2594 /* Policy is cached & latched; fast(er) path */
2593 2595 const char *reason;
2594 2596 kstat_named_t *counter;
2595 2597
2596 2598 if (ipsec_check_ipsecin_latch(ira, mp, ipl, ap,
2597 2599 ipha, ip6h, &reason, &counter, connp, ns)) {
2598 2600 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2599 2601 IPLATCH_REFRELE(ipl);
2600 2602 IPACT_REFRELE(ap);
2601 2603 return (mp);
2602 2604 }
2603 2605 ipsec_rl_strlog(ns, IP_MOD_ID, 0, 0,
2604 2606 SL_ERROR|SL_WARN|SL_CONSOLE,
2605 2607 "ipsec inbound policy mismatch: %s, packet dropped\n",
2606 2608 reason);
2607 2609 ip_drop_packet(mp, B_TRUE, NULL, counter,
2608 2610 &ipss->ipsec_spd_dropper);
2609 2611 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2610 2612 IPLATCH_REFRELE(ipl);
2611 2613 IPACT_REFRELE(ap);
2612 2614 return (NULL);
2613 2615 }
2614 2616 if ((p = connp->conn_latch_in_policy) == NULL) {
2615 2617 ipsec_weird_null_inbound_policy++;
2616 2618 IPLATCH_REFRELE(ipl);
2617 2619 return (mp);
2618 2620 }
2619 2621
2620 2622 unique_id = conn_to_unique(connp, mp, ipha, ip6h);
2621 2623 IPPOL_REFHOLD(p);
2622 2624 mp = ipsec_check_ipsecin_policy(mp, p, ipha, ip6h, unique_id, ira, ns);
2623 2625 /*
2624 2626 * NOTE: ipsecIn{Failed,Succeeded} bumped by
2625 2627 * ipsec_check_ipsecin_policy().
2626 2628 */
2627 2629 if (mp != NULL)
2628 2630 ipsec_latch_inbound(connp, ira);
2629 2631 IPLATCH_REFRELE(ipl);
2630 2632 return (mp);
2631 2633 }
2632 2634
2633 2635 /*
2634 2636 * Count the bytes that precede hptr in the mblk chain; handles cases like tunnel-mode and ICMP.
2635 2637 */
2636 2638 static int
2637 2639 prepended_length(mblk_t *mp, uintptr_t hptr)
2638 2640 {
2639 2641 int rc = 0;
2640 2642
2641 2643 while (mp != NULL) {
2642 2644 if (hptr >= (uintptr_t)mp->b_rptr && hptr <
2643 2645 (uintptr_t)mp->b_wptr) {
2644 2646 rc += (int)(hptr - (uintptr_t)mp->b_rptr);
2645 2647 break; /* out of while loop */
2646 2648 }
2647 2649 rc += (int)MBLKL(mp);
2648 2650 mp = mp->b_cont;
2649 2651 }
2650 2652
2651 2653 if (mp == NULL) {
2652 2654 /*
2653 2655 * IF (big IF) we make it here by naturally exiting the loop,
2654 2656 * then ip6h isn't in the mblk chain "mp" at all.
2655 2657 *
2656 2658 * The only case where this happens is with a reversed IP
2657 2659 * header that gets passed up by inbound ICMP processing.
2658 2660 * This unfortunately triggers longstanding bug 6478464. For
2659 2661 * now, just pass up 0 for the answer.
2660 2662 */
2661 2663 #ifdef DEBUG_NOT_UNTIL_6478464
2662 2664 ASSERT(mp != NULL);
2663 2665 #endif
2664 2666 rc = 0;
2665 2667 }
2666 2668
2667 2669 return (rc);
2668 2670 }
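A userland analog makes the walk clearer: sum whole blocks until the target pointer falls inside one, then add the partial offset. The two-buffer chain below is a stand-in for an mblk chain (b_rptr/b_wptr/b_cont), not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    /* Minimal stand-in for an mblk: [rptr, wptr) is the valid data. */
    typedef struct blk {
        uint8_t     *rptr;
        uint8_t     *wptr;
        struct blk  *cont;
    } blk_t;

    /* Bytes in the chain preceding hptr; 0 if hptr isn't in the chain. */
    static int
    prepended_length(blk_t *mp, uintptr_t hptr)
    {
        int rc = 0;

        for (; mp != NULL; mp = mp->cont) {
            if (hptr >= (uintptr_t)mp->rptr &&
                hptr < (uintptr_t)mp->wptr)
                return (rc + (int)(hptr - (uintptr_t)mp->rptr));
            rc += (int)(mp->wptr - mp->rptr);
        }
        return (0); /* matches the kernel's bug-6478464 fallback */
    }

    int
    main(void)
    {
        uint8_t outer[8], inner[40];
        blk_t b2 = { inner, inner + sizeof (inner), NULL };
        blk_t b1 = { outer, outer + sizeof (outer), &b2 };

        /* A header 20 bytes into block 2 sits after 8 + 20 = 28 bytes. */
        printf("%d\n", prepended_length(&b1, (uintptr_t)(inner + 20)));
        return (0);
    }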
2669 2671
2670 2672 /*
2671 2673 * Returns:
2672 2674 *
2673 2675 * SELRET_NOMEM --> msgpullup() needed to gather things failed.
2674 2676 * SELRET_BADPKT --> If we're being called after tunnel-mode fragment
2675 2677 * gathering, the initial fragment is too short for
2676 2678 * useful data. Only returned if SEL_TUNNEL_FIRSTFRAG is
2677 2679 * set.
2678 2680 * SELRET_SUCCESS --> "sel" now has initialized IPsec selector data.
2679 2681 * SELRET_TUNFRAG --> This is a fragment in a tunnel-mode packet. Caller
2680 2682 * should put this packet in a fragment-gathering queue.
2681 2683 * Only returned if SEL_TUNNEL_MODE and SEL_PORT_POLICY
2682 2684 * are set.
2683 2685 *
2684 2686 * Note that ipha/ip6h can be in a different mblk (mp->b_cont) in the case
2685 2687 * of tunneled packets.
2686 2688 * Also, mp->b_rptr can be an ICMP error where ipha/ip6h is the packet in
2687 2689 * error past the ICMP error.
2688 2690 */
2689 2691 static selret_t
2690 2692 ipsec_init_inbound_sel(ipsec_selector_t *sel, mblk_t *mp, ipha_t *ipha,
2691 2693 ip6_t *ip6h, uint8_t sel_flags)
2692 2694 {
2693 2695 uint16_t *ports;
2694 2696 int outer_hdr_len = 0; /* For ICMP or tunnel-mode cases... */
2695 2697 ushort_t hdr_len;
2696 2698 mblk_t *spare_mp = NULL;
2697 2699 uint8_t *nexthdrp, *transportp;
2698 2700 uint8_t nexthdr;
2699 2701 uint8_t icmp_proto;
2700 2702 ip_pkt_t ipp;
2701 2703 boolean_t port_policy_present = (sel_flags & SEL_PORT_POLICY);
2702 2704 boolean_t is_icmp = (sel_flags & SEL_IS_ICMP);
2703 2705 boolean_t tunnel_mode = (sel_flags & SEL_TUNNEL_MODE);
2704 2706 boolean_t post_frag = (sel_flags & SEL_POST_FRAG);
2705 2707
2706 2708 ASSERT((ipha == NULL && ip6h != NULL) ||
2707 2709 (ipha != NULL && ip6h == NULL));
2708 2710
2709 2711 if (ip6h != NULL) {
2710 2712 outer_hdr_len = prepended_length(mp, (uintptr_t)ip6h);
2711 2713 nexthdr = ip6h->ip6_nxt;
2712 2714 icmp_proto = IPPROTO_ICMPV6;
2713 2715 sel->ips_isv4 = B_FALSE;
2714 2716 sel->ips_local_addr_v6 = ip6h->ip6_dst;
2715 2717 sel->ips_remote_addr_v6 = ip6h->ip6_src;
2716 2718
2717 2719 bzero(&ipp, sizeof (ipp));
2718 2720
2719 2721 switch (nexthdr) {
2720 2722 case IPPROTO_HOPOPTS:
2721 2723 case IPPROTO_ROUTING:
2722 2724 case IPPROTO_DSTOPTS:
2723 2725 case IPPROTO_FRAGMENT:
2724 2726 /*
2725 2727 * Use ip_hdr_length_nexthdr_v6(). And have a spare
2726 2728 * mblk that's contiguous to feed it
2727 2729 */
2728 2730 if ((spare_mp = msgpullup(mp, -1)) == NULL)
2729 2731 return (SELRET_NOMEM);
2730 2732 if (!ip_hdr_length_nexthdr_v6(spare_mp,
2731 2733 (ip6_t *)(spare_mp->b_rptr + outer_hdr_len),
2732 2734 &hdr_len, &nexthdrp)) {
2733 2735 /* Malformed packet - caller frees. */
2734 2736 ipsec_freemsg_chain(spare_mp);
2735 2737 return (SELRET_BADPKT);
2736 2738 }
2737 2739 /* Repopulate now that we have the whole packet */
2738 2740 ip6h = (ip6_t *)(spare_mp->b_rptr + outer_hdr_len);
2739 2741 (void) ip_find_hdr_v6(spare_mp, ip6h, B_FALSE, &ipp,
2740 2742 NULL);
2741 2743 nexthdr = *nexthdrp;
2742 2744 /* We can just extract based on hdr_len now. */
2743 2745 break;
2744 2746 default:
2745 2747 (void) ip_find_hdr_v6(mp, ip6h, B_FALSE, &ipp, NULL);
2746 2748 hdr_len = IPV6_HDR_LEN;
2747 2749 break;
2748 2750 }
2749 2751 if (port_policy_present && IS_V6_FRAGMENT(ipp) && !is_icmp) {
2750 2752 /* IPv6 Fragment */
2751 2753 ipsec_freemsg_chain(spare_mp);
2752 2754 return (SELRET_TUNFRAG);
2753 2755 }
2754 2756 transportp = (uint8_t *)ip6h + hdr_len;
2755 2757 } else {
2756 2758 outer_hdr_len = prepended_length(mp, (uintptr_t)ipha);
2757 2759 icmp_proto = IPPROTO_ICMP;
2758 2760 sel->ips_isv4 = B_TRUE;
2759 2761 sel->ips_local_addr_v4 = ipha->ipha_dst;
2760 2762 sel->ips_remote_addr_v4 = ipha->ipha_src;
2761 2763 nexthdr = ipha->ipha_protocol;
2762 2764 hdr_len = IPH_HDR_LENGTH(ipha);
2763 2765
2764 2766 if (port_policy_present &&
2765 2767 IS_V4_FRAGMENT(ipha->ipha_fragment_offset_and_flags) &&
2766 2768 !is_icmp) {
2767 2769 /* IPv4 Fragment */
2768 2770 ipsec_freemsg_chain(spare_mp);
2769 2771 return (SELRET_TUNFRAG);
2770 2772 }
2771 2773 transportp = (uint8_t *)ipha + hdr_len;
2772 2774 }
2773 2775 sel->ips_protocol = nexthdr;
2774 2776
2775 2777 if ((nexthdr != IPPROTO_TCP && nexthdr != IPPROTO_UDP &&
2776 2778 nexthdr != IPPROTO_SCTP && nexthdr != icmp_proto) ||
2777 2779 (!port_policy_present && !post_frag && tunnel_mode)) {
2778 2780 sel->ips_remote_port = sel->ips_local_port = 0;
2779 2781 ipsec_freemsg_chain(spare_mp);
2780 2782 return (SELRET_SUCCESS);
2781 2783 }
2782 2784
2783 2785 if (transportp + 4 > mp->b_wptr) {
2784 2786 /* If we didn't pullup a copy already, do so now. */
2785 2787 /*
2786 2788 * XXX performance, will upper-layers frequently split TCP/UDP
2787 2789 * apart from IP or options? If so, perhaps we should revisit
2788 2790 * the spare_mp strategy.
2789 2791 */
2790 2792 ipsec_hdr_pullup_needed++;
2791 2793 if (spare_mp == NULL &&
2792 2794 (spare_mp = msgpullup(mp, -1)) == NULL) {
2793 2795 return (SELRET_NOMEM);
2794 2796 }
2795 2797 transportp = &spare_mp->b_rptr[hdr_len + outer_hdr_len];
2796 2798 }
2797 2799
2798 2800 if (nexthdr == icmp_proto) {
2799 2801 sel->ips_icmp_type = *transportp++;
2800 2802 sel->ips_icmp_code = *transportp;
2801 2803 sel->ips_remote_port = sel->ips_local_port = 0;
2802 2804 } else {
2803 2805 ports = (uint16_t *)transportp;
2804 2806 sel->ips_remote_port = *ports++;
2805 2807 sel->ips_local_port = *ports;
2806 2808 }
2807 2809 ipsec_freemsg_chain(spare_mp);
2808 2810 return (SELRET_SUCCESS);
2809 2811 }
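The last step of the selector setup reduces to one of two reads at the transport header: ICMP contributes a type/code byte pair, everything else two 16-bit ports. A hedged sketch with a bare buffer (real code must honor mblk boundaries, which is what the pullup above is for):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct {
        uint16_t    remote_port;    /* network byte order, like the SPD */
        uint16_t    local_port;
        uint8_t     icmp_type;
        uint8_t     icmp_code;
    } mini_sel_t;

    static void
    fill_ports(mini_sel_t *sel, const uint8_t *transportp, int is_icmp)
    {
        if (is_icmp) {
            sel->icmp_type = transportp[0];
            sel->icmp_code = transportp[1];
            sel->remote_port = sel->local_port = 0;
        } else {
            /* Source port first, then destination, as on the wire. */
            memcpy(&sel->remote_port, transportp, 2);
            memcpy(&sel->local_port, transportp + 2, 2);
        }
    }

    int
    main(void)
    {
        uint8_t tcp_hdr[4] = { 0x00, 0x50, 0x1f, 0x90 };    /* 80 -> 8080 */
        mini_sel_t sel;

        fill_ports(&sel, tcp_hdr, 0);
        printf("%u %u\n", ntohs(sel.remote_port), ntohs(sel.local_port));
        return (0);
    }

The memcpy sidesteps alignment; the kernel can dereference a uint16_t pointer directly because the pulled-up header is suitably aligned.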
2810 2812
2811 2813 /*
2812 2814 * This is called with a b_next chain of messages from the fragcache code,
2813 2815 * hence it needs to discard a chain on error.
2814 2816 */
2815 2817 static boolean_t
2816 2818 ipsec_init_outbound_ports(ipsec_selector_t *sel, mblk_t *mp, ipha_t *ipha,
2817 2819 ip6_t *ip6h, int outer_hdr_len, ipsec_stack_t *ipss)
2818 2820 {
2819 2821 /*
2820 2822 * XXX cut&paste shared with ipsec_init_inbound_sel
2821 2823 */
2822 2824 uint16_t *ports;
2823 2825 ushort_t hdr_len;
2824 2826 mblk_t *spare_mp = NULL;
2825 2827 uint8_t *nexthdrp;
2826 2828 uint8_t nexthdr;
2827 2829 uint8_t *typecode;
2828 2830 uint8_t check_proto;
2829 2831
2830 2832 ASSERT((ipha == NULL && ip6h != NULL) ||
2831 2833 (ipha != NULL && ip6h == NULL));
2832 2834
2833 2835 if (ip6h != NULL) {
2834 2836 check_proto = IPPROTO_ICMPV6;
2835 2837 nexthdr = ip6h->ip6_nxt;
2836 2838 switch (nexthdr) {
2837 2839 case IPPROTO_HOPOPTS:
2838 2840 case IPPROTO_ROUTING:
2839 2841 case IPPROTO_DSTOPTS:
2840 2842 case IPPROTO_FRAGMENT:
2841 2843 /*
2842 2844 * Use ip_hdr_length_nexthdr_v6(). And have a spare
2843 2845 * mblk that's contiguous to feed it
2844 2846 */
2845 2847 spare_mp = msgpullup(mp, -1);
2846 2848 if (spare_mp == NULL ||
2847 2849 !ip_hdr_length_nexthdr_v6(spare_mp,
2848 2850 (ip6_t *)(spare_mp->b_rptr + outer_hdr_len),
2849 2851 &hdr_len, &nexthdrp)) {
2850 2852 /* Always works, even if NULL. */
2851 2853 ipsec_freemsg_chain(spare_mp);
2852 2854 ip_drop_packet_chain(mp, B_FALSE, NULL,
2853 2855 DROPPER(ipss, ipds_spd_nomem),
2854 2856 &ipss->ipsec_spd_dropper);
2855 2857 return (B_FALSE);
2856 2858 } else {
2857 2859 nexthdr = *nexthdrp;
2858 2860 /* We can just extract based on hdr_len now. */
2859 2861 }
2860 2862 break;
2861 2863 default:
2862 2864 hdr_len = IPV6_HDR_LEN;
2863 2865 break;
2864 2866 }
2865 2867 } else {
2866 2868 check_proto = IPPROTO_ICMP;
2867 2869 hdr_len = IPH_HDR_LENGTH(ipha);
2868 2870 nexthdr = ipha->ipha_protocol;
2869 2871 }
2870 2872
2871 2873 sel->ips_protocol = nexthdr;
2872 2874 if (nexthdr != IPPROTO_TCP && nexthdr != IPPROTO_UDP &&
2873 2875 nexthdr != IPPROTO_SCTP && nexthdr != check_proto) {
2874 2876 sel->ips_local_port = sel->ips_remote_port = 0;
2875 2877 ipsec_freemsg_chain(spare_mp); /* Always works, even if NULL */
2876 2878 return (B_TRUE);
2877 2879 }
2878 2880
2879 2881 if (&mp->b_rptr[hdr_len] + 4 + outer_hdr_len > mp->b_wptr) {
2880 2882 /* If we didn't pullup a copy already, do so now. */
2881 2883 /*
2882 2884 * XXX performance, will upper-layers frequently split TCP/UDP
2883 2885 * apart from IP or options? If so, perhaps we should revisit
2884 2886 * the spare_mp strategy.
2885 2887 *
2886 2888 * XXX should this be msgpullup(mp, hdr_len+4) ???
2887 2889 */
2888 2890 if (spare_mp == NULL &&
2889 2891 (spare_mp = msgpullup(mp, -1)) == NULL) {
2890 2892 ip_drop_packet_chain(mp, B_FALSE, NULL,
2891 2893 DROPPER(ipss, ipds_spd_nomem),
2892 2894 &ipss->ipsec_spd_dropper);
2893 2895 return (B_FALSE);
2894 2896 }
2895 2897 ports = (uint16_t *)&spare_mp->b_rptr[hdr_len + outer_hdr_len];
2896 2898 } else {
2897 2899 ports = (uint16_t *)&mp->b_rptr[hdr_len + outer_hdr_len];
2898 2900 }
2899 2901
2900 2902 if (nexthdr == check_proto) {
2901 2903 typecode = (uint8_t *)ports;
2902 2904 sel->ips_icmp_type = *typecode++;
2903 2905 sel->ips_icmp_code = *typecode;
2904 2906 sel->ips_remote_port = sel->ips_local_port = 0;
2905 2907 } else {
2906 2908 sel->ips_local_port = *ports++;
2907 2909 sel->ips_remote_port = *ports;
2908 2910 }
2909 2911 ipsec_freemsg_chain(spare_mp); /* Always works, even if NULL */
2910 2912 return (B_TRUE);
2911 2913 }
2912 2914
2913 2915 /*
2914 2916 * Prepend an mblk with an ipsec_crypto_t to the message chain.
2915 2917 * Frees the argument and returns NULL should the allocation fail.
2916 2918 * Returns the pointer to the crypto data part.
2917 2919 */
2918 2920 mblk_t *
2919 2921 ipsec_add_crypto_data(mblk_t *data_mp, ipsec_crypto_t **icp)
2920 2922 {
2921 2923 mblk_t *mp;
2922 2924
2923 2925 mp = allocb(sizeof (ipsec_crypto_t), BPRI_MED);
2924 2926 if (mp == NULL) {
2925 2927 freemsg(data_mp);
2926 2928 return (NULL);
2927 2929 }
2928 2930 bzero(mp->b_rptr, sizeof (ipsec_crypto_t));
2929 2931 mp->b_wptr += sizeof (ipsec_crypto_t);
2930 2932 mp->b_cont = data_mp;
2931 2933 mp->b_datap->db_type = M_EVENT; /* For ASSERT */
2932 2934 *icp = (ipsec_crypto_t *)mp->b_rptr;
2933 2935 return (mp);
2934 2936 }
2935 2937
2936 2938 /*
2937 2939 * Remove what was prepended above. Return b_cont and a pointer to the
2938 2940 * crypto data.
2939 2941 * The caller must call ipsec_free_crypto_data for mblk once it is done
2940 2942 * with the crypto data.
2941 2943 */
2942 2944 mblk_t *
2943 2945 ipsec_remove_crypto_data(mblk_t *crypto_mp, ipsec_crypto_t **icp)
2944 2946 {
2945 2947 ASSERT(crypto_mp->b_datap->db_type == M_EVENT);
2946 2948 ASSERT(MBLKL(crypto_mp) == sizeof (ipsec_crypto_t));
2947 2949
2948 2950 *icp = (ipsec_crypto_t *)crypto_mp->b_rptr;
2949 2951 return (crypto_mp->b_cont);
2950 2952 }
2951 2953
2952 2954 /*
2953 2955 * Free what was prepended above. Return b_cont.
2954 2956 */
2955 2957 mblk_t *
2956 2958 ipsec_free_crypto_data(mblk_t *crypto_mp)
2957 2959 {
2958 2960 mblk_t *mp;
2959 2961
2960 2962 ASSERT(crypto_mp->b_datap->db_type == M_EVENT);
2961 2963 ASSERT(MBLKL(crypto_mp) == sizeof (ipsec_crypto_t));
2962 2964
2963 2965 mp = crypto_mp->b_cont;
2964 2966 freeb(crypto_mp);
2965 2967 return (mp);
2966 2968 }
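The three functions above implement one small pattern: borrow the head of the chain for out-of-band metadata, then peel it off again. A userland analog using malloc (the M_EVENT tag and the mblk machinery are kernel-specific):

    #include <stdlib.h>

    typedef struct node {
        struct node *cont;      /* like an mblk's b_cont */
        char        data[64];
    } node_t;

    typedef struct {
        int cipher_id;          /* stand-in for ipsec_crypto_t */
    } crypto_t;

    /* Prepend a zeroed metadata node; free the data on alloc failure. */
    static node_t *
    add_crypto(node_t *data, crypto_t **icp)
    {
        node_t *hdr = calloc(1, sizeof (*hdr));

        if (hdr == NULL) {
            free(data);
            return (NULL);
        }
        hdr->cont = data;
        *icp = (crypto_t *)hdr->data;
        return (hdr);
    }

    /* Detach and free the metadata node, returning the original chain. */
    static node_t *
    free_crypto(node_t *hdr)
    {
        node_t *data = hdr->cont;

        free(hdr);
        return (data);
    }

    int
    main(void)
    {
        node_t *data = calloc(1, sizeof (node_t));
        crypto_t *ic;
        node_t *chain = add_crypto(data, &ic);

        if (chain != NULL) {
            ic->cipher_id = 3;      /* use the metadata */
            free(free_crypto(chain));
        }
        return (0);
    }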
2967 2969
2968 2970 /*
2969 2971 * Create an ipsec_action_t based on the way an inbound packet was protected.
2970 2972 * Used to reflect traffic back to a sender.
2971 2973 *
2972 2974 * We don't bother interning the action into the hash table.
2973 2975 */
2974 2976 ipsec_action_t *
2975 2977 ipsec_in_to_out_action(ip_recv_attr_t *ira)
2976 2978 {
2977 2979 ipsa_t *ah_assoc, *esp_assoc;
2978 2980 uint_t auth_alg = 0, encr_alg = 0, espa_alg = 0;
2979 2981 ipsec_action_t *ap;
2980 2982 boolean_t unique;
2981 2983
2982 2984 ap = kmem_cache_alloc(ipsec_action_cache, KM_NOSLEEP);
2983 2985
2984 2986 if (ap == NULL)
2985 2987 return (NULL);
2986 2988
2987 2989 bzero(ap, sizeof (*ap));
2988 2990 HASH_NULL(ap, ipa_hash);
2989 2991 ap->ipa_next = NULL;
2990 2992 ap->ipa_refs = 1;
2991 2993
2992 2994 /*
2993 2995 * Get the algorithms that were used for this packet.
2994 2996 */
2995 2997 ap->ipa_act.ipa_type = IPSEC_ACT_APPLY;
2996 2998 ap->ipa_act.ipa_log = 0;
2997 2999 ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
2998 3000
2999 3001 ah_assoc = ira->ira_ipsec_ah_sa;
3000 3002 ap->ipa_act.ipa_apply.ipp_use_ah = (ah_assoc != NULL);
3001 3003
3002 3004 esp_assoc = ira->ira_ipsec_esp_sa;
3003 3005 ap->ipa_act.ipa_apply.ipp_use_esp = (esp_assoc != NULL);
3004 3006
3005 3007 if (esp_assoc != NULL) {
3006 3008 encr_alg = esp_assoc->ipsa_encr_alg;
3007 3009 espa_alg = esp_assoc->ipsa_auth_alg;
3008 3010 ap->ipa_act.ipa_apply.ipp_use_espa = (espa_alg != 0);
3009 3011 }
3010 3012 if (ah_assoc != NULL)
3011 3013 auth_alg = ah_assoc->ipsa_auth_alg;
3012 3014
3013 3015 ap->ipa_act.ipa_apply.ipp_encr_alg = (uint8_t)encr_alg;
3014 3016 ap->ipa_act.ipa_apply.ipp_auth_alg = (uint8_t)auth_alg;
3015 3017 ap->ipa_act.ipa_apply.ipp_esp_auth_alg = (uint8_t)espa_alg;
3016 3018 ap->ipa_act.ipa_apply.ipp_use_se =
3017 3019 !!(ira->ira_flags & IRAF_IPSEC_DECAPS);
3018 3020 unique = B_FALSE;
3019 3021
3020 3022 if (esp_assoc != NULL) {
3021 3023 ap->ipa_act.ipa_apply.ipp_espa_minbits =
3022 3024 esp_assoc->ipsa_authkeybits;
3023 3025 ap->ipa_act.ipa_apply.ipp_espa_maxbits =
3024 3026 esp_assoc->ipsa_authkeybits;
3025 3027 ap->ipa_act.ipa_apply.ipp_espe_minbits =
3026 3028 esp_assoc->ipsa_encrkeybits;
3027 3029 ap->ipa_act.ipa_apply.ipp_espe_maxbits =
3028 3030 esp_assoc->ipsa_encrkeybits;
3029 3031 ap->ipa_act.ipa_apply.ipp_km_proto = esp_assoc->ipsa_kmp;
3030 3032 ap->ipa_act.ipa_apply.ipp_km_cookie = esp_assoc->ipsa_kmc;
3031 3033 if (esp_assoc->ipsa_flags & IPSA_F_UNIQUE)
3032 3034 unique = B_TRUE;
3033 3035 }
3034 3036 if (ah_assoc != NULL) {
3035 3037 ap->ipa_act.ipa_apply.ipp_ah_minbits =
3036 3038 ah_assoc->ipsa_authkeybits;
3037 3039 ap->ipa_act.ipa_apply.ipp_ah_maxbits =
3038 3040 ah_assoc->ipsa_authkeybits;
3039 3041 ap->ipa_act.ipa_apply.ipp_km_proto = ah_assoc->ipsa_kmp;
3040 3042 ap->ipa_act.ipa_apply.ipp_km_cookie = ah_assoc->ipsa_kmc;
3041 3043 if (ah_assoc->ipsa_flags & IPSA_F_UNIQUE)
3042 3044 unique = B_TRUE;
3043 3045 }
3044 3046 ap->ipa_act.ipa_apply.ipp_use_unique = unique;
3045 3047 ap->ipa_want_unique = unique;
3046 3048 ap->ipa_allow_clear = B_FALSE;
3047 3049 ap->ipa_want_se = !!(ira->ira_flags & IRAF_IPSEC_DECAPS);
3048 3050 ap->ipa_want_ah = (ah_assoc != NULL);
3049 3051 ap->ipa_want_esp = (esp_assoc != NULL);
3050 3052
3051 3053 ap->ipa_ovhd = ipsec_act_ovhd(&ap->ipa_act);
3052 3054
3053 3055 ap->ipa_act.ipa_apply.ipp_replay_depth = 0; /* don't care */
3054 3056
3055 3057 return (ap);
3056 3058 }
3057 3059
3058 3060
3059 3061 /*
3060 3062 * Compute the worst-case amount of extra space required by an action.
3061 3063 * Note that, because of the ESP considerations listed below, this is
3062 3064 * actually not the same as the best-case reduction in the MTU; in the
3063 3065 * future, we should pass additional information to this function to
3064 3066 * allow the actual MTU impact to be computed.
3065 3067 *
3066 3068 * AH: Revisit this if we implement algorithms with
3067 3069 * a verifier size of more than 12 bytes.
3068 3070 *
3069 3071 * ESP: A more exact but more messy computation would take into
3070 3072 * account the interaction between the cipher block size and the
3071 3073 * effective MTU, yielding the inner payload size which reflects a
3072 3074 * packet with *minimum* ESP padding.
3073 3075 */
3074 3076 int32_t
3075 3077 ipsec_act_ovhd(const ipsec_act_t *act)
3076 3078 {
3077 3079 int32_t overhead = 0;
3078 3080
3079 3081 if (act->ipa_type == IPSEC_ACT_APPLY) {
3080 3082 const ipsec_prot_t *ipp = &act->ipa_apply;
3081 3083
3082 3084 if (ipp->ipp_use_ah)
3083 3085 overhead += IPSEC_MAX_AH_HDR_SIZE;
3084 3086 if (ipp->ipp_use_esp) {
3085 3087 overhead += IPSEC_MAX_ESP_HDR_SIZE;
3086 3088 overhead += sizeof (struct udphdr);
3087 3089 }
3088 3090 if (ipp->ipp_use_se)
3089 3091 overhead += IP_SIMPLE_HDR_LENGTH;
3090 3092 }
3091 3093 return (overhead);
3092 3094 }
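For a concrete sense of the sums above: an ESP action with self-encapsulation pays for the ESP header, a possible NAT-traversal UDP header, and one extra IP header. The constants below are placeholders for illustration, not the kernel's IPSEC_MAX_* values:

    #include <stdio.h>

    /* Placeholder sizes, assumed for illustration only. */
    #define MAX_ESP_HDR     48
    #define UDP_HDR         8
    #define SIMPLE_IP_HDR   20

    int
    main(void)
    {
        /* ESP + self-encapsulation, mirroring ipsec_act_ovhd()'s sums. */
        int overhead = MAX_ESP_HDR + UDP_HDR + SIMPLE_IP_HDR;

        printf("worst-case extra bytes: %d\n", overhead);
        return (0);
    }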
3093 3095
3094 3096 /*
3095 3097 * This hash function is used only when creating policies and thus is not
3096 3098 * performance-critical for packet flows.
3097 3099 *
3098 3100 * Future work: canonicalize the structures hashed with this (i.e.,
3099 3101 * zeroize padding) so the hash works correctly.
3100 3102 */
3101 3103 /* ARGSUSED */
3102 3104 static uint32_t
3103 3105 policy_hash(int size, const void *start, const void *end)
3104 3106 {
3105 3107 return (0);
3106 3108 }
3107 3109
3108 3110
3109 3111 /*
3110 3112 * Hash function macros for each address type.
3111 3113 *
3112 3114 * The IPV6 hash function assumes that the low order 32-bits of the
3113 3115 * address (typically containing the low order 24 bits of the mac
3114 3116 * address) are reasonably well-distributed. Revisit this if we run
3115 3117 * into trouble from lots of collisions on ::1 addresses and the like
3116 3118 * (seems unlikely).
3117 3119 */
3118 3120 #define IPSEC_IPV4_HASH(a, n) ((a) % (n))
3119 3121 #define IPSEC_IPV6_HASH(a, n) (((a).s6_addr32[3]) % (n))
3120 3122
3121 3123 /*
3122 3124 * These two hash functions should produce coordinated values
3123 3125 * but have slightly different roles.
3124 3126 */
3125 3127 static uint32_t
3126 3128 selkey_hash(const ipsec_selkey_t *selkey, netstack_t *ns)
3127 3129 {
3128 3130 uint32_t valid = selkey->ipsl_valid;
3129 3131 ipsec_stack_t *ipss = ns->netstack_ipsec;
3130 3132
3131 3133 if (!(valid & IPSL_REMOTE_ADDR))
3132 3134 return (IPSEC_SEL_NOHASH);
3133 3135
3134 3136 if (valid & IPSL_IPV4) {
3135 3137 if (selkey->ipsl_remote_pfxlen == 32) {
3136 3138 return (IPSEC_IPV4_HASH(selkey->ipsl_remote.ipsad_v4,
3137 3139 ipss->ipsec_spd_hashsize));
3138 3140 }
3139 3141 }
3140 3142 if (valid & IPSL_IPV6) {
3141 3143 if (selkey->ipsl_remote_pfxlen == 128) {
3142 3144 return (IPSEC_IPV6_HASH(selkey->ipsl_remote.ipsad_v6,
3143 3145 ipss->ipsec_spd_hashsize));
3144 3146 }
3145 3147 }
3146 3148 return (IPSEC_SEL_NOHASH);
3147 3149 }
3148 3150
3149 3151 static uint32_t
3150 3152 selector_hash(ipsec_selector_t *sel, ipsec_policy_root_t *root)
3151 3153 {
3152 3154 if (sel->ips_isv4) {
3153 3155 return (IPSEC_IPV4_HASH(sel->ips_remote_addr_v4,
3154 3156 root->ipr_nchains));
3155 3157 }
3156 3158 return (IPSEC_IPV6_HASH(sel->ips_remote_addr_v6, root->ipr_nchains));
3157 3159 }
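Both hashes reduce an address to a bucket by simple modulo, using only the low 32 bits for IPv6, so selkey_hash() at policy-insert time and selector_hash() at lookup time put a fully-specified address in the same chain. A quick demonstration with a hypothetical chain count:

    #include <stdint.h>
    #include <stdio.h>

    #define IPV4_HASH(a, n)     ((a) % (n))
    #define IPV6_HASH(low32, n) ((low32) % (n))

    int
    main(void)
    {
        uint32_t nchains = 251;     /* hypothetical table size */
        uint32_t v4 = 0xc0a80105;   /* 192.168.1.5 */
        uint32_t v6_low = 0xfe;     /* low 32 bits of an IPv6 address */

        printf("v4 bucket: %u\n", IPV4_HASH(v4, nchains));
        printf("v6 bucket: %u\n", IPV6_HASH(v6_low, nchains));
        return (0);
    }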
3158 3160
3159 3161 /*
3160 3162 * Intern actions into the action hash table.
3161 3163 */
3162 3164 ipsec_action_t *
3163 3165 ipsec_act_find(const ipsec_act_t *a, int n, netstack_t *ns)
3164 3166 {
3165 3167 int i;
3166 3168 uint32_t hval;
3167 3169 ipsec_action_t *ap;
3168 3170 ipsec_action_t *prev = NULL;
3169 3171 int32_t overhead, maxovhd = 0;
3170 3172 boolean_t allow_clear = B_FALSE;
3171 3173 boolean_t want_ah = B_FALSE;
3172 3174 boolean_t want_esp = B_FALSE;
3173 3175 boolean_t want_se = B_FALSE;
3174 3176 boolean_t want_unique = B_FALSE;
3175 3177 ipsec_stack_t *ipss = ns->netstack_ipsec;
3176 3178
3177 3179 /*
3178 3180 * TODO: should canonicalize a[] (i.e., zeroize any padding)
3179 3181 * so we can use a non-trivial policy_hash function.
3180 3182 */
3181 3183 for (i = n-1; i >= 0; i--) {
3182 3184 hval = policy_hash(IPSEC_ACTION_HASH_SIZE, &a[i], &a[n]);
3183 3185
3184 3186 HASH_LOCK(ipss->ipsec_action_hash, hval);
3185 3187
3186 3188 for (HASH_ITERATE(ap, ipa_hash,
3187 3189 ipss->ipsec_action_hash, hval)) {
3188 3190 if (bcmp(&ap->ipa_act, &a[i], sizeof (*a)) != 0)
3189 3191 continue;
3190 3192 if (ap->ipa_next != prev)
3191 3193 continue;
3192 3194 break;
3193 3195 }
3194 3196 if (ap != NULL) {
3195 3197 HASH_UNLOCK(ipss->ipsec_action_hash, hval);
3196 3198 prev = ap;
3197 3199 continue;
3198 3200 }
3199 3201 /*
3200 3202 * need to allocate a new one..
3201 3203 */
3202 3204 ap = kmem_cache_alloc(ipsec_action_cache, KM_NOSLEEP);
3203 3205 if (ap == NULL) {
3204 3206 HASH_UNLOCK(ipss->ipsec_action_hash, hval);
3205 3207 if (prev != NULL)
3206 3208 ipsec_action_free(prev);
3207 3209 return (NULL);
3208 3210 }
3209 3211 HASH_INSERT(ap, ipa_hash, ipss->ipsec_action_hash, hval);
3210 3212
3211 3213 ap->ipa_next = prev;
3212 3214 ap->ipa_act = a[i];
3213 3215
3214 3216 overhead = ipsec_act_ovhd(&a[i]);
3215 3217 if (maxovhd < overhead)
3216 3218 maxovhd = overhead;
3217 3219
3218 3220 if ((a[i].ipa_type == IPSEC_ACT_BYPASS) ||
3219 3221 (a[i].ipa_type == IPSEC_ACT_CLEAR))
3220 3222 allow_clear = B_TRUE;
3221 3223 if (a[i].ipa_type == IPSEC_ACT_APPLY) {
3222 3224 const ipsec_prot_t *ipp = &a[i].ipa_apply;
3223 3225
3224 3226 ASSERT(ipp->ipp_use_ah || ipp->ipp_use_esp);
3225 3227 want_ah |= ipp->ipp_use_ah;
3226 3228 want_esp |= ipp->ipp_use_esp;
3227 3229 want_se |= ipp->ipp_use_se;
3228 3230 want_unique |= ipp->ipp_use_unique;
3229 3231 }
3230 3232 ap->ipa_allow_clear = allow_clear;
3231 3233 ap->ipa_want_ah = want_ah;
3232 3234 ap->ipa_want_esp = want_esp;
3233 3235 ap->ipa_want_se = want_se;
3234 3236 ap->ipa_want_unique = want_unique;
3235 3237 ap->ipa_refs = 1; /* from the hash table */
3236 3238 ap->ipa_ovhd = maxovhd;
3237 3239 if (prev)
3238 3240 prev->ipa_refs++;
3239 3241 prev = ap;
3240 3242 HASH_UNLOCK(ipss->ipsec_action_hash, hval);
3241 3243 }
3242 3244
3243 3245 ap->ipa_refs++; /* caller's reference */
3244 3246
3245 3247 return (ap);
3246 3248 }
3247 3249
3248 3250 /*
3249 3251 * Called when refcount goes to 0, indicating that all references to this
3250 3252 * node are gone.
3251 3253 *
3252 3254 * This does not unchain the action from the hash table.
3253 3255 */
3254 3256 void
3255 3257 ipsec_action_free(ipsec_action_t *ap)
3256 3258 {
3257 3259 for (;;) {
3258 3260 ipsec_action_t *np = ap->ipa_next;
3259 3261 ASSERT(ap->ipa_refs == 0);
3260 3262 ASSERT(ap->ipa_hash.hash_pp == NULL);
3261 3263 kmem_cache_free(ipsec_action_cache, ap);
3262 3264 ap = np;
3263 3265 /* Inlined IPACT_REFRELE -- avoid recursion */
3264 3266 if (ap == NULL)
3265 3267 break;
3266 3268 membar_exit();
3267 3269 if (atomic_dec_32_nv(&(ap)->ipa_refs) != 0)
3268 3270 break;
3269 3271 /* End inlined IPACT_REFRELE */
3270 3272 }
3271 3273 }
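
    Review note: the inlined IPACT_REFRELE above turns what would be tail
    recursion down the ipa_next chain into a loop, so releasing a long chain
    cannot deepen the stack. A minimal userland model of the same shape
    using C11 atomics (hypothetical node type):

        #include <stdatomic.h>
        #include <stdlib.h>

        typedef struct node {
            atomic_uint     refs;
            struct node     *next;  /* each node holds a ref on its successor */
        } node_t;

        /*
         * Free a node whose refcount already reached zero, then drop the
         * reference it held on its successor -- iteratively, so a long
         * chain cannot blow the stack the way recursion would.
         */
        void
        node_free(node_t *np)
        {
            while (np != NULL) {
                node_t *next = np->next;

                free(np);
                np = next;
                if (np == NULL)
                    break;
                /* inlined REFRELE: stop if other holders remain */
                if (atomic_fetch_sub(&np->refs, 1) != 1)
                    break;
            }
        }
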
3272 3274
3273 3275 /*
3274 3276 * Called when the action hash table goes away.
3275 3277 *
3276 3278 * The actions can be queued on an mblk with ipsec_in or
3277 3279 * ipsec_out, hence the actions might still be around.
3278 3280 * But we decrement ipa_refs here since we no longer have
3279 3281 * a reference to the action from the hash table.
3280 3282 */
3281 3283 static void
3282 3284 ipsec_action_free_table(ipsec_action_t *ap)
3283 3285 {
3284 3286 while (ap != NULL) {
3285 3287 ipsec_action_t *np = ap->ipa_next;
3286 3288
3287 3289 /* FIXME: remove? */
3288 3290 (void) printf("ipsec_action_free_table(%p) ref %d\n",
3289 3291 (void *)ap, ap->ipa_refs);
3290 3292 ASSERT(ap->ipa_refs > 0);
3291 3293 IPACT_REFRELE(ap);
3292 3294 ap = np;
3293 3295 }
3294 3296 }
3295 3297
3296 3298 /*
3297 3299 * Need to walk all stack instances since the reclaim function
3298 3300 * is global for all instances
3299 3301 */
3300 3302 /* ARGSUSED */
3301 3303 static void
3302 3304 ipsec_action_reclaim(void *arg)
3303 3305 {
3304 3306 netstack_handle_t nh;
3305 3307 netstack_t *ns;
3306 3308 ipsec_stack_t *ipss;
3307 3309
3308 3310 netstack_next_init(&nh);
3309 3311 while ((ns = netstack_next(&nh)) != NULL) {
3310 3312 /*
3311 3313 * netstack_next() can return a netstack_t with a NULL
3312 3314 * netstack_ipsec at boot time.
3313 3315 */
3314 3316 if ((ipss = ns->netstack_ipsec) == NULL) {
3315 3317 netstack_rele(ns);
3316 3318 continue;
3317 3319 }
3318 3320 ipsec_action_reclaim_stack(ipss);
3319 3321 netstack_rele(ns);
3320 3322 }
3321 3323 netstack_next_fini(&nh);
3322 3324 }
3323 3325
3324 3326 /*
3325 3327 * Periodically sweep action hash table for actions with refcount==1, and
3326 3328 * nuke them. We cannot do this "on demand" (i.e., from IPACT_REFRELE)
3327 3329  * because, without holding the bucket lock during IPACT_REFRELE, we can't
3328 3330  * close the race with another thread finding the action in the hash table.
3329 3331 * Instead, we run this function sporadically to clean up after ourselves;
3330 3332 * we also set it as the "reclaim" function for the action kmem_cache.
3331 3333 *
3332 3334 * Note that it may take several passes of ipsec_action_gc() to free all
3333 3335 * "stale" actions.
3334 3336 */
3335 3337 static void
3336 3338 ipsec_action_reclaim_stack(ipsec_stack_t *ipss)
3337 3339 {
3338 3340 int i;
3339 3341
3340 3342 for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++) {
3341 3343 ipsec_action_t *ap, *np;
3342 3344
3343 3345 /* skip the lock if nobody home */
3344 3346 if (ipss->ipsec_action_hash[i].hash_head == NULL)
3345 3347 continue;
3346 3348
3347 3349 HASH_LOCK(ipss->ipsec_action_hash, i);
3348 3350 for (ap = ipss->ipsec_action_hash[i].hash_head;
3349 3351 ap != NULL; ap = np) {
3350 3352 ASSERT(ap->ipa_refs > 0);
3351 3353 np = ap->ipa_hash.hash_next;
3352 3354 if (ap->ipa_refs > 1)
3353 3355 continue;
3354 3356 HASH_UNCHAIN(ap, ipa_hash,
3355 3357 ipss->ipsec_action_hash, i);
3356 3358 IPACT_REFRELE(ap);
3357 3359 }
3358 3360 HASH_UNLOCK(ipss->ipsec_action_hash, i);
3359 3361 }
3360 3362 }
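
    Review note: two details worth testing in the sweep: the unlocked peek
    at hash_head (a stale miss only delays reclaim by one pass), and saving
    hash_next before the current node can be unchained and freed. A userland
    sketch of the same shape (pthread mutexes standing in for HASH_LOCK;
    assume each lock was pthread_mutex_init()ed at table setup):

        #include <pthread.h>
        #include <stdlib.h>

        #define NBUCKETS 256

        typedef struct act {
            struct act      *next;
            unsigned        refs;   /* refs == 1: only the table holds it */
        } act_t;

        typedef struct {
            pthread_mutex_t lock;
            act_t           *head;
        } abucket_t;

        static abucket_t hash[NBUCKETS];

        void
        sweep(void)
        {
            for (int i = 0; i < NBUCKETS; i++) {
                act_t *ap, *np, **pp;

                /* unlocked peek: a stale miss just delays the reclaim */
                if (hash[i].head == NULL)
                    continue;

                (void) pthread_mutex_lock(&hash[i].lock);
                pp = &hash[i].head;
                for (ap = hash[i].head; ap != NULL; ap = np) {
                    np = ap->next;      /* save before ap can be freed */
                    if (ap->refs > 1) {
                        pp = &ap->next;
                        continue;
                    }
                    *pp = np;           /* unchain */
                    free(ap);           /* drop the table's reference */
                }
                (void) pthread_mutex_unlock(&hash[i].lock);
            }
        }
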
3361 3363
3362 3364 /*
3363 3365 * Intern a selector set into the selector set hash table.
3364 3366 * This is simpler than the actions case..
3365 3367 */
3366 3368 static ipsec_sel_t *
3367 3369 ipsec_find_sel(ipsec_selkey_t *selkey, netstack_t *ns)
3368 3370 {
3369 3371 ipsec_sel_t *sp;
3370 3372 uint32_t hval, bucket;
3371 3373 ipsec_stack_t *ipss = ns->netstack_ipsec;
3372 3374
3373 3375 /*
3374 3376 * Exactly one AF bit should be set in selkey.
3375 3377 */
3376 3378 ASSERT(!(selkey->ipsl_valid & IPSL_IPV4) ^
3377 3379 !(selkey->ipsl_valid & IPSL_IPV6));
3378 3380
3379 3381 hval = selkey_hash(selkey, ns);
3380 3382 /* Set pol_hval to uninitialized until we put it in a polhead. */
3381 3383 selkey->ipsl_sel_hval = hval;
3382 3384
3383 3385 bucket = (hval == IPSEC_SEL_NOHASH) ? 0 : hval;
3384 3386
3385 3387 ASSERT(!HASH_LOCKED(ipss->ipsec_sel_hash, bucket));
3386 3388 HASH_LOCK(ipss->ipsec_sel_hash, bucket);
3387 3389
3388 3390 for (HASH_ITERATE(sp, ipsl_hash, ipss->ipsec_sel_hash, bucket)) {
3389 3391 if (bcmp(&sp->ipsl_key, selkey,
3390 3392 offsetof(ipsec_selkey_t, ipsl_pol_hval)) == 0)
3391 3393 break;
3392 3394 }
3393 3395 if (sp != NULL) {
3394 3396 sp->ipsl_refs++;
3395 3397
3396 3398 HASH_UNLOCK(ipss->ipsec_sel_hash, bucket);
3397 3399 return (sp);
3398 3400 }
3399 3401
3400 3402 sp = kmem_cache_alloc(ipsec_sel_cache, KM_NOSLEEP);
3401 3403 if (sp == NULL) {
3402 3404 HASH_UNLOCK(ipss->ipsec_sel_hash, bucket);
3403 3405 return (NULL);
3404 3406 }
3405 3407
3406 3408 HASH_INSERT(sp, ipsl_hash, ipss->ipsec_sel_hash, bucket);
3407 3409 sp->ipsl_refs = 2; /* one for hash table, one for caller */
3408 3410 sp->ipsl_key = *selkey;
3409 3411 /* Set to uninitialized and have insertion into polhead fix things. */
3410 3412 if (selkey->ipsl_sel_hval != IPSEC_SEL_NOHASH)
3411 3413 sp->ipsl_key.ipsl_pol_hval = 0;
3412 3414 else
3413 3415 sp->ipsl_key.ipsl_pol_hval = IPSEC_SEL_NOHASH;
3414 3416
3415 3417 HASH_UNLOCK(ipss->ipsec_sel_hash, bucket);
3416 3418
3417 3419 return (sp);
3418 3420 }
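
    Review note: the ipsl_refs = 2 convention above (one reference for the
    hash table, one for the caller) is the crux of the interning scheme;
    ipsec_sel_rel() is its mirror image, freeing only when the table's
    reference is the last one standing. A userland sketch of lookup-or-insert
    under the bucket lock (hypothetical types; malloc failure plays the
    KM_NOSLEEP role):

        #include <pthread.h>
        #include <stdlib.h>

        typedef struct entry {
            struct entry    *next;
            unsigned        refs;
            int             key;
        } entry_t;

        typedef struct bucket {
            pthread_mutex_t lock;
            entry_t         *head;
        } bucket_t;

        /* Return an interned entry for key, holding one caller reference. */
        entry_t *
        intern(bucket_t *b, int key)
        {
            entry_t *e;

            (void) pthread_mutex_lock(&b->lock);
            for (e = b->head; e != NULL; e = e->next) {
                if (e->key == key)
                    break;
            }
            if (e != NULL) {
                e->refs++;              /* caller's reference */
                (void) pthread_mutex_unlock(&b->lock);
                return (e);
            }
            if ((e = malloc(sizeof (*e))) == NULL) {
                (void) pthread_mutex_unlock(&b->lock);
                return (NULL);          /* the KM_NOSLEEP failure case */
            }
            e->key = key;
            e->refs = 2;        /* one for the table, one for the caller */
            e->next = b->head;
            b->head = e;
            (void) pthread_mutex_unlock(&b->lock);
            return (e);
        }
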
3419 3421
3420 3422 static void
3421 3423 ipsec_sel_rel(ipsec_sel_t **spp, netstack_t *ns)
3422 3424 {
3423 3425 ipsec_sel_t *sp = *spp;
3424 3426 int hval = sp->ipsl_key.ipsl_sel_hval;
3425 3427 ipsec_stack_t *ipss = ns->netstack_ipsec;
3426 3428
3427 3429 *spp = NULL;
3428 3430
3429 3431 if (hval == IPSEC_SEL_NOHASH)
3430 3432 hval = 0;
3431 3433
3432 3434 ASSERT(!HASH_LOCKED(ipss->ipsec_sel_hash, hval));
3433 3435 HASH_LOCK(ipss->ipsec_sel_hash, hval);
3434 3436 if (--sp->ipsl_refs == 1) {
3435 3437 HASH_UNCHAIN(sp, ipsl_hash, ipss->ipsec_sel_hash, hval);
3436 3438 sp->ipsl_refs--;
3437 3439 HASH_UNLOCK(ipss->ipsec_sel_hash, hval);
3438 3440 ASSERT(sp->ipsl_refs == 0);
3439 3441 kmem_cache_free(ipsec_sel_cache, sp);
3440 3442 /* Caller unlocks */
3441 3443 return;
3442 3444 }
3443 3445
3444 3446 HASH_UNLOCK(ipss->ipsec_sel_hash, hval);
3445 3447 }
3446 3448
3447 3449 /*
3448 3450 * Free a policy rule which we know is no longer being referenced.
3449 3451 */
3450 3452 void
3451 3453 ipsec_policy_free(ipsec_policy_t *ipp)
3452 3454 {
3453 3455 ASSERT(ipp->ipsp_refs == 0);
3454 3456 ASSERT(ipp->ipsp_sel != NULL);
3455 3457 ASSERT(ipp->ipsp_act != NULL);
3456 3458 ASSERT(ipp->ipsp_netstack != NULL);
3457 3459
3458 3460 ipsec_sel_rel(&ipp->ipsp_sel, ipp->ipsp_netstack);
3459 3461 IPACT_REFRELE(ipp->ipsp_act);
3460 3462 kmem_cache_free(ipsec_pol_cache, ipp);
3461 3463 }
3462 3464
3463 3465 /*
3464 3466 * Construction of new policy rules; construct a policy, and add it to
3465 3467 * the appropriate tables.
3466 3468 */
3467 3469 ipsec_policy_t *
3468 3470 ipsec_policy_create(ipsec_selkey_t *keys, const ipsec_act_t *a,
3469 3471 int nacts, int prio, uint64_t *index_ptr, netstack_t *ns)
3470 3472 {
3471 3473 ipsec_action_t *ap;
3472 3474 ipsec_sel_t *sp;
3473 3475 ipsec_policy_t *ipp;
3474 3476 ipsec_stack_t *ipss = ns->netstack_ipsec;
3475 3477
3476 3478 if (index_ptr == NULL)
3477 3479 index_ptr = &ipss->ipsec_next_policy_index;
3478 3480
3479 3481 ipp = kmem_cache_alloc(ipsec_pol_cache, KM_NOSLEEP);
3480 3482 ap = ipsec_act_find(a, nacts, ns);
3481 3483 sp = ipsec_find_sel(keys, ns);
3482 3484
3483 3485 if ((ap == NULL) || (sp == NULL) || (ipp == NULL)) {
3484 3486 if (ap != NULL) {
3485 3487 IPACT_REFRELE(ap);
3486 3488 }
3487 3489 if (sp != NULL)
3488 3490 ipsec_sel_rel(&sp, ns);
3489 3491 if (ipp != NULL)
3490 3492 kmem_cache_free(ipsec_pol_cache, ipp);
3491 3493 return (NULL);
3492 3494 }
3493 3495
3494 3496 HASH_NULL(ipp, ipsp_hash);
3495 3497
3496 3498 ipp->ipsp_netstack = ns; /* Needed for ipsec_policy_free */
3497 3499 ipp->ipsp_refs = 1; /* caller's reference */
3498 3500 ipp->ipsp_sel = sp;
3499 3501 ipp->ipsp_act = ap;
3500 3502 ipp->ipsp_prio = prio; /* rule priority */
3501 3503 ipp->ipsp_index = *index_ptr;
3502 3504 (*index_ptr)++;
3503 3505
3504 3506 return (ipp);
3505 3507 }
3506 3508
3507 3509 static void
3508 3510 ipsec_update_present_flags(ipsec_stack_t *ipss)
3509 3511 {
3510 3512 boolean_t hashpol;
3511 3513
3512 3514 hashpol = (avl_numnodes(&ipss->ipsec_system_policy.iph_rulebyid) > 0);
3513 3515
3514 3516 if (hashpol) {
3515 3517 ipss->ipsec_outbound_v4_policy_present = B_TRUE;
3516 3518 ipss->ipsec_outbound_v6_policy_present = B_TRUE;
3517 3519 ipss->ipsec_inbound_v4_policy_present = B_TRUE;
3518 3520 ipss->ipsec_inbound_v6_policy_present = B_TRUE;
3519 3521 return;
3520 3522 }
3521 3523
3522 3524 ipss->ipsec_outbound_v4_policy_present = (NULL !=
3523 3525 ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_OUTBOUND].
3524 3526 ipr_nonhash[IPSEC_AF_V4]);
3525 3527 ipss->ipsec_outbound_v6_policy_present = (NULL !=
3526 3528 ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_OUTBOUND].
3527 3529 ipr_nonhash[IPSEC_AF_V6]);
3528 3530 ipss->ipsec_inbound_v4_policy_present = (NULL !=
3529 3531 ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_INBOUND].
3530 3532 ipr_nonhash[IPSEC_AF_V4]);
3531 3533 ipss->ipsec_inbound_v6_policy_present = (NULL !=
3532 3534 ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_INBOUND].
3533 3535 ipr_nonhash[IPSEC_AF_V6]);
3534 3536 }
3535 3537
3536 3538 boolean_t
3537 3539 ipsec_policy_delete(ipsec_policy_head_t *php, ipsec_selkey_t *keys, int dir,
3538 3540 netstack_t *ns)
3539 3541 {
3540 3542 ipsec_sel_t *sp;
3541 3543 ipsec_policy_t *ip, *nip, *head;
3542 3544 int af;
3543 3545 ipsec_policy_root_t *pr = &php->iph_root[dir];
3544 3546
3545 3547 sp = ipsec_find_sel(keys, ns);
3546 3548
3547 3549 if (sp == NULL)
3548 3550 return (B_FALSE);
3549 3551
3550 3552 af = (sp->ipsl_key.ipsl_valid & IPSL_IPV4) ? IPSEC_AF_V4 : IPSEC_AF_V6;
3551 3553
3552 3554 rw_enter(&php->iph_lock, RW_WRITER);
3553 3555
3554 3556 if (sp->ipsl_key.ipsl_pol_hval == IPSEC_SEL_NOHASH) {
3555 3557 head = pr->ipr_nonhash[af];
3556 3558 } else {
3557 3559 head = pr->ipr_hash[sp->ipsl_key.ipsl_pol_hval].hash_head;
3558 3560 }
3559 3561
3560 3562 for (ip = head; ip != NULL; ip = nip) {
3561 3563 nip = ip->ipsp_hash.hash_next;
3562 3564 if (ip->ipsp_sel != sp) {
3563 3565 continue;
3564 3566 }
3565 3567
3566 3568 IPPOL_UNCHAIN(php, ip);
3567 3569
3568 3570 php->iph_gen++;
3569 3571 ipsec_update_present_flags(ns->netstack_ipsec);
3570 3572
3571 3573 rw_exit(&php->iph_lock);
3572 3574
3573 3575 ipsec_sel_rel(&sp, ns);
3574 3576
3575 3577 return (B_TRUE);
3576 3578 }
3577 3579
3578 3580 rw_exit(&php->iph_lock);
3579 3581 ipsec_sel_rel(&sp, ns);
3580 3582 return (B_FALSE);
3581 3583 }
3582 3584
3583 3585 int
3584 3586 ipsec_policy_delete_index(ipsec_policy_head_t *php, uint64_t policy_index,
3585 3587 netstack_t *ns)
3586 3588 {
3587 3589 boolean_t found = B_FALSE;
3588 3590 ipsec_policy_t ipkey;
3589 3591 ipsec_policy_t *ip;
3590 3592 avl_index_t where;
3591 3593
3592 3594 bzero(&ipkey, sizeof (ipkey));
3593 3595 ipkey.ipsp_index = policy_index;
3594 3596
3595 3597 rw_enter(&php->iph_lock, RW_WRITER);
3596 3598
3597 3599 /*
3598 3600 * We could be cleverer here about the walk, but for now
3599 3601 * (k+1)*log(N) will do (k == number of matches,
3600 3602 * N == number of table entries).
3601 3603 */
3602 3604 for (;;) {
3603 3605 ip = (ipsec_policy_t *)avl_find(&php->iph_rulebyid,
3604 3606 (void *)&ipkey, &where);
3605 3607 ASSERT(ip == NULL);
3606 3608
3607 3609 ip = avl_nearest(&php->iph_rulebyid, where, AVL_AFTER);
3608 3610
3609 3611 if (ip == NULL)
3610 3612 break;
3611 3613
3612 3614 if (ip->ipsp_index != policy_index) {
3613 3615 ASSERT(ip->ipsp_index > policy_index);
3614 3616 break;
3615 3617 }
3616 3618
3617 3619 IPPOL_UNCHAIN(php, ip);
3618 3620 found = B_TRUE;
3619 3621 }
3620 3622
3621 3623 if (found) {
3622 3624 php->iph_gen++;
3623 3625 ipsec_update_present_flags(ns->netstack_ipsec);
3624 3626 }
3625 3627
3626 3628 rw_exit(&php->iph_lock);
3627 3629
3628 3630 return (found ? 0 : ENOENT);
3629 3631 }
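
    Review note: the walk above re-runs a log-time search after every
    unchain rather than keeping an iterator live across tree modifications,
    which is where the (k+1)*log(N) in the comment comes from. A sketch of
    the same pattern over a sorted array (binary search standing in for
    avl_find()/avl_nearest()):

        #include <stdio.h>
        #include <string.h>

        static int tbl[] = { 2, 5, 5, 5, 9, 12 };  /* sorted "rulebyid" */
        static int ntbl = 6;

        /* log-time: index of first element >= key (avl_nearest analog) */
        static int
        first_at_or_after(int key)
        {
            int lo = 0, hi = ntbl;

            while (lo < hi) {
                int mid = (lo + hi) / 2;

                if (tbl[mid] < key)
                    lo = mid + 1;
                else
                    hi = mid;
            }
            return (lo);
        }

        int
        main(void)
        {
            int key = 5, found = 0;

            for (;;) {
                int i = first_at_or_after(key); /* fresh search each pass */

                if (i == ntbl || tbl[i] != key)
                    break;
                /* "unchain" the match, then search again from scratch */
                (void) memmove(&tbl[i], &tbl[i + 1],
                    (ntbl - i - 1) * sizeof (int));
                ntbl--;
                found = 1;
            }
            (void) printf("%s, %d entries left\n",
                found ? "deleted" : "ENOENT", ntbl);
            return (0);
        }
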
3630 3632
3631 3633 /*
3632 3634 * Given a constructed ipsec_policy_t policy rule, see if it can be entered
3633 3635 * into the correct policy ruleset. As a side-effect, it sets the hash
3634 3636 * entries on "ipp"'s ipsp_pol_hval.
3635 3637 *
3636 3638 * Returns B_TRUE if it can be entered, B_FALSE if it can't be (because a
3637 3639 * duplicate policy exists with exactly the same selectors, or an ICMP
3638 3640 * rule exists with a different encryption/authentication action).
3639 3641 */
3640 3642 boolean_t
3641 3643 ipsec_check_policy(ipsec_policy_head_t *php, ipsec_policy_t *ipp, int direction)
3642 3644 {
3643 3645 ipsec_policy_root_t *pr = &php->iph_root[direction];
3644 3646 int af = -1;
3645 3647 ipsec_policy_t *p2, *head;
3646 3648 uint8_t check_proto;
3647 3649 ipsec_selkey_t *selkey = &ipp->ipsp_sel->ipsl_key;
3648 3650 uint32_t valid = selkey->ipsl_valid;
3649 3651
3650 3652 if (valid & IPSL_IPV6) {
3651 3653 ASSERT(!(valid & IPSL_IPV4));
3652 3654 af = IPSEC_AF_V6;
3653 3655 check_proto = IPPROTO_ICMPV6;
3654 3656 } else {
3655 3657 ASSERT(valid & IPSL_IPV4);
3656 3658 af = IPSEC_AF_V4;
3657 3659 check_proto = IPPROTO_ICMP;
3658 3660 }
3659 3661
3660 3662 ASSERT(RW_WRITE_HELD(&php->iph_lock));
3661 3663
3662 3664 /*
3663 3665 * Double-check that we don't have any duplicate selectors here.
3664 3666 * Because selectors are interned, we need only compare pointers
3665 3667 * for equality.
3666 3668 */
3667 3669 if (selkey->ipsl_sel_hval == IPSEC_SEL_NOHASH) {
3668 3670 head = pr->ipr_nonhash[af];
3669 3671 } else {
3670 3672 selkey->ipsl_pol_hval =
3671 3673 (selkey->ipsl_valid & IPSL_IPV4) ?
3672 3674 IPSEC_IPV4_HASH(selkey->ipsl_remote.ipsad_v4,
3673 3675 pr->ipr_nchains) :
3674 3676 IPSEC_IPV6_HASH(selkey->ipsl_remote.ipsad_v6,
3675 3677 pr->ipr_nchains);
3676 3678
3677 3679 head = pr->ipr_hash[selkey->ipsl_pol_hval].hash_head;
3678 3680 }
3679 3681
3680 3682 for (p2 = head; p2 != NULL; p2 = p2->ipsp_hash.hash_next) {
3681 3683 if (p2->ipsp_sel == ipp->ipsp_sel)
3682 3684 return (B_FALSE);
3683 3685 }
3684 3686
3685 3687 /*
3686 3688 * If it's ICMP and not a drop or pass rule, run through the ICMP
3687 3689 * rules and make sure the action is either new or the same as any
3688 3690 * other actions. We don't have to check the full chain because
3689 3691 * discard and bypass will override all other actions
3690 3692 */
3691 3693
3692 3694 if (valid & IPSL_PROTOCOL &&
3693 3695 selkey->ipsl_proto == check_proto &&
3694 3696 (ipp->ipsp_act->ipa_act.ipa_type == IPSEC_ACT_APPLY)) {
3695 3697
3696 3698 for (p2 = head; p2 != NULL; p2 = p2->ipsp_hash.hash_next) {
3697 3699
3698 3700 if (p2->ipsp_sel->ipsl_key.ipsl_valid & IPSL_PROTOCOL &&
3699 3701 p2->ipsp_sel->ipsl_key.ipsl_proto == check_proto &&
3700 3702 (p2->ipsp_act->ipa_act.ipa_type ==
3701 3703 IPSEC_ACT_APPLY)) {
3702 3704 return (ipsec_compare_action(p2, ipp));
3703 3705 }
3704 3706 }
3705 3707 }
3706 3708
3707 3709 return (B_TRUE);
3708 3710 }
3709 3711
3710 3712 /*
3711 3713 * compare the action chains of two policies for equality
3712 3714 * B_TRUE -> effective equality
3713 3715 */
3714 3716
3715 3717 static boolean_t
3716 3718 ipsec_compare_action(ipsec_policy_t *p1, ipsec_policy_t *p2)
3717 3719 {
3718 3720
3719 3721 ipsec_action_t *act1, *act2;
3720 3722
3721 3723 /* We have a valid rule. Let's compare the actions */
3722 3724 if (p1->ipsp_act == p2->ipsp_act) {
3723 3725 /* same action. We are good */
3724 3726 return (B_TRUE);
3725 3727 }
3726 3728
3727 3729 /* we have to walk the chain */
3728 3730
3729 3731 act1 = p1->ipsp_act;
3730 3732 act2 = p2->ipsp_act;
3731 3733
3732 3734 while (act1 != NULL && act2 != NULL) {
3733 3735
3734 3736 /* otherwise, Are we close enough? */
3735 3737 if (act1->ipa_allow_clear != act2->ipa_allow_clear ||
3736 3738 act1->ipa_want_ah != act2->ipa_want_ah ||
3737 3739 act1->ipa_want_esp != act2->ipa_want_esp ||
3738 3740 act1->ipa_want_se != act2->ipa_want_se) {
3739 3741 /* Nope, we aren't */
3740 3742 return (B_FALSE);
3741 3743 }
3742 3744
3743 3745 if (act1->ipa_want_ah) {
3744 3746 if (act1->ipa_act.ipa_apply.ipp_auth_alg !=
3745 3747 act2->ipa_act.ipa_apply.ipp_auth_alg) {
3746 3748 return (B_FALSE);
3747 3749 }
3748 3750
3749 3751 if (act1->ipa_act.ipa_apply.ipp_ah_minbits !=
3750 3752 act2->ipa_act.ipa_apply.ipp_ah_minbits ||
3751 3753 act1->ipa_act.ipa_apply.ipp_ah_maxbits !=
3752 3754 act2->ipa_act.ipa_apply.ipp_ah_maxbits) {
3753 3755 return (B_FALSE);
3754 3756 }
3755 3757 }
3756 3758
3757 3759 if (act1->ipa_want_esp) {
3758 3760 if (act1->ipa_act.ipa_apply.ipp_use_esp !=
3759 3761 act2->ipa_act.ipa_apply.ipp_use_esp ||
3760 3762 act1->ipa_act.ipa_apply.ipp_use_espa !=
3761 3763 act2->ipa_act.ipa_apply.ipp_use_espa) {
3762 3764 return (B_FALSE);
3763 3765 }
3764 3766
3765 3767 if (act1->ipa_act.ipa_apply.ipp_use_esp) {
3766 3768 if (act1->ipa_act.ipa_apply.ipp_encr_alg !=
3767 3769 act2->ipa_act.ipa_apply.ipp_encr_alg) {
3768 3770 return (B_FALSE);
3769 3771 }
3770 3772
3771 3773 if (act1->ipa_act.ipa_apply.ipp_espe_minbits !=
3772 3774 act2->ipa_act.ipa_apply.ipp_espe_minbits ||
3773 3775 act1->ipa_act.ipa_apply.ipp_espe_maxbits !=
3774 3776 act2->ipa_act.ipa_apply.ipp_espe_maxbits) {
3775 3777 return (B_FALSE);
3776 3778 }
3777 3779 }
3778 3780
3779 3781 if (act1->ipa_act.ipa_apply.ipp_use_espa) {
3780 3782 if (act1->ipa_act.ipa_apply.ipp_esp_auth_alg !=
3781 3783 act2->ipa_act.ipa_apply.ipp_esp_auth_alg) {
3782 3784 return (B_FALSE);
3783 3785 }
3784 3786
3785 3787 if (act1->ipa_act.ipa_apply.ipp_espa_minbits !=
3786 3788 act2->ipa_act.ipa_apply.ipp_espa_minbits ||
3787 3789 act1->ipa_act.ipa_apply.ipp_espa_maxbits !=
3788 3790 act2->ipa_act.ipa_apply.ipp_espa_maxbits) {
3789 3791 return (B_FALSE);
3790 3792 }
3791 3793 }
3792 3794
3793 3795 }
3794 3796
3795 3797 act1 = act1->ipa_next;
3796 3798 act2 = act2->ipa_next;
3797 3799 }
3798 3800
3799 3801 if (act1 != NULL || act2 != NULL) {
3800 3802 return (B_FALSE);
3801 3803 }
3802 3804
3803 3805 return (B_TRUE);
3804 3806 }
3805 3807
3806 3808
3807 3809 /*
3808 3810 * Given a constructed ipsec_policy_t policy rule, enter it into
3809 3811 * the correct policy ruleset.
3810 3812 *
3811 3813 * ipsec_check_policy() is assumed to have succeeded first (to check for
3812 3814 * duplicates).
3813 3815 */
3814 3816 void
3815 3817 ipsec_enter_policy(ipsec_policy_head_t *php, ipsec_policy_t *ipp, int direction,
3816 3818 netstack_t *ns)
3817 3819 {
3818 3820 ipsec_policy_root_t *pr = &php->iph_root[direction];
3819 3821 ipsec_selkey_t *selkey = &ipp->ipsp_sel->ipsl_key;
3820 3822 uint32_t valid = selkey->ipsl_valid;
3821 3823 uint32_t hval = selkey->ipsl_pol_hval;
3822 3824 int af = -1;
3823 3825
3824 3826 ASSERT(RW_WRITE_HELD(&php->iph_lock));
3825 3827
3826 3828 if (valid & IPSL_IPV6) {
3827 3829 ASSERT(!(valid & IPSL_IPV4));
3828 3830 af = IPSEC_AF_V6;
3829 3831 } else {
3830 3832 ASSERT(valid & IPSL_IPV4);
3831 3833 af = IPSEC_AF_V4;
3832 3834 }
3833 3835
3834 3836 php->iph_gen++;
3835 3837
3836 3838 if (hval == IPSEC_SEL_NOHASH) {
3837 3839 HASHLIST_INSERT(ipp, ipsp_hash, pr->ipr_nonhash[af]);
3838 3840 } else {
3839 3841 HASH_LOCK(pr->ipr_hash, hval);
3840 3842 HASH_INSERT(ipp, ipsp_hash, pr->ipr_hash, hval);
3841 3843 HASH_UNLOCK(pr->ipr_hash, hval);
3842 3844 }
3843 3845
3844 3846 ipsec_insert_always(&php->iph_rulebyid, ipp);
3845 3847
3846 3848 ipsec_update_present_flags(ns->netstack_ipsec);
3847 3849 }
3848 3850
3849 3851 static void
3850 3852 ipsec_ipr_flush(ipsec_policy_head_t *php, ipsec_policy_root_t *ipr)
3851 3853 {
3852 3854 ipsec_policy_t *ip, *nip;
3853 3855 int af, chain, nchain;
3854 3856
3855 3857 for (af = 0; af < IPSEC_NAF; af++) {
3856 3858 for (ip = ipr->ipr_nonhash[af]; ip != NULL; ip = nip) {
3857 3859 nip = ip->ipsp_hash.hash_next;
3858 3860 IPPOL_UNCHAIN(php, ip);
3859 3861 }
3860 3862 ipr->ipr_nonhash[af] = NULL;
3861 3863 }
3862 3864 nchain = ipr->ipr_nchains;
3863 3865
3864 3866 for (chain = 0; chain < nchain; chain++) {
3865 3867 for (ip = ipr->ipr_hash[chain].hash_head; ip != NULL;
3866 3868 ip = nip) {
3867 3869 nip = ip->ipsp_hash.hash_next;
3868 3870 IPPOL_UNCHAIN(php, ip);
3869 3871 }
3870 3872 ipr->ipr_hash[chain].hash_head = NULL;
3871 3873 }
3872 3874 }
3873 3875
3874 3876 /*
3875 3877 * Create and insert inbound or outbound policy associated with actp for the
3876 3878 * address family fam into the policy head ph. Returns B_TRUE if policy was
3877 3879 * inserted, and B_FALSE otherwise.
3878 3880 */
3879 3881 boolean_t
3880 3882 ipsec_polhead_insert(ipsec_policy_head_t *ph, ipsec_act_t *actp, uint_t nact,
3881 3883 int fam, int ptype, netstack_t *ns)
3882 3884 {
3883 3885 ipsec_selkey_t sel;
3884 3886 ipsec_policy_t *pol;
3885 3887 ipsec_policy_root_t *pr;
3886 3888
3887 3889 bzero(&sel, sizeof (sel));
3888 3890 sel.ipsl_valid = (fam == IPSEC_AF_V4 ? IPSL_IPV4 : IPSL_IPV6);
3889 3891 if ((pol = ipsec_policy_create(&sel, actp, nact, IPSEC_PRIO_SOCKET,
3890 3892 NULL, ns)) != NULL) {
3891 3893 pr = &ph->iph_root[ptype];
3892 3894 HASHLIST_INSERT(pol, ipsp_hash, pr->ipr_nonhash[fam]);
3893 3895 ipsec_insert_always(&ph->iph_rulebyid, pol);
3894 3896 }
3895 3897 return (pol != NULL);
3896 3898 }
3897 3899
3898 3900 void
3899 3901 ipsec_polhead_flush(ipsec_policy_head_t *php, netstack_t *ns)
3900 3902 {
3901 3903 int dir;
3902 3904
3903 3905 ASSERT(RW_WRITE_HELD(&php->iph_lock));
3904 3906
3905 3907 for (dir = 0; dir < IPSEC_NTYPES; dir++)
3906 3908 ipsec_ipr_flush(php, &php->iph_root[dir]);
3907 3909
3908 3910 php->iph_gen++;
3909 3911 ipsec_update_present_flags(ns->netstack_ipsec);
3910 3912 }
3911 3913
3912 3914 void
3913 3915 ipsec_polhead_free(ipsec_policy_head_t *php, netstack_t *ns)
3914 3916 {
3915 3917 int dir;
3916 3918
3917 3919 ASSERT(php->iph_refs == 0);
3918 3920
3919 3921 rw_enter(&php->iph_lock, RW_WRITER);
3920 3922 ipsec_polhead_flush(php, ns);
3921 3923 rw_exit(&php->iph_lock);
3922 3924 rw_destroy(&php->iph_lock);
3923 3925 for (dir = 0; dir < IPSEC_NTYPES; dir++) {
3924 3926 ipsec_policy_root_t *ipr = &php->iph_root[dir];
3925 3927 int chain;
3926 3928
3927 3929 for (chain = 0; chain < ipr->ipr_nchains; chain++)
3928 3930 mutex_destroy(&(ipr->ipr_hash[chain].hash_lock));
3929 3931
3930 3932 }
3931 3933 ipsec_polhead_free_table(php);
3932 3934 kmem_free(php, sizeof (*php));
3933 3935 }
3934 3936
3935 3937 static void
3936 3938 ipsec_ipr_init(ipsec_policy_root_t *ipr)
3937 3939 {
3938 3940 int af;
3939 3941
3940 3942 ipr->ipr_nchains = 0;
3941 3943 ipr->ipr_hash = NULL;
3942 3944
3943 3945 for (af = 0; af < IPSEC_NAF; af++) {
3944 3946 ipr->ipr_nonhash[af] = NULL;
3945 3947 }
3946 3948 }
3947 3949
3948 3950 ipsec_policy_head_t *
3949 3951 ipsec_polhead_create(void)
3950 3952 {
3951 3953 ipsec_policy_head_t *php;
3952 3954
3953 3955 php = kmem_alloc(sizeof (*php), KM_NOSLEEP);
3954 3956 if (php == NULL)
3955 3957 return (php);
3956 3958
3957 3959 rw_init(&php->iph_lock, NULL, RW_DEFAULT, NULL);
3958 3960 php->iph_refs = 1;
3959 3961 php->iph_gen = 0;
3960 3962
3961 3963 ipsec_ipr_init(&php->iph_root[IPSEC_TYPE_INBOUND]);
3962 3964 ipsec_ipr_init(&php->iph_root[IPSEC_TYPE_OUTBOUND]);
3963 3965
3964 3966 avl_create(&php->iph_rulebyid, ipsec_policy_cmpbyid,
3965 3967 sizeof (ipsec_policy_t), offsetof(ipsec_policy_t, ipsp_byid));
3966 3968
3967 3969 return (php);
3968 3970 }
3969 3971
3970 3972 /*
3971 3973 * Clone the policy head into a new polhead; release one reference to the
3972 3974 * old one and return the only reference to the new one.
3973 3975 * If the old one had a refcount of 1, just return it.
3974 3976 */
3975 3977 ipsec_policy_head_t *
3976 3978 ipsec_polhead_split(ipsec_policy_head_t *php, netstack_t *ns)
3977 3979 {
3978 3980 ipsec_policy_head_t *nphp;
3979 3981
3980 3982 if (php == NULL)
3981 3983 return (ipsec_polhead_create());
3982 3984 else if (php->iph_refs == 1)
3983 3985 return (php);
3984 3986
3985 3987 nphp = ipsec_polhead_create();
3986 3988 if (nphp == NULL)
3987 3989 return (NULL);
3988 3990
3989 3991 if (ipsec_copy_polhead(php, nphp, ns) != 0) {
3990 3992 ipsec_polhead_free(nphp, ns);
3991 3993 return (NULL);
3992 3994 }
3993 3995 IPPH_REFRELE(php, ns);
3994 3996 return (nphp);
3995 3997 }
3996 3998
3997 3999 /*
3998 4000 * When sending a response to an ICMP request or generating an RST
3999 4001 * in the TCP case, the outbound packets need to go out at the same
4000 4002 * level of protection as the incoming ones, i.e., we associate our
4001 4003 * outbound policy with how the packet came in. We call this after we
4002 4004 * have accepted the incoming packet, which may or may not have been
4003 4005 * in the clear, and hence we send the reply back with a policy
4004 4006 * matching the incoming datagram's policy.
4005 4007 *
4006 4008 * NOTE: This technique serves two purposes:
4007 4009 *
4008 4010 * 1) If we have multiple outbound policies, we send out a reply
4009 4011 * matching with how it came in rather than matching the outbound
4010 4012 * policy.
4011 4013 *
4012 4014 * 2) For asymmetric policies, we want to make sure that incoming
4013 4015 * and outgoing traffic has the same level of protection. Asymmetric
4014 4016 * policies exist only with global policy, where we may not have
4015 4017 * both outbound and inbound policy at the same time.
4016 4018 *
4017 4019 * NOTE2: This function is called by cleartext cases, so it needs to be
4018 4020 * in IP proper.
4019 4021 *
4020 4022 * Note: the caller has moved other parts of ira into ixa already.
4021 4023 */
4022 4024 boolean_t
4023 4025 ipsec_in_to_out(ip_recv_attr_t *ira, ip_xmit_attr_t *ixa, mblk_t *data_mp,
4024 4026 ipha_t *ipha, ip6_t *ip6h)
4025 4027 {
4026 4028 ipsec_selector_t sel;
4027 4029 ipsec_action_t *reflect_action = NULL;
4028 4030 netstack_t *ns = ixa->ixa_ipst->ips_netstack;
4029 4031
4030 4032 bzero((void*)&sel, sizeof (sel));
4031 4033
4032 4034 if (ira->ira_ipsec_action != NULL) {
4033 4035 /* transfer reference.. */
4034 4036 reflect_action = ira->ira_ipsec_action;
4035 4037 ira->ira_ipsec_action = NULL;
4036 4038 } else if (!(ira->ira_flags & IRAF_LOOPBACK))
4037 4039 reflect_action = ipsec_in_to_out_action(ira);
4038 4040
4039 4041 /*
4040 4042 * The caller is going to send the datagram out which might
4041 4043 * go on the wire or delivered locally through ire_send_local.
4042 4044 *
4043 4045 * 1) If it goes out on the wire, new associations will be
4044 4046 * obtained.
4045 4047 * 2) If it is delivered locally, ire_send_local will convert
4046 4048 * this ip_xmit_attr_t back to a ip_recv_attr_t looking at the
4047 4049 * requests.
4048 4050 */
4049 4051 ixa->ixa_ipsec_action = reflect_action;
4050 4052
4051 4053 if (!ipsec_init_outbound_ports(&sel, data_mp, ipha, ip6h, 0,
4052 4054 ns->netstack_ipsec)) {
4053 4055 /* Note: data_mp already consumed and ip_drop_packet done */
4054 4056 return (B_FALSE);
4055 4057 }
4056 4058 ixa->ixa_ipsec_src_port = sel.ips_local_port;
4057 4059 ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4058 4060 ixa->ixa_ipsec_proto = sel.ips_protocol;
4059 4061 ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4060 4062 ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4061 4063
4062 4064 /*
4063 4065 * Don't use global policy for this, as we want
4064 4066 * to use the same protection that was applied to the inbound packet.
4065 4067 * Thus we set IXAF_NO_IPSEC if it arrived in the clear so that
4066 4068 * it is sent in the clear.
4067 4069 */
4068 4070 if (ira->ira_flags & IRAF_IPSEC_SECURE)
4069 4071 ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4070 4072 else
4071 4073 ixa->ixa_flags |= IXAF_NO_IPSEC;
4072 4074
4073 4075 return (B_TRUE);
4074 4076 }
4075 4077
4076 4078 void
4077 4079 ipsec_out_release_refs(ip_xmit_attr_t *ixa)
4078 4080 {
4079 4081 if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE))
4080 4082 return;
4081 4083
4082 4084 if (ixa->ixa_ipsec_ah_sa != NULL) {
4083 4085 IPSA_REFRELE(ixa->ixa_ipsec_ah_sa);
4084 4086 ixa->ixa_ipsec_ah_sa = NULL;
4085 4087 }
4086 4088 if (ixa->ixa_ipsec_esp_sa != NULL) {
4087 4089 IPSA_REFRELE(ixa->ixa_ipsec_esp_sa);
4088 4090 ixa->ixa_ipsec_esp_sa = NULL;
4089 4091 }
4090 4092 if (ixa->ixa_ipsec_policy != NULL) {
4091 4093 IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4092 4094 ixa->ixa_ipsec_policy = NULL;
4093 4095 }
4094 4096 if (ixa->ixa_ipsec_action != NULL) {
4095 4097 IPACT_REFRELE(ixa->ixa_ipsec_action);
4096 4098 ixa->ixa_ipsec_action = NULL;
4097 4099 }
4098 4100 if (ixa->ixa_ipsec_latch) {
4099 4101 IPLATCH_REFRELE(ixa->ixa_ipsec_latch);
4100 4102 ixa->ixa_ipsec_latch = NULL;
4101 4103 }
4102 4104 /* Clear the soft references to the SAs */
4103 4105 ixa->ixa_ipsec_ref[0].ipsr_sa = NULL;
4104 4106 ixa->ixa_ipsec_ref[0].ipsr_bucket = NULL;
4105 4107 ixa->ixa_ipsec_ref[0].ipsr_gen = 0;
4106 4108 ixa->ixa_ipsec_ref[1].ipsr_sa = NULL;
4107 4109 ixa->ixa_ipsec_ref[1].ipsr_bucket = NULL;
4108 4110 ixa->ixa_ipsec_ref[1].ipsr_gen = 0;
4109 4111 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4110 4112 }
4111 4113
4112 4114 void
4113 4115 ipsec_in_release_refs(ip_recv_attr_t *ira)
4114 4116 {
4115 4117 if (!(ira->ira_flags & IRAF_IPSEC_SECURE))
4116 4118 return;
4117 4119
4118 4120 if (ira->ira_ipsec_ah_sa != NULL) {
4119 4121 IPSA_REFRELE(ira->ira_ipsec_ah_sa);
4120 4122 ira->ira_ipsec_ah_sa = NULL;
4121 4123 }
4122 4124 if (ira->ira_ipsec_esp_sa != NULL) {
4123 4125 IPSA_REFRELE(ira->ira_ipsec_esp_sa);
4124 4126 ira->ira_ipsec_esp_sa = NULL;
4125 4127 }
4126 4128 ira->ira_flags &= ~IRAF_IPSEC_SECURE;
4127 4129 }
4128 4130
4129 4131 /*
4130 4132 * This is called from ire_send_local when a packet
4131 4133 * is looped back. We set up the ip_recv_attr_t, "borrowing" the references
4132 4134 * held by the callers.
4133 4135 * Note that we don't do any IPsec but we carry the actions and IPSEC flags
4134 4136 * across so that the fanout policy checks see that IPsec was applied.
4135 4137 *
4136 4138 * The caller should do ipsec_in_release_refs() on the ira by calling
4137 4139 * ira_cleanup().
4138 4140 */
4139 4141 void
4140 4142 ipsec_out_to_in(ip_xmit_attr_t *ixa, ill_t *ill, ip_recv_attr_t *ira)
4141 4143 {
4142 4144 ipsec_policy_t *pol;
4143 4145 ipsec_action_t *act;
4144 4146
4145 4147 /* Non-IPsec operations */
4146 4148 ira->ira_free_flags = 0;
4147 4149 ira->ira_zoneid = ixa->ixa_zoneid;
4148 4150 ira->ira_cred = ixa->ixa_cred;
4149 4151 ira->ira_cpid = ixa->ixa_cpid;
4150 4152 ira->ira_tsl = ixa->ixa_tsl;
4151 4153 ira->ira_ill = ira->ira_rill = ill;
4152 4154 ira->ira_flags = ixa->ixa_flags & IAF_MASK;
4153 4155 ira->ira_no_loop_zoneid = ixa->ixa_no_loop_zoneid;
4154 4156 ira->ira_pktlen = ixa->ixa_pktlen;
4155 4157 ira->ira_ip_hdr_length = ixa->ixa_ip_hdr_length;
4156 4158 ira->ira_protocol = ixa->ixa_protocol;
4157 4159 ira->ira_mhip = NULL;
4158 4160
4159 4161 ira->ira_flags |= IRAF_LOOPBACK | IRAF_L2SRC_LOOPBACK;
4160 4162
4161 4163 ira->ira_sqp = ixa->ixa_sqp;
4162 4164 ira->ira_ring = NULL;
4163 4165
4164 4166 ira->ira_ruifindex = ill->ill_phyint->phyint_ifindex;
4165 4167 ira->ira_rifindex = ira->ira_ruifindex;
4166 4168
4167 4169 if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE))
4168 4170 return;
4169 4171
4170 4172 ira->ira_flags |= IRAF_IPSEC_SECURE;
4171 4173
4172 4174 ira->ira_ipsec_ah_sa = NULL;
4173 4175 ira->ira_ipsec_esp_sa = NULL;
4174 4176
4175 4177 act = ixa->ixa_ipsec_action;
4176 4178 if (act == NULL) {
4177 4179 pol = ixa->ixa_ipsec_policy;
4178 4180 if (pol != NULL) {
4179 4181 act = pol->ipsp_act;
4180 4182 IPACT_REFHOLD(act);
4181 4183 }
4182 4184 }
4183 4185 ixa->ixa_ipsec_action = NULL;
4184 4186 ira->ira_ipsec_action = act;
4185 4187 }
4186 4188
4187 4189 /*
4188 4190 * Consults global policy and per-socket policy to see whether this datagram
4189 4191 * should go out secure. If so, it updates the ip_xmit_attr_t.
4190 4192 * Should not be used when connecting, since then we want to latch the policy.
4191 4193 *
4192 4194 * If connp is NULL we just look at the global policy.
4193 4195 *
4194 4196 * Returns NULL if the packet was dropped, in which case the MIB has
4195 4197 * been incremented and ip_drop_packet done.
4196 4198 */
4197 4199 mblk_t *
4198 4200 ip_output_attach_policy(mblk_t *mp, ipha_t *ipha, ip6_t *ip6h,
4199 4201 const conn_t *connp, ip_xmit_attr_t *ixa)
4200 4202 {
4201 4203 ipsec_selector_t sel;
4202 4204 boolean_t policy_present;
4203 4205 ip_stack_t *ipst = ixa->ixa_ipst;
4204 4206 netstack_t *ns = ipst->ips_netstack;
4205 4207 ipsec_stack_t *ipss = ns->netstack_ipsec;
4206 4208 ipsec_policy_t *p;
4207 4209
4208 4210 ixa->ixa_ipsec_policy_gen = ipss->ipsec_system_policy.iph_gen;
4209 4211 ASSERT((ipha != NULL && ip6h == NULL) ||
4210 4212 (ip6h != NULL && ipha == NULL));
4211 4213
4212 4214 if (ipha != NULL)
4213 4215 policy_present = ipss->ipsec_outbound_v4_policy_present;
4214 4216 else
4215 4217 policy_present = ipss->ipsec_outbound_v6_policy_present;
4216 4218
4217 4219 if (!policy_present && (connp == NULL || connp->conn_policy == NULL))
4218 4220 return (mp);
4219 4221
4220 4222 bzero((void*)&sel, sizeof (sel));
4221 4223
4222 4224 if (ipha != NULL) {
4223 4225 sel.ips_local_addr_v4 = ipha->ipha_src;
4224 4226 sel.ips_remote_addr_v4 = ip_get_dst(ipha);
4225 4227 sel.ips_isv4 = B_TRUE;
4226 4228 } else {
4227 4229 sel.ips_isv4 = B_FALSE;
4228 4230 sel.ips_local_addr_v6 = ip6h->ip6_src;
4229 4231 sel.ips_remote_addr_v6 = ip_get_dst_v6(ip6h, mp, NULL);
4230 4232 }
4231 4233 sel.ips_protocol = ixa->ixa_protocol;
4232 4234
4233 4235 if (!ipsec_init_outbound_ports(&sel, mp, ipha, ip6h, 0, ipss)) {
4234 4236 if (ipha != NULL) {
4235 4237 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
4236 4238 } else {
4237 4239 BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsOutDiscards);
4238 4240 }
4239 4241 /* Note: mp already consumed and ip_drop_packet done */
4240 4242 return (NULL);
4241 4243 }
4242 4244
4243 4245 ASSERT(ixa->ixa_ipsec_policy == NULL);
4244 4246 p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, &sel, ns);
4245 4247 ixa->ixa_ipsec_policy = p;
4246 4248 if (p != NULL) {
4247 4249 ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4248 4250 if (connp == NULL || connp->conn_policy == NULL)
4249 4251 ixa->ixa_flags |= IXAF_IPSEC_GLOBAL_POLICY;
4250 4252 } else {
4251 4253 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4252 4254 }
4253 4255
4254 4256 /*
4255 4257 * Copy the right port information.
4256 4258 */
4257 4259 ixa->ixa_ipsec_src_port = sel.ips_local_port;
4258 4260 ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4259 4261 ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4260 4262 ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4261 4263 ixa->ixa_ipsec_proto = sel.ips_protocol;
4262 4264 return (mp);
4263 4265 }
4264 4266
4265 4267 /*
4266 4268 * When appropriate, this function caches inbound and outbound policy
4267 4269 * for this connection. The outbound policy is stored in conn_ixa.
4268 4270 * Note that it can not be used for SCTP since conn_faddr isn't set for SCTP.
4269 4271 *
4270 4272 * XXX need to work out more details about per-interface policy and
4271 4273 * caching here!
4272 4274 *
4273 4275 * XXX may want to split inbound and outbound caching for ill..
4274 4276 */
4275 4277 int
4276 4278 ipsec_conn_cache_policy(conn_t *connp, boolean_t isv4)
4277 4279 {
4278 4280 boolean_t global_policy_present;
4279 4281 netstack_t *ns = connp->conn_netstack;
4280 4282 ipsec_stack_t *ipss = ns->netstack_ipsec;
4281 4283
4282 4284 connp->conn_ixa->ixa_ipsec_policy_gen =
4283 4285 ipss->ipsec_system_policy.iph_gen;
4284 4286 /*
4285 4287 * There is no policy latching for ICMP sockets because we can't
4286 4288 * decide on which policy to use until we see the packet and get
4287 4289 * type/code selectors.
4288 4290 */
4289 4291 if (connp->conn_proto == IPPROTO_ICMP ||
4290 4292 connp->conn_proto == IPPROTO_ICMPV6) {
4291 4293 connp->conn_in_enforce_policy =
4292 4294 connp->conn_out_enforce_policy = B_TRUE;
4293 4295 if (connp->conn_latch != NULL) {
4294 4296 IPLATCH_REFRELE(connp->conn_latch);
4295 4297 connp->conn_latch = NULL;
4296 4298 }
4297 4299 if (connp->conn_latch_in_policy != NULL) {
4298 4300 IPPOL_REFRELE(connp->conn_latch_in_policy);
4299 4301 connp->conn_latch_in_policy = NULL;
4300 4302 }
4301 4303 if (connp->conn_latch_in_action != NULL) {
4302 4304 IPACT_REFRELE(connp->conn_latch_in_action);
4303 4305 connp->conn_latch_in_action = NULL;
4304 4306 }
4305 4307 if (connp->conn_ixa->ixa_ipsec_policy != NULL) {
4306 4308 IPPOL_REFRELE(connp->conn_ixa->ixa_ipsec_policy);
4307 4309 connp->conn_ixa->ixa_ipsec_policy = NULL;
4308 4310 }
4309 4311 if (connp->conn_ixa->ixa_ipsec_action != NULL) {
4310 4312 IPACT_REFRELE(connp->conn_ixa->ixa_ipsec_action);
4311 4313 connp->conn_ixa->ixa_ipsec_action = NULL;
4312 4314 }
4313 4315 connp->conn_ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4314 4316 return (0);
4315 4317 }
4316 4318
4317 4319 global_policy_present = isv4 ?
4318 4320 (ipss->ipsec_outbound_v4_policy_present ||
4319 4321 ipss->ipsec_inbound_v4_policy_present) :
4320 4322 (ipss->ipsec_outbound_v6_policy_present ||
4321 4323 ipss->ipsec_inbound_v6_policy_present);
4322 4324
4323 4325 if ((connp->conn_policy != NULL) || global_policy_present) {
4324 4326 ipsec_selector_t sel;
4325 4327 ipsec_policy_t *p;
4326 4328
4327 4329 if (connp->conn_latch == NULL &&
4328 4330 (connp->conn_latch = iplatch_create()) == NULL) {
4329 4331 return (ENOMEM);
4330 4332 }
4331 4333
4332 4334 bzero((void*)&sel, sizeof (sel));
4333 4335
4334 4336 sel.ips_protocol = connp->conn_proto;
4335 4337 sel.ips_local_port = connp->conn_lport;
4336 4338 sel.ips_remote_port = connp->conn_fport;
4337 4339 sel.ips_is_icmp_inv_acq = 0;
4338 4340 sel.ips_isv4 = isv4;
4339 4341 if (isv4) {
4340 4342 sel.ips_local_addr_v4 = connp->conn_laddr_v4;
4341 4343 sel.ips_remote_addr_v4 = connp->conn_faddr_v4;
4342 4344 } else {
4343 4345 sel.ips_local_addr_v6 = connp->conn_laddr_v6;
4344 4346 sel.ips_remote_addr_v6 = connp->conn_faddr_v6;
4345 4347 }
4346 4348
4347 4349 p = ipsec_find_policy(IPSEC_TYPE_INBOUND, connp, &sel, ns);
4348 4350 if (connp->conn_latch_in_policy != NULL)
4349 4351 IPPOL_REFRELE(connp->conn_latch_in_policy);
4350 4352 connp->conn_latch_in_policy = p;
4351 4353 connp->conn_in_enforce_policy = (p != NULL);
4352 4354
4353 4355 p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, &sel, ns);
4354 4356 if (connp->conn_ixa->ixa_ipsec_policy != NULL)
4355 4357 IPPOL_REFRELE(connp->conn_ixa->ixa_ipsec_policy);
4356 4358 connp->conn_ixa->ixa_ipsec_policy = p;
4357 4359 connp->conn_out_enforce_policy = (p != NULL);
4358 4360 if (p != NULL) {
4359 4361 connp->conn_ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4360 4362 if (connp->conn_policy == NULL) {
4361 4363 connp->conn_ixa->ixa_flags |=
4362 4364 IXAF_IPSEC_GLOBAL_POLICY;
4363 4365 }
4364 4366 } else {
4365 4367 connp->conn_ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4366 4368 }
4367 4369 /* Clear the latched actions too, in case we're recaching. */
4368 4370 if (connp->conn_ixa->ixa_ipsec_action != NULL) {
4369 4371 IPACT_REFRELE(connp->conn_ixa->ixa_ipsec_action);
4370 4372 connp->conn_ixa->ixa_ipsec_action = NULL;
4371 4373 }
4372 4374 if (connp->conn_latch_in_action != NULL) {
4373 4375 IPACT_REFRELE(connp->conn_latch_in_action);
4374 4376 connp->conn_latch_in_action = NULL;
4375 4377 }
4376 4378 connp->conn_ixa->ixa_ipsec_src_port = sel.ips_local_port;
4377 4379 connp->conn_ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4378 4380 connp->conn_ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4379 4381 connp->conn_ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4380 4382 connp->conn_ixa->ixa_ipsec_proto = sel.ips_protocol;
4381 4383 } else {
4382 4384 connp->conn_ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4383 4385 }
4384 4386
4385 4387 /*
4386 4388 * We may or may not have policy for this endpoint. We still set
4387 4389 * conn_policy_cached so that inbound datagrams don't have to look
4388 4390 * at global policy as policy is considered latched for these
4389 4391 * endpoints. We should not set conn_policy_cached until the conn
4390 4392 * reflects the actual policy. If we *set* this before inheriting
4391 4393 * the policy there is a window where the check
4392 4394 * CONN_INBOUND_POLICY_PRESENT will neither check with the policy
4393 4395 * on the conn (because we have not yet copied the policy on to
4394 4396 * conn and hence not set conn_in_enforce_policy) nor with the
4395 4397 * global policy (because conn_policy_cached is already set).
4396 4398 */
4397 4399 connp->conn_policy_cached = B_TRUE;
4398 4400 return (0);
4399 4401 }
4400 4402
4401 4403 /*
4402 4404 * When appropriate, this function caches outbound policy for faddr/fport.
4403 4405 * It is used when we are not connected i.e., when we can not latch the
4404 4406 * policy.
4405 4407 */
4406 4408 void
4407 4409 ipsec_cache_outbound_policy(const conn_t *connp, const in6_addr_t *v6src,
4408 4410 const in6_addr_t *v6dst, in_port_t dstport, ip_xmit_attr_t *ixa)
4409 4411 {
4410 4412 boolean_t isv4 = (ixa->ixa_flags & IXAF_IS_IPV4) != 0;
4411 4413 boolean_t global_policy_present;
4412 4414 netstack_t *ns = connp->conn_netstack;
4413 4415 ipsec_stack_t *ipss = ns->netstack_ipsec;
4414 4416
4415 4417 ixa->ixa_ipsec_policy_gen = ipss->ipsec_system_policy.iph_gen;
4416 4418
4417 4419 /*
4418 4420 * There is no policy caching for ICMP sockets because we can't
4419 4421 * decide on which policy to use until we see the packet and get
4420 4422 * type/code selectors.
4421 4423 */
4422 4424 if (connp->conn_proto == IPPROTO_ICMP ||
4423 4425 connp->conn_proto == IPPROTO_ICMPV6) {
4424 4426 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4425 4427 if (ixa->ixa_ipsec_policy != NULL) {
4426 4428 IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4427 4429 ixa->ixa_ipsec_policy = NULL;
4428 4430 }
4429 4431 if (ixa->ixa_ipsec_action != NULL) {
4430 4432 IPACT_REFRELE(ixa->ixa_ipsec_action);
4431 4433 ixa->ixa_ipsec_action = NULL;
4432 4434 }
4433 4435 return;
4434 4436 }
4435 4437
4436 4438 global_policy_present = isv4 ?
4437 4439 (ipss->ipsec_outbound_v4_policy_present ||
4438 4440 ipss->ipsec_inbound_v4_policy_present) :
4439 4441 (ipss->ipsec_outbound_v6_policy_present ||
4440 4442 ipss->ipsec_inbound_v6_policy_present);
4441 4443
4442 4444 if ((connp->conn_policy != NULL) || global_policy_present) {
4443 4445 ipsec_selector_t sel;
4444 4446 ipsec_policy_t *p;
4445 4447
4446 4448 bzero((void*)&sel, sizeof (sel));
4447 4449
4448 4450 sel.ips_protocol = connp->conn_proto;
4449 4451 sel.ips_local_port = connp->conn_lport;
4450 4452 sel.ips_remote_port = dstport;
4451 4453 sel.ips_is_icmp_inv_acq = 0;
4452 4454 sel.ips_isv4 = isv4;
4453 4455 if (isv4) {
4454 4456 IN6_V4MAPPED_TO_IPADDR(v6src, sel.ips_local_addr_v4);
4455 4457 IN6_V4MAPPED_TO_IPADDR(v6dst, sel.ips_remote_addr_v4);
4456 4458 } else {
4457 4459 sel.ips_local_addr_v6 = *v6src;
4458 4460 sel.ips_remote_addr_v6 = *v6dst;
4459 4461 }
4460 4462
4461 4463 p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, &sel, ns);
4462 4464 if (ixa->ixa_ipsec_policy != NULL)
4463 4465 IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4464 4466 ixa->ixa_ipsec_policy = p;
4465 4467 if (p != NULL) {
4466 4468 ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4467 4469 if (connp->conn_policy == NULL)
4468 4470 ixa->ixa_flags |= IXAF_IPSEC_GLOBAL_POLICY;
4469 4471 } else {
4470 4472 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4471 4473 }
4472 4474 /* Clear the latched actions too, in case we're recaching. */
4473 4475 if (ixa->ixa_ipsec_action != NULL) {
4474 4476 IPACT_REFRELE(ixa->ixa_ipsec_action);
4475 4477 ixa->ixa_ipsec_action = NULL;
4476 4478 }
4477 4479
4478 4480 ixa->ixa_ipsec_src_port = sel.ips_local_port;
4479 4481 ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4480 4482 ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4481 4483 ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4482 4484 ixa->ixa_ipsec_proto = sel.ips_protocol;
4483 4485 } else {
4484 4486 ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4485 4487 if (ixa->ixa_ipsec_policy != NULL) {
4486 4488 IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4487 4489 ixa->ixa_ipsec_policy = NULL;
4488 4490 }
4489 4491 if (ixa->ixa_ipsec_action != NULL) {
4490 4492 IPACT_REFRELE(ixa->ixa_ipsec_action);
4491 4493 ixa->ixa_ipsec_action = NULL;
4492 4494 }
4493 4495 }
4494 4496 }
4495 4497
4496 4498 /*
4497 4499 * Returns B_FALSE if the policy has gone stale.
4498 4500 */
4499 4501 boolean_t
4500 4502 ipsec_outbound_policy_current(ip_xmit_attr_t *ixa)
4501 4503 {
4502 4504 ipsec_stack_t *ipss = ixa->ixa_ipst->ips_netstack->netstack_ipsec;
4503 4505
4504 4506 if (!(ixa->ixa_flags & IXAF_IPSEC_GLOBAL_POLICY))
4505 4507 return (B_TRUE);
4506 4508
4507 4509 return (ixa->ixa_ipsec_policy_gen == ipss->ipsec_system_policy.iph_gen);
4508 4510 }
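
    Review note: staleness here is a pure generation-number compare, and
    only cached lookups that came from the global polhead (the
    IXAF_IPSEC_GLOBAL_POLICY case) can go stale this way; conn-private
    policy isn't invalidated through this path. A distilled sketch with
    hypothetical types:

        #include <stdint.h>
        #include <stdbool.h>

        typedef struct polhead {
            uint64_t        gen;    /* bumped on every add/delete/flush */
        } polhead_t;

        typedef struct cached {
            bool            from_global;    /* IXAF_IPSEC_GLOBAL_POLICY */
            uint64_t        gen_at_lookup;
        } cached_t;

        /* false means the cached result is stale and must be re-fetched */
        bool
        cache_current(const cached_t *c, const polhead_t *global)
        {
            if (!c->from_global)
                return (true);  /* conn-private policy: no global check */
            return (c->gen_at_lookup == global->gen);
        }
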
4509 4511
4510 4512 void
4511 4513 iplatch_free(ipsec_latch_t *ipl)
4512 4514 {
4513 4515 if (ipl->ipl_local_cid != NULL)
4514 4516 IPSID_REFRELE(ipl->ipl_local_cid);
4515 4517 if (ipl->ipl_remote_cid != NULL)
4516 4518 IPSID_REFRELE(ipl->ipl_remote_cid);
4517 4519 mutex_destroy(&ipl->ipl_lock);
4518 4520 kmem_free(ipl, sizeof (*ipl));
4519 4521 }
4520 4522
4521 4523 ipsec_latch_t *
4522 4524 iplatch_create()
4523 4525 {
4524 4526 ipsec_latch_t *ipl = kmem_zalloc(sizeof (*ipl), KM_NOSLEEP);
4525 4527 if (ipl == NULL)
4526 4528 return (ipl);
4527 4529 mutex_init(&ipl->ipl_lock, NULL, MUTEX_DEFAULT, NULL);
4528 4530 ipl->ipl_refcnt = 1;
4529 4531 return (ipl);
4530 4532 }
4531 4533
4532 4534 /*
4533 4535 * Hash function for ID hash table.
4534 4536 */
4535 4537 static uint32_t
4536 4538 ipsid_hash(int idtype, char *idstring)
4537 4539 {
4538 4540 uint32_t hval = idtype;
4539 4541 unsigned char c;
4540 4542
4541 4543 while ((c = *idstring++) != 0) {
4542 4544 hval = (hval << 4) | (hval >> 28);
4543 4545 hval ^= c;
4544 4546 }
4545 4547 hval = hval ^ (hval >> 16);
4546 4548 return (hval & (IPSID_HASHSIZE-1));
4547 4549 }
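
    Review note: a userland copy of the rotate-and-XOR hash is handy for
    eyeballing bucket distribution over real identity strings; the final
    mask only works because IPSID_HASHSIZE is a power of two (64 here is
    an assumption):

        #include <stdio.h>
        #include <stdint.h>

        #define IPSID_HASHSIZE 64   /* assumed; must be a power of two */

        static uint32_t
        ipsid_hash(int idtype, const char *idstring)
        {
            uint32_t hval = (uint32_t)idtype;
            unsigned char c;

            while ((c = (unsigned char)*idstring++) != 0) {
                hval = (hval << 4) | (hval >> 28);  /* rotate left 4 */
                hval ^= c;
            }
            hval ^= hval >> 16;     /* fold high bits into masked range */
            return (hval & (IPSID_HASHSIZE - 1));
        }

        int
        main(void)
        {
            (void) printf("%u\n", ipsid_hash(1, "gw.example.com"));
            return (0);
        }
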
4548 4550
4549 4551 /*
4550 4552 * Look up identity string in hash table. Return identity object
4551 4553 * corresponding to the name -- either preexisting, or newly allocated.
4552 4554 *
4553 4555 * Return NULL if we need to allocate a new one and can't get memory.
4554 4556 */
4555 4557 ipsid_t *
4556 4558 ipsid_lookup(int idtype, char *idstring, netstack_t *ns)
4557 4559 {
4558 4560 ipsid_t *retval;
4559 4561 char *nstr;
4560 4562 int idlen = strlen(idstring) + 1;
4561 4563 ipsec_stack_t *ipss = ns->netstack_ipsec;
4562 4564 ipsif_t *bucket;
4563 4565
4564 4566 bucket = &ipss->ipsec_ipsid_buckets[ipsid_hash(idtype, idstring)];
4565 4567
4566 4568 mutex_enter(&bucket->ipsif_lock);
4567 4569
4568 4570 for (retval = bucket->ipsif_head; retval != NULL;
4569 4571 retval = retval->ipsid_next) {
4570 4572 if (idtype != retval->ipsid_type)
4571 4573 continue;
4572 4574 if (bcmp(idstring, retval->ipsid_cid, idlen) != 0)
4573 4575 continue;
4574 4576
4575 4577 IPSID_REFHOLD(retval);
4576 4578 mutex_exit(&bucket->ipsif_lock);
4577 4579 return (retval);
4578 4580 }
4579 4581
4580 4582 retval = kmem_alloc(sizeof (*retval), KM_NOSLEEP);
4581 4583 if (!retval) {
4582 4584 mutex_exit(&bucket->ipsif_lock);
4583 4585 return (NULL);
4584 4586 }
4585 4587
4586 4588 nstr = kmem_alloc(idlen, KM_NOSLEEP);
4587 4589 if (!nstr) {
4588 4590 mutex_exit(&bucket->ipsif_lock);
4589 4591 kmem_free(retval, sizeof (*retval));
4590 4592 return (NULL);
4591 4593 }
4592 4594
4593 4595 retval->ipsid_refcnt = 1;
4594 4596 retval->ipsid_next = bucket->ipsif_head;
4595 4597 if (retval->ipsid_next != NULL)
4596 4598 retval->ipsid_next->ipsid_ptpn = &retval->ipsid_next;
4597 4599 retval->ipsid_ptpn = &bucket->ipsif_head;
4598 4600 retval->ipsid_type = idtype;
4599 4601 retval->ipsid_cid = nstr;
4600 4602 bucket->ipsif_head = retval;
4601 4603 bcopy(idstring, nstr, idlen);
4602 4604 mutex_exit(&bucket->ipsif_lock);
4603 4605
4604 4606 return (retval);
4605 4607 }
4606 4608
4607 4609 /*
4608 4610 * Garbage collect the identity hash table.
4609 4611 */
4610 4612 void
4611 4613 ipsid_gc(netstack_t *ns)
4612 4614 {
4613 4615 int i, len;
4614 4616 ipsid_t *id, *nid;
4615 4617 ipsif_t *bucket;
4616 4618 ipsec_stack_t *ipss = ns->netstack_ipsec;
4617 4619
4618 4620 for (i = 0; i < IPSID_HASHSIZE; i++) {
4619 4621 bucket = &ipss->ipsec_ipsid_buckets[i];
4620 4622 mutex_enter(&bucket->ipsif_lock);
4621 4623 for (id = bucket->ipsif_head; id != NULL; id = nid) {
4622 4624 nid = id->ipsid_next;
4623 4625 if (id->ipsid_refcnt == 0) {
4624 4626 *id->ipsid_ptpn = nid;
4625 4627 if (nid != NULL)
4626 4628 nid->ipsid_ptpn = id->ipsid_ptpn;
4627 4629 len = strlen(id->ipsid_cid) + 1;
4628 4630 kmem_free(id->ipsid_cid, len);
4629 4631 kmem_free(id, sizeof (*id));
4630 4632 }
4631 4633 }
4632 4634 mutex_exit(&bucket->ipsif_lock);
4633 4635 }
4634 4636 }
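
    Review note: the GC can unlink in O(1) because every node carries
    ipsid_ptpn, a pointer to whatever pointer currently points at it (the
    bucket head or the predecessor's next field), so no walk back to find
    the predecessor is ever needed. A minimal sketch of the insert/unlink
    pair (hypothetical node type; "idnode_t" avoids clashing with POSIX
    id_t):

        #include <stdlib.h>

        typedef struct idnode {
            struct idnode   *next;
            struct idnode   **ptpn; /* &bucket-head or &predecessor->next */
        } idnode_t;

        void
        idnode_insert(idnode_t **headp, idnode_t *id)
        {
            id->next = *headp;
            if (id->next != NULL)
                id->next->ptpn = &id->next;
            id->ptpn = headp;
            *headp = id;
        }

        /* O(1) unlink: bypass this node, fix the successor's back pointer */
        void
        idnode_unlink(idnode_t *id)
        {
            *id->ptpn = id->next;
            if (id->next != NULL)
                id->next->ptpn = id->ptpn;
            free(id);
        }
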
4635 4637
4636 4638 /*
4637 4639 * Return true if two identities are the same.
4638 4640 */
4639 4641 boolean_t
4640 4642 ipsid_equal(ipsid_t *id1, ipsid_t *id2)
4641 4643 {
4642 4644 if (id1 == id2)
4643 4645 return (B_TRUE);
4644 4646 #ifdef DEBUG
4645 4647 if ((id1 == NULL) || (id2 == NULL))
4646 4648 return (B_FALSE);
4647 4649 /*
4648 4650 * test that we're interning id's correctly..
4649 4651 */
4650 4652 ASSERT((strcmp(id1->ipsid_cid, id2->ipsid_cid) != 0) ||
4651 4653 (id1->ipsid_type != id2->ipsid_type));
4652 4654 #endif
4653 4655 return (B_FALSE);
4654 4656 }
4655 4657
4656 4658 /*
4657 4659 * Initialize identity table; called during module initialization.
4658 4660 */
4659 4661 static void
4660 4662 ipsid_init(netstack_t *ns)
4661 4663 {
4662 4664 ipsif_t *bucket;
4663 4665 int i;
4664 4666 ipsec_stack_t *ipss = ns->netstack_ipsec;
4665 4667
4666 4668 for (i = 0; i < IPSID_HASHSIZE; i++) {
4667 4669 bucket = &ipss->ipsec_ipsid_buckets[i];
4668 4670 mutex_init(&bucket->ipsif_lock, NULL, MUTEX_DEFAULT, NULL);
4669 4671 }
4670 4672 }
4671 4673
4672 4674 /*
4673 4675 * Free identity table (preparatory to module unload)
4674 4676 */
4675 4677 static void
4676 4678 ipsid_fini(netstack_t *ns)
4677 4679 {
4678 4680 ipsif_t *bucket;
4679 4681 int i;
4680 4682 ipsec_stack_t *ipss = ns->netstack_ipsec;
4681 4683
4682 4684 for (i = 0; i < IPSID_HASHSIZE; i++) {
4683 4685 bucket = &ipss->ipsec_ipsid_buckets[i];
4684 4686 ASSERT(bucket->ipsif_head == NULL);
4685 4687 mutex_destroy(&bucket->ipsif_lock);
4686 4688 }
4687 4689 }
4688 4690
4689 4691 /*
4690 - * Update the minimum and maximum supported key sizes for the
4691 - * specified algorithm. Must be called while holding the algorithms lock.
4692 + * Update the minimum and maximum supported key sizes for the specified
4693 + * algorithm, which is either a member of a netstack alg array or about to be,
4694 + * and therefore must be called holding ipsec_alg_lock for write.
4692 4695 */
4693 4696 void
4694 4697 ipsec_alg_fix_min_max(ipsec_alginfo_t *alg, ipsec_algtype_t alg_type,
4695 4698 netstack_t *ns)
4696 4699 {
4697 4700 size_t crypto_min = (size_t)-1, crypto_max = 0;
4698 4701 size_t cur_crypto_min, cur_crypto_max;
4699 4702 boolean_t is_valid;
4700 4703 crypto_mechanism_info_t *mech_infos;
4701 4704 uint_t nmech_infos;
4702 4705 int crypto_rc, i;
4703 4706 crypto_mech_usage_t mask;
4704 4707 ipsec_stack_t *ipss = ns->netstack_ipsec;
4705 4708
4706 - ASSERT(MUTEX_HELD(&ipss->ipsec_alg_lock));
4709 + ASSERT(RW_WRITE_HELD(&ipss->ipsec_alg_lock));
4707 4710
4708 4711 /*
4709 4712 * Compute the min, max, and default key sizes (in number of
4710 4713 * increments to the default key size in bits) as defined
4711 4714 * by the algorithm mappings. This range of key sizes is used
4712 4715 * for policy related operations. The effective key sizes
4713 4716 * supported by the framework could be more limited than
4714 4717 * those defined for an algorithm.
4715 4718 */
4716 4719 alg->alg_default_bits = alg->alg_key_sizes[0];
4717 4720 alg->alg_default = 0;
4718 4721 if (alg->alg_increment != 0) {
4719 4722 /* key sizes are defined by range & increment */
4720 4723 alg->alg_minbits = alg->alg_key_sizes[1];
4721 4724 alg->alg_maxbits = alg->alg_key_sizes[2];
4722 4725 } else if (alg->alg_nkey_sizes == 0) {
4723 4726 /* no specified key size for algorithm */
4724 4727 alg->alg_minbits = alg->alg_maxbits = 0;
4725 4728 } else {
4726 4729 /* key sizes are defined by enumeration */
4727 4730 alg->alg_minbits = (uint16_t)-1;
4728 4731 alg->alg_maxbits = 0;
4729 4732
4730 4733 for (i = 0; i < alg->alg_nkey_sizes; i++) {
4731 4734 if (alg->alg_key_sizes[i] < alg->alg_minbits)
4732 4735 alg->alg_minbits = alg->alg_key_sizes[i];
4733 4736 if (alg->alg_key_sizes[i] > alg->alg_maxbits)
4734 4737 alg->alg_maxbits = alg->alg_key_sizes[i];
4735 4738 }
4736 4739 }
4737 4740
4738 4741 if (!(alg->alg_flags & ALG_FLAG_VALID))
4739 4742 return;
4740 4743
4741 4744 /*
4742 4745 * Mechanisms do not apply to the NULL encryption
4743 4746 * algorithm, so simply return for this case.
4744 4747 */
4745 4748 if (alg->alg_id == SADB_EALG_NULL)
4746 4749 return;
4747 4750
4748 4751 /*
4749 4752 * Find the min and max key sizes supported by the cryptographic
4750 4753 * framework providers.
4751 4754 */
4752 4755
4753 4756 /* get the key sizes supported by the framework */
4754 4757 crypto_rc = crypto_get_all_mech_info(alg->alg_mech_type,
4755 4758 &mech_infos, &nmech_infos, KM_SLEEP);
4756 4759 if (crypto_rc != CRYPTO_SUCCESS || nmech_infos == 0) {
4757 4760 alg->alg_flags &= ~ALG_FLAG_VALID;
4758 4761 return;
4759 4762 }
4760 4763
4761 4764 /* min and max key sizes supported by framework */
4762 4765 for (i = 0, is_valid = B_FALSE; i < nmech_infos; i++) {
4763 4766 int unit_bits;
4764 4767
4765 4768 /*
4766 4769 * Ignore entries that do not support the operations
4767 4770 * needed for the algorithm type.
4768 4771 */
4769 4772 if (alg_type == IPSEC_ALG_AUTH) {
4770 4773 mask = CRYPTO_MECH_USAGE_MAC;
4771 4774 } else {
4772 4775 mask = CRYPTO_MECH_USAGE_ENCRYPT |
4773 4776 CRYPTO_MECH_USAGE_DECRYPT;
4774 4777 }
4775 4778 if ((mech_infos[i].mi_usage & mask) != mask)
4776 4779 continue;
4777 4780
4778 4781 unit_bits = (mech_infos[i].mi_keysize_unit ==
4779 4782 CRYPTO_KEYSIZE_UNIT_IN_BYTES) ? 8 : 1;
4780 4783 /* adjust min/max supported by framework */
4781 4784 cur_crypto_min = mech_infos[i].mi_min_key_size * unit_bits;
4782 4785 cur_crypto_max = mech_infos[i].mi_max_key_size * unit_bits;
4783 4786
4784 4787 if (cur_crypto_min < crypto_min)
4785 4788 crypto_min = cur_crypto_min;
4786 4789
4787 4790 /*
4788 4791 * CRYPTO_EFFECTIVELY_INFINITE is a special value of
4789 4792 * the crypto framework which means "no upper limit".
4790 4793 */
4791 4794 if (mech_infos[i].mi_max_key_size ==
4792 4795 CRYPTO_EFFECTIVELY_INFINITE) {
4793 4796 crypto_max = (size_t)-1;
4794 4797 } else if (cur_crypto_max > crypto_max) {
4795 4798 crypto_max = cur_crypto_max;
4796 4799 }
4797 4800
4798 4801 is_valid = B_TRUE;
4799 4802 }
4800 4803
4801 4804 kmem_free(mech_infos, sizeof (crypto_mechanism_info_t) *
4802 4805 nmech_infos);
4803 4806
4804 4807 if (!is_valid) {
4805 4808 /* no key sizes supported by framework */
4806 4809 alg->alg_flags &= ~ALG_FLAG_VALID;
4807 4810 return;
4808 4811 }
4809 4812
4810 4813 /*
4811 4814	 * Determine min and max key sizes from the alg_key_sizes[]
4812 4815 * defined for the algorithm entry. Adjust key sizes based on
4813 4816 * those supported by the framework.
4814 4817 */
4815 4818 alg->alg_ef_default_bits = alg->alg_key_sizes[0];
4816 4819
4817 4820 /*
4818 4821	 * For backwards compatibility, assume that the IV length
4819 4822 * is the same as the data length.
4820 4823 */
4821 4824 alg->alg_ivlen = alg->alg_datalen;
4822 4825
4823 4826 /*
4824 4827 * Copy any algorithm parameters (if provided) into dedicated
4825 4828 * elements in the ipsec_alginfo_t structure.
4826 4829 * There may be a better place to put this code.
4827 4830 */
4828 4831 for (i = 0; i < alg->alg_nparams; i++) {
4829 4832 switch (i) {
4830 4833 case 0:
4831 4834 /* Initialisation Vector length (bytes) */
4832 4835 alg->alg_ivlen = alg->alg_params[0];
4833 4836 break;
4834 4837 case 1:
4835 4838 /* Integrity Check Vector length (bytes) */
4836 4839 alg->alg_icvlen = alg->alg_params[1];
4837 4840 break;
4838 4841 case 2:
4839 4842 /* Salt length (bytes) */
4840 4843 alg->alg_saltlen = (uint8_t)alg->alg_params[2];
4841 4844 break;
4842 4845 default:
4843 4846 break;
4844 4847 }
4845 4848 }
4846 4849
4847 4850 /* Default if the IV length is not specified. */
4848 4851 if (alg_type == IPSEC_ALG_ENCR && alg->alg_ivlen == 0)
4849 4852 alg->alg_ivlen = alg->alg_datalen;
4850 4853
4851 4854 alg_flag_check(alg);
4852 4855
4853 4856 if (alg->alg_increment != 0) {
4854 4857 /* supported key sizes are defined by range & increment */
4855 4858 crypto_min = ALGBITS_ROUND_UP(crypto_min, alg->alg_increment);
4856 4859 crypto_max = ALGBITS_ROUND_DOWN(crypto_max, alg->alg_increment);
4857 4860
4858 4861 alg->alg_ef_minbits = MAX(alg->alg_minbits,
4859 4862 (uint16_t)crypto_min);
4860 4863 alg->alg_ef_maxbits = MIN(alg->alg_maxbits,
4861 4864 (uint16_t)crypto_max);
4862 4865
4863 4866 /*
4864 4867 * If the sizes supported by the framework are outside
4865 4868 * the range of sizes defined by the algorithm mappings,
4866 4869 * the algorithm cannot be used. Check for this
4867 4870 * condition here.
4868 4871 */
4869 4872 if (alg->alg_ef_minbits > alg->alg_ef_maxbits) {
4870 4873 alg->alg_flags &= ~ALG_FLAG_VALID;
4871 4874 return;
4872 4875 }
4873 4876 if (alg->alg_ef_default_bits < alg->alg_ef_minbits)
4874 4877 alg->alg_ef_default_bits = alg->alg_ef_minbits;
4875 4878 if (alg->alg_ef_default_bits > alg->alg_ef_maxbits)
4876 4879 alg->alg_ef_default_bits = alg->alg_ef_maxbits;
4877 4880 } else if (alg->alg_nkey_sizes == 0) {
4878 4881 /* no specified key size for algorithm */
4879 4882 alg->alg_ef_minbits = alg->alg_ef_maxbits = 0;
4880 4883 } else {
4881 4884 /* supported key sizes are defined by enumeration */
4882 4885 alg->alg_ef_minbits = (uint16_t)-1;
4883 4886 alg->alg_ef_maxbits = 0;
4884 4887
4885 4888 for (i = 0, is_valid = B_FALSE; i < alg->alg_nkey_sizes; i++) {
4886 4889 /*
4887 4890 * Ignore the current key size if it is not in the
4888 4891 * range of sizes supported by the framework.
4889 4892 */
4890 4893 if (alg->alg_key_sizes[i] < crypto_min ||
4891 4894 alg->alg_key_sizes[i] > crypto_max)
4892 4895 continue;
4893 4896 if (alg->alg_key_sizes[i] < alg->alg_ef_minbits)
4894 4897 alg->alg_ef_minbits = alg->alg_key_sizes[i];
4895 4898 if (alg->alg_key_sizes[i] > alg->alg_ef_maxbits)
4896 4899 alg->alg_ef_maxbits = alg->alg_key_sizes[i];
4897 4900 is_valid = B_TRUE;
4898 4901 }
4899 4902
4900 4903 if (!is_valid) {
4901 4904 alg->alg_flags &= ~ALG_FLAG_VALID;
4902 4905 return;
4903 4906 }
4904 4907 alg->alg_ef_default = 0;
4905 4908 }
4906 4909 }
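
To make the range-and-increment clamping above concrete, here is a minimal standalone sketch (userland C, not kernel code). The ALGBITS_ROUND_UP/ALGBITS_ROUND_DOWN bodies are assumptions consistent with how this file uses them, and the algorithm mapping and framework limits are hypothetical.

    /* Standalone sketch of the range-and-increment clamping above. */
    #include <stdio.h>
    #include <stdint.h>

    /* Assumed macro bodies, consistent with their use in this file. */
    #define ALGBITS_ROUND_DOWN(x, align)  (((x) / (align)) * (align))
    #define ALGBITS_ROUND_UP(x, align) \
            ALGBITS_ROUND_DOWN((x) + (align) - 1, align)

    int
    main(void)
    {
        /* Hypothetical algorithm mapping: 128..256 bits, 64-bit increment. */
        uint16_t alg_minbits = 128, alg_maxbits = 256, increment = 64;
        /* Hypothetical framework limits: 40..192 bits. */
        size_t crypto_min = 40, crypto_max = 192;

        crypto_min = ALGBITS_ROUND_UP(crypto_min, increment);    /* -> 64 */
        crypto_max = ALGBITS_ROUND_DOWN(crypto_max, increment);  /* -> 192 */

        uint16_t ef_min = (alg_minbits > crypto_min) ?
            alg_minbits : (uint16_t)crypto_min;
        uint16_t ef_max = (alg_maxbits < crypto_max) ?
            alg_maxbits : (uint16_t)crypto_max;

        /* Effective policy range is the intersection: 128..192 bits. */
        printf("effective key range: %u..%u bits\n",
            (unsigned)ef_min, (unsigned)ef_max);
        return (0);
    }
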
4907 4910
4908 4911 /*
4909 4912 * Sanity check parameters provided by ipsecalgs(1m). Assume that
4910 4913	 * the algorithm is marked as valid; there is a check at the top
4911 4914 * of this function. If any of the checks below fail, the algorithm
4912 4915 * entry is invalid.
4913 4916 */
4914 4917 void
4915 4918 alg_flag_check(ipsec_alginfo_t *alg)
4916 4919 {
4917 4920 alg->alg_flags &= ~ALG_FLAG_VALID;
4918 4921
4919 4922 /*
4920 4923	 * Can't have the algorithm marked as both CCM and GCM.
4921 4924	 * Check that the ALG_FLAG_COMBINED and ALG_FLAG_COUNTERMODE
4922 4925 * flags are set for CCM & GCM.
4923 4926 */
4924 4927 if ((alg->alg_flags & (ALG_FLAG_CCM|ALG_FLAG_GCM)) ==
4925 4928 (ALG_FLAG_CCM|ALG_FLAG_GCM))
4926 4929 return;
4927 4930 if (alg->alg_flags & (ALG_FLAG_CCM|ALG_FLAG_GCM)) {
4928 4931 if (!(alg->alg_flags & ALG_FLAG_COUNTERMODE))
4929 4932 return;
4930 4933 if (!(alg->alg_flags & ALG_FLAG_COMBINED))
4931 4934 return;
4932 4935 }
4933 4936
4934 4937 /*
4935 4938	 * For ALG_FLAG_COUNTERMODE, check that the parameters
4936 4939 * fit in the ipsec_nonce_t structure.
4937 4940 */
4938 4941 if (alg->alg_flags & ALG_FLAG_COUNTERMODE) {
4939 4942 if (alg->alg_ivlen != sizeof (((ipsec_nonce_t *)NULL)->iv))
4940 4943 return;
4941 4944 if (alg->alg_saltlen > sizeof (((ipsec_nonce_t *)NULL)->salt))
4942 4945 return;
4943 4946 }
4944 4947 if ((alg->alg_flags & ALG_FLAG_COMBINED) &&
4945 4948 (alg->alg_icvlen == 0))
4946 4949 return;
4947 4950
4948 4951 /* all is well. */
4949 4952 alg->alg_flags |= ALG_FLAG_VALID;
4950 4953 }
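
To illustrate just the flag invariants enforced above (the IV/salt/ICV length checks are omitted), here is a standalone sketch; the flag values are illustrative stand-ins, not the real ipsec_impl.h constants.

    #include <stdio.h>

    /* Illustrative stand-ins for the ALG_FLAG_* bits. */
    #define F_COUNTERMODE  0x02
    #define F_COMBINED     0x04
    #define F_CCM          0x08
    #define F_GCM          0x10

    static int
    flags_ok(unsigned f)
    {
        if ((f & (F_CCM | F_GCM)) == (F_CCM | F_GCM))
            return (0);    /* can't be both CCM and GCM */
        if ((f & (F_CCM | F_GCM)) != 0 &&
            (f & (F_COUNTERMODE | F_COMBINED)) !=
            (F_COUNTERMODE | F_COMBINED))
            return (0);    /* CCM/GCM require counter-mode and combined */
        return (1);
    }

    int
    main(void)
    {
        printf("GCM alone: %d\n", flags_ok(F_GCM));               /* 0 */
        printf("GCM|CTR|COMBINED: %d\n",
            flags_ok(F_GCM | F_COUNTERMODE | F_COMBINED));        /* 1 */
        printf("CCM and GCM together: %d\n",
            flags_ok(F_CCM | F_GCM | F_COUNTERMODE | F_COMBINED)); /* 0 */
        return (0);
    }
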
4951 4954
4952 4955 /*
4953 4956 * Free the memory used by the specified algorithm.
4954 4957 */
4955 4958 void
4956 4959 ipsec_alg_free(ipsec_alginfo_t *alg)
4957 4960 {
4958 4961 if (alg == NULL)
4959 4962 return;
4960 4963
4961 4964 if (alg->alg_key_sizes != NULL) {
4962 4965 kmem_free(alg->alg_key_sizes,
4963 4966 (alg->alg_nkey_sizes + 1) * sizeof (uint16_t));
4964 4967 alg->alg_key_sizes = NULL;
4965 4968 }
4966 4969 if (alg->alg_block_sizes != NULL) {
4967 4970 kmem_free(alg->alg_block_sizes,
4968 4971 (alg->alg_nblock_sizes + 1) * sizeof (uint16_t));
4969 4972 alg->alg_block_sizes = NULL;
4970 4973 }
4971 4974 if (alg->alg_params != NULL) {
4972 4975 kmem_free(alg->alg_params,
4973 4976 (alg->alg_nparams + 1) * sizeof (uint16_t));
4974 4977 alg->alg_params = NULL;
4975 4978 }
4976 4979 kmem_free(alg, sizeof (*alg));
4977 4980 }
4978 4981
4979 4982 /*
4980 4983 * Check the validity of the specified key size for an algorithm.
4981 4984 * Returns B_TRUE if key size is valid, B_FALSE otherwise.
4982 4985 */
4983 4986 boolean_t
4984 4987 ipsec_valid_key_size(uint16_t key_size, ipsec_alginfo_t *alg)
4985 4988 {
4986 4989 if (key_size < alg->alg_ef_minbits || key_size > alg->alg_ef_maxbits)
4987 4990 return (B_FALSE);
4988 4991
4989 4992 if (alg->alg_increment == 0 && alg->alg_nkey_sizes != 0) {
4990 4993 /*
4991 4994 * If the key sizes are defined by enumeration, the new
4992 4995 * key size must be equal to one of the supported values.
4993 4996 */
4994 4997 int i;
4995 4998
4996 4999 for (i = 0; i < alg->alg_nkey_sizes; i++)
4997 5000 if (key_size == alg->alg_key_sizes[i])
4998 5001 break;
4999 5002 if (i == alg->alg_nkey_sizes)
5000 5003 return (B_FALSE);
5001 5004 }
5002 5005
5003 5006 return (B_TRUE);
5004 5007 }
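
For a hypothetical enumerated-key-size algorithm (increment of zero, listed sizes of 128/192/256 bits), the check above behaves like this standalone sketch:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical enumeration: exactly 128, 192, or 256 bits. */
    static const uint16_t key_sizes[] = { 128, 192, 256 };
    static const int nkey_sizes = 3;
    static const uint16_t ef_minbits = 128, ef_maxbits = 256;

    static int
    valid_key_size(uint16_t bits)
    {
        int i;

        if (bits < ef_minbits || bits > ef_maxbits)
            return (0);
        /* increment == 0: must match one of the enumerated sizes. */
        for (i = 0; i < nkey_sizes; i++)
            if (bits == key_sizes[i])
                return (1);
        return (0);
    }

    int
    main(void)
    {
        printf("160 bits: %d\n", valid_key_size(160)); /* 0: in range, not listed */
        printf("192 bits: %d\n", valid_key_size(192)); /* 1 */
        return (0);
    }
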
5005 5008
5006 5009 /*
5007 5010 * Callback function invoked by the crypto framework when a provider
5008 5011 * registers or unregisters. This callback updates the algorithms
5009 5012 * tables when a crypto algorithm is no longer available or becomes
5010 5013 * available, and triggers the freeing/creation of context templates
5011 5014 * associated with existing SAs, if needed.
5012 5015 *
5013 5016 * Need to walk all stack instances since the callback is global
5014 5017	 * for all instances.
5015 5018 */
5016 5019 void
5017 5020 ipsec_prov_update_callback(uint32_t event, void *event_arg)
5018 5021 {
5019 5022 netstack_handle_t nh;
5020 5023 netstack_t *ns;
5021 5024
5022 5025 netstack_next_init(&nh);
5023 5026 while ((ns = netstack_next(&nh)) != NULL) {
5024 5027 ipsec_prov_update_callback_stack(event, event_arg, ns);
5025 5028 netstack_rele(ns);
5026 5029 }
5027 5030 netstack_next_fini(&nh);
5028 5031 }
5029 5032
5030 5033 static void
5031 5034 ipsec_prov_update_callback_stack(uint32_t event, void *event_arg,
5032 5035 netstack_t *ns)
5033 5036 {
5034 5037 crypto_notify_event_change_t *prov_change =
5035 5038 (crypto_notify_event_change_t *)event_arg;
5036 5039 uint_t algidx, algid, algtype, mech_count, mech_idx;
5037 5040 ipsec_alginfo_t *alg;
5038 5041 ipsec_alginfo_t oalg;
5039 5042 crypto_mech_name_t *mechs;
5040 5043 boolean_t alg_changed = B_FALSE;
5041 5044 ipsec_stack_t *ipss = ns->netstack_ipsec;
5042 5045
5043 5046 /* ignore events for which we didn't register */
5044 5047 if (event != CRYPTO_EVENT_MECHS_CHANGED) {
5045 5048 ip1dbg(("ipsec_prov_update_callback: unexpected event 0x%x "
5046 5049	    "received from crypto framework\n", event));
5047 5050 return;
5048 5051 }
5049 5052
5050 5053 mechs = crypto_get_mech_list(&mech_count, KM_SLEEP);
5051 5054 if (mechs == NULL)
5052 5055 return;
5053 5056
5054 5057 /*
5055 5058	 * Walk the list of currently defined IPsec algorithms. Update
5056 5059 * the algorithm valid flag and trigger an update of the
5057 5060 * SAs that depend on that algorithm.
5058 5061 */
5059 - mutex_enter(&ipss->ipsec_alg_lock);
5062 + rw_enter(&ipss->ipsec_alg_lock, RW_WRITER);
5060 5063 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
5061 5064 for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
5062 5065 algidx++) {
5063 5066
5064 5067 algid = ipss->ipsec_sortlist[algtype][algidx];
5065 5068 alg = ipss->ipsec_alglists[algtype][algid];
5066 5069 ASSERT(alg != NULL);
5067 5070
5068 5071 /*
5069 5072 * Skip the algorithms which do not map to the
5070 5073 * crypto framework provider being added or removed.
5071 5074 */
5072 5075 if (strncmp(alg->alg_mech_name,
5073 5076 prov_change->ec_mech_name,
5074 5077 CRYPTO_MAX_MECH_NAME) != 0)
5075 5078 continue;
5076 5079
5077 5080 /*
5078 5081 * Determine if the mechanism is valid. If it
5079 5082 * is not, mark the algorithm as being invalid. If
5080 5083 * it is, mark the algorithm as being valid.
5081 5084 */
5082 5085 for (mech_idx = 0; mech_idx < mech_count; mech_idx++)
5083 5086 if (strncmp(alg->alg_mech_name,
5084 5087 mechs[mech_idx], CRYPTO_MAX_MECH_NAME) == 0)
5085 5088 break;
5086 5089 if (mech_idx == mech_count &&
5087 5090 alg->alg_flags & ALG_FLAG_VALID) {
5088 5091 alg->alg_flags &= ~ALG_FLAG_VALID;
5089 5092 alg_changed = B_TRUE;
5090 5093 } else if (mech_idx < mech_count &&
5091 5094 !(alg->alg_flags & ALG_FLAG_VALID)) {
5092 5095 alg->alg_flags |= ALG_FLAG_VALID;
5093 5096 alg_changed = B_TRUE;
5094 5097 }
5095 5098
5096 5099 /*
5097 5100 * Update the supported key sizes, regardless
5098 5101 * of whether a crypto provider was added or
5099 5102 * removed.
5100 5103 */
5101 5104 oalg = *alg;
5102 5105 ipsec_alg_fix_min_max(alg, algtype, ns);
5103 5106 if (!alg_changed &&
5104 5107	    (alg->alg_ef_minbits != oalg.alg_ef_minbits ||
5105 5108	    alg->alg_ef_maxbits != oalg.alg_ef_maxbits ||
5106 5109	    alg->alg_ef_default != oalg.alg_ef_default ||
5107 5110	    alg->alg_ef_default_bits !=
5108 5111	    oalg.alg_ef_default_bits))
5109 5112 alg_changed = B_TRUE;
5110 5113
5111 5114 /*
5112 5115 * Update the affected SAs if a software provider is
5113 5116 * being added or removed.
5114 5117 */
5115 5118 if (prov_change->ec_provider_type ==
5116 5119 CRYPTO_SW_PROVIDER)
5117 5120 sadb_alg_update(algtype, alg->alg_id,
5118 5121 prov_change->ec_change ==
5119 5122 CRYPTO_MECH_ADDED, ns);
5120 5123 }
5121 5124 }
5122 - mutex_exit(&ipss->ipsec_alg_lock);
5125 + rw_exit(&ipss->ipsec_alg_lock);
5123 5126 crypto_free_mech_list(mechs, mech_count);
5124 5127
5125 5128 if (alg_changed) {
5126 5129 /*
5127 5130 * An algorithm has changed, i.e. it became valid or
5128 5131	 * invalid, or its supported key sizes have changed.
5129 5132 * Notify ipsecah and ipsecesp of this change so
5130 5133 * that they can send a SADB_REGISTER to their consumers.
5131 5134 */
5132 5135 ipsecah_algs_changed(ns);
5133 5136 ipsecesp_algs_changed(ns);
5134 5137 }
5135 5138 }
5136 5139
5137 5140 /*
5138 5141 * Registers with the crypto framework to be notified of crypto
5139 5142	 * provider changes. Used to update the algorithm tables and
5140 5143 * to free or create context templates if needed. Invoked after IPsec
5141 5144 * is loaded successfully.
5142 5145 *
5143 5146 * This is called separately for each IP instance, so we ensure we only
5144 5147 * register once.
5145 5148 */
5146 5149 void
5147 5150 ipsec_register_prov_update(void)
5148 5151 {
5149 5152 if (prov_update_handle != NULL)
5150 5153 return;
5151 5154
5152 5155 prov_update_handle = crypto_notify_events(
5153 5156 ipsec_prov_update_callback, CRYPTO_EVENT_MECHS_CHANGED);
5154 5157 }
5155 5158
5156 5159 /*
5157 5160	 * Unregisters from the framework to be notified of crypto provider
5158 5161 * changes. Called from ipsec_policy_g_destroy().
5159 5162 */
5160 5163 static void
5161 5164 ipsec_unregister_prov_update(void)
5162 5165 {
5163 5166 if (prov_update_handle != NULL)
5164 5167 crypto_unnotify_events(prov_update_handle);
5165 5168 }
5166 5169
5167 5170 /*
5168 5171 * Tunnel-mode support routines.
5169 5172 */
5170 5173
5171 5174 /*
5172 5175 * Returns an mblk chain suitable for putnext() if policies match and IPsec
5173 5176	 * SAs are available. If there's no per-tunnel policy, or the lookup comes back
5174 5177	 * with no match, then still return the packet and have global policy take
5175 5178 * a crack at it in IP.
5176 5179 * This updates the ip_xmit_attr with the IPsec policy.
5177 5180 *
5178 5181 * Remember -> we can be forwarding packets. Keep that in mind w.r.t.
5179 5182 * inner-packet contents.
5180 5183 */
5181 5184 mblk_t *
5182 5185 ipsec_tun_outbound(mblk_t *mp, iptun_t *iptun, ipha_t *inner_ipv4,
5183 5186 ip6_t *inner_ipv6, ipha_t *outer_ipv4, ip6_t *outer_ipv6, int outer_hdr_len,
5184 5187 ip_xmit_attr_t *ixa)
5185 5188 {
5186 5189 ipsec_policy_head_t *polhead;
5187 5190 ipsec_selector_t sel;
5188 5191 mblk_t *nmp;
5189 5192 boolean_t is_fragment;
5190 5193 ipsec_policy_t *pol;
5191 5194 ipsec_tun_pol_t *itp = iptun->iptun_itp;
5192 5195 netstack_t *ns = iptun->iptun_ns;
5193 5196 ipsec_stack_t *ipss = ns->netstack_ipsec;
5194 5197
5195 5198 ASSERT(outer_ipv6 != NULL && outer_ipv4 == NULL ||
5196 5199 outer_ipv4 != NULL && outer_ipv6 == NULL);
5197 5200 /* We take care of inners in a bit. */
5198 5201
5199 5202 /* Are the IPsec fields initialized at all? */
5200 5203 if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE)) {
5201 5204 ASSERT(ixa->ixa_ipsec_policy == NULL);
5202 5205 ASSERT(ixa->ixa_ipsec_latch == NULL);
5203 5206 ASSERT(ixa->ixa_ipsec_action == NULL);
5204 5207 ASSERT(ixa->ixa_ipsec_ah_sa == NULL);
5205 5208 ASSERT(ixa->ixa_ipsec_esp_sa == NULL);
5206 5209 }
5207 5210
5208 5211 ASSERT(itp != NULL && (itp->itp_flags & ITPF_P_ACTIVE));
5209 5212 polhead = itp->itp_policy;
5210 5213
5211 5214 bzero(&sel, sizeof (sel));
5212 5215 if (inner_ipv4 != NULL) {
5213 5216 ASSERT(inner_ipv6 == NULL);
5214 5217 sel.ips_isv4 = B_TRUE;
5215 5218 sel.ips_local_addr_v4 = inner_ipv4->ipha_src;
5216 5219 sel.ips_remote_addr_v4 = inner_ipv4->ipha_dst;
5217 5220 sel.ips_protocol = (uint8_t)inner_ipv4->ipha_protocol;
5218 5221 } else {
5219 5222 ASSERT(inner_ipv6 != NULL);
5220 5223 sel.ips_isv4 = B_FALSE;
5221 5224 sel.ips_local_addr_v6 = inner_ipv6->ip6_src;
5222 5225 /*
5223 5226 * We don't care about routing-header dests in the
5224 5227 * forwarding/tunnel path, so just grab ip6_dst.
5225 5228 */
5226 5229 sel.ips_remote_addr_v6 = inner_ipv6->ip6_dst;
5227 5230 }
5228 5231
5229 5232 if (itp->itp_flags & ITPF_P_PER_PORT_SECURITY) {
5230 5233 /*
5231 5234 * Caller can prepend the outer header, which means
5232 5235 * inner_ipv[46] may be stuck in the middle. Pullup the whole
5233 5236 * mess now if need-be, for easier processing later. Don't
5234 5237 * forget to rewire the outer header too.
5235 5238 */
5236 5239 if (mp->b_cont != NULL) {
5237 5240 nmp = msgpullup(mp, -1);
5238 5241 if (nmp == NULL) {
5239 5242 ip_drop_packet(mp, B_FALSE, NULL,
5240 5243 DROPPER(ipss, ipds_spd_nomem),
5241 5244 &ipss->ipsec_spd_dropper);
5242 5245 return (NULL);
5243 5246 }
5244 5247 freemsg(mp);
5245 5248 mp = nmp;
5246 5249 if (outer_ipv4 != NULL)
5247 5250 outer_ipv4 = (ipha_t *)mp->b_rptr;
5248 5251 else
5249 5252 outer_ipv6 = (ip6_t *)mp->b_rptr;
5250 5253 if (inner_ipv4 != NULL) {
5251 5254 inner_ipv4 =
5252 5255 (ipha_t *)(mp->b_rptr + outer_hdr_len);
5253 5256 } else {
5254 5257 inner_ipv6 =
5255 5258 (ip6_t *)(mp->b_rptr + outer_hdr_len);
5256 5259 }
5257 5260 }
5258 5261 if (inner_ipv4 != NULL) {
5259 5262 is_fragment = IS_V4_FRAGMENT(
5260 5263 inner_ipv4->ipha_fragment_offset_and_flags);
5261 5264 } else {
5262 5265 sel.ips_remote_addr_v6 = ip_get_dst_v6(inner_ipv6, mp,
5263 5266 &is_fragment);
5264 5267 }
5265 5268
5266 5269 if (is_fragment) {
5267 5270 ipha_t *oiph;
5268 5271 ipha_t *iph = NULL;
5269 5272 ip6_t *ip6h = NULL;
5270 5273 int hdr_len;
5271 5274 uint16_t ip6_hdr_length;
5272 5275 uint8_t v6_proto;
5273 5276 uint8_t *v6_proto_p;
5274 5277
5275 5278 /*
5276 5279 * We have a fragment we need to track!
5277 5280 */
5278 5281 mp = ipsec_fragcache_add(&itp->itp_fragcache, NULL, mp,
5279 5282 outer_hdr_len, ipss);
5280 5283 if (mp == NULL)
5281 5284 return (NULL);
5282 5285 ASSERT(mp->b_cont == NULL);
5283 5286
5284 5287 /*
5285 5288 * If we get here, we have a full fragment chain
5286 5289 */
5287 5290
5288 5291 oiph = (ipha_t *)mp->b_rptr;
5289 5292 if (IPH_HDR_VERSION(oiph) == IPV4_VERSION) {
5290 5293 hdr_len = ((outer_hdr_len != 0) ?
5291 5294 IPH_HDR_LENGTH(oiph) : 0);
5292 5295 iph = (ipha_t *)(mp->b_rptr + hdr_len);
5293 5296 } else {
5294 5297 ASSERT(IPH_HDR_VERSION(oiph) == IPV6_VERSION);
5295 5298 ip6h = (ip6_t *)mp->b_rptr;
5296 5299 if (!ip_hdr_length_nexthdr_v6(mp, ip6h,
5297 5300 &ip6_hdr_length, &v6_proto_p)) {
5298 5301 ip_drop_packet_chain(mp, B_FALSE, NULL,
5299 5302 DROPPER(ipss,
5300 5303 ipds_spd_malformed_packet),
5301 5304 &ipss->ipsec_spd_dropper);
5302 5305 return (NULL);
5303 5306 }
5304 5307 hdr_len = ip6_hdr_length;
5305 5308 }
5306 5309 outer_hdr_len = hdr_len;
5307 5310
5308 5311 if (sel.ips_isv4) {
5309 5312 if (iph == NULL) {
5310 5313 /* Was v6 outer */
5311 5314 iph = (ipha_t *)(mp->b_rptr + hdr_len);
5312 5315 }
5313 5316 inner_ipv4 = iph;
5314 5317 sel.ips_local_addr_v4 = inner_ipv4->ipha_src;
5315 5318 sel.ips_remote_addr_v4 = inner_ipv4->ipha_dst;
5316 5319 sel.ips_protocol =
5317 5320 (uint8_t)inner_ipv4->ipha_protocol;
5318 5321 } else {
5319 5322 inner_ipv6 = (ip6_t *)(mp->b_rptr +
5320 5323 hdr_len);
5321 5324 sel.ips_local_addr_v6 = inner_ipv6->ip6_src;
5322 5325 sel.ips_remote_addr_v6 = inner_ipv6->ip6_dst;
5323 5326 if (!ip_hdr_length_nexthdr_v6(mp,
5324 5327 inner_ipv6, &ip6_hdr_length, &v6_proto_p)) {
5325 5328 ip_drop_packet_chain(mp, B_FALSE, NULL,
5326 5329 DROPPER(ipss,
5327 5330 ipds_spd_malformed_frag),
5328 5331 &ipss->ipsec_spd_dropper);
5329 5332 return (NULL);
5330 5333 }
5331 5334 v6_proto = *v6_proto_p;
5332 5335 sel.ips_protocol = v6_proto;
5333 5336 #ifdef FRAGCACHE_DEBUG
5334 5337 cmn_err(CE_WARN, "v6_sel.ips_protocol = %d\n",
5335 5338 sel.ips_protocol);
5336 5339 #endif
5337 5340 }
5338 5341 /* Ports are extracted below */
5339 5342 }
5340 5343
5341 5344 /* Get ports... */
5342 5345 if (!ipsec_init_outbound_ports(&sel, mp,
5343 5346 inner_ipv4, inner_ipv6, outer_hdr_len, ipss)) {
5344 5347 /* callee did ip_drop_packet_chain() on mp. */
5345 5348 return (NULL);
5346 5349 }
5347 5350 #ifdef FRAGCACHE_DEBUG
5348 5351 if (inner_ipv4 != NULL)
5349 5352 cmn_err(CE_WARN,
5350 5353 "(v4) sel.ips_protocol = %d, "
5351 5354 "sel.ips_local_port = %d, "
5352 5355 "sel.ips_remote_port = %d\n",
5353 5356 sel.ips_protocol, ntohs(sel.ips_local_port),
5354 5357 ntohs(sel.ips_remote_port));
5355 5358 if (inner_ipv6 != NULL)
5356 5359 cmn_err(CE_WARN,
5357 5360 "(v6) sel.ips_protocol = %d, "
5358 5361 "sel.ips_local_port = %d, "
5359 5362 "sel.ips_remote_port = %d\n",
5360 5363 sel.ips_protocol, ntohs(sel.ips_local_port),
5361 5364 ntohs(sel.ips_remote_port));
5362 5365 #endif
5363 5366 /* Success so far! */
5364 5367 }
5365 5368 rw_enter(&polhead->iph_lock, RW_READER);
5366 5369 pol = ipsec_find_policy_head(NULL, polhead, IPSEC_TYPE_OUTBOUND, &sel);
5367 5370 rw_exit(&polhead->iph_lock);
5368 5371 if (pol == NULL) {
5369 5372 /*
5370 5373 * No matching policy on this tunnel, drop the packet.
5371 5374 *
5372 5375 * NOTE: Tunnel-mode tunnels are different from the
5373 5376 * IP global transport mode policy head. For a tunnel-mode
5374 5377	 * tunnel, we drop the packet instead of passing it
5375 5378	 * along as accepted, the way a global-policy miss would.
5376 5379 *
5377 5380 * NOTE2: "negotiate transport" tunnels should match ALL
5378 5381 * inbound packets, but we do not uncomment the ASSERT()
5379 5382 * below because if/when we open PF_POLICY, a user can
5380 5383	 * shoot themselves in the foot with a 0 priority.
5381 5384 */
5382 5385
5383 5386 /* ASSERT(itp->itp_flags & ITPF_P_TUNNEL); */
5384 5387 #ifdef FRAGCACHE_DEBUG
5385 5388 cmn_err(CE_WARN, "ipsec_tun_outbound(): No matching tunnel "
5386 5389 "per-port policy\n");
5387 5390 #endif
5388 5391 ip_drop_packet_chain(mp, B_FALSE, NULL,
5389 5392 DROPPER(ipss, ipds_spd_explicit),
5390 5393 &ipss->ipsec_spd_dropper);
5391 5394 return (NULL);
5392 5395 }
5393 5396
5394 5397 #ifdef FRAGCACHE_DEBUG
5395 5398	cmn_err(CE_WARN, "Found matching tunnel per-port policy\n");
5396 5399 #endif
5397 5400
5398 5401 /*
5399 5402 * NOTE: ixa_cleanup() function will release pol references.
5400 5403 */
5401 5404 ixa->ixa_ipsec_policy = pol;
5402 5405 /*
5403 5406 * NOTE: There is a subtle difference between iptun_zoneid and
5404 5407 * iptun_connp->conn_zoneid explained in iptun_conn_create(). When
5405 5408 * interacting with the ip module, we must use conn_zoneid.
5406 5409 */
5407 5410 ixa->ixa_zoneid = iptun->iptun_connp->conn_zoneid;
5408 5411
5409 5412 ASSERT((outer_ipv4 != NULL) ? (ixa->ixa_flags & IXAF_IS_IPV4) :
5410 5413 !(ixa->ixa_flags & IXAF_IS_IPV4));
5411 5414 ASSERT(ixa->ixa_ipsec_policy != NULL);
5412 5415 ixa->ixa_flags |= IXAF_IPSEC_SECURE;
5413 5416
5414 5417 if (!(itp->itp_flags & ITPF_P_TUNNEL)) {
5415 5418 /* Set up transport mode for tunnelled packets. */
5416 5419 ixa->ixa_ipsec_proto = (inner_ipv4 != NULL) ? IPPROTO_ENCAP :
5417 5420 IPPROTO_IPV6;
5418 5421 return (mp);
5419 5422 }
5420 5423
5421 5424 /* Fill in tunnel-mode goodies here. */
5422 5425 ixa->ixa_flags |= IXAF_IPSEC_TUNNEL;
5423 5426 /* XXX Do I need to fill in all of the goodies here? */
5424 5427 if (inner_ipv4) {
5425 5428 ixa->ixa_ipsec_inaf = AF_INET;
5426 5429 ixa->ixa_ipsec_insrc[0] =
5427 5430 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v4;
5428 5431 ixa->ixa_ipsec_indst[0] =
5429 5432 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v4;
5430 5433 } else {
5431 5434 ixa->ixa_ipsec_inaf = AF_INET6;
5432 5435 ixa->ixa_ipsec_insrc[0] =
5433 5436 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[0];
5434 5437 ixa->ixa_ipsec_insrc[1] =
5435 5438 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[1];
5436 5439 ixa->ixa_ipsec_insrc[2] =
5437 5440 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[2];
5438 5441 ixa->ixa_ipsec_insrc[3] =
5439 5442 pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[3];
5440 5443 ixa->ixa_ipsec_indst[0] =
5441 5444 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[0];
5442 5445 ixa->ixa_ipsec_indst[1] =
5443 5446 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[1];
5444 5447 ixa->ixa_ipsec_indst[2] =
5445 5448 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[2];
5446 5449 ixa->ixa_ipsec_indst[3] =
5447 5450 pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[3];
5448 5451 }
5449 5452 ixa->ixa_ipsec_insrcpfx = pol->ipsp_sel->ipsl_key.ipsl_local_pfxlen;
5450 5453 ixa->ixa_ipsec_indstpfx = pol->ipsp_sel->ipsl_key.ipsl_remote_pfxlen;
5451 5454 /* NOTE: These are used for transport mode too. */
5452 5455 ixa->ixa_ipsec_src_port = pol->ipsp_sel->ipsl_key.ipsl_lport;
5453 5456 ixa->ixa_ipsec_dst_port = pol->ipsp_sel->ipsl_key.ipsl_rport;
5454 5457 ixa->ixa_ipsec_proto = pol->ipsp_sel->ipsl_key.ipsl_proto;
5455 5458
5456 5459 return (mp);
5457 5460 }
5458 5461
5459 5462 /*
5460 5463 * NOTE: The following releases pol's reference and
5461 5464 * calls ip_drop_packet() for me on NULL returns.
5462 5465 */
5463 5466 mblk_t *
5464 5467 ipsec_check_ipsecin_policy_reasm(mblk_t *attr_mp, ipsec_policy_t *pol,
5465 5468 ipha_t *inner_ipv4, ip6_t *inner_ipv6, uint64_t pkt_unique, netstack_t *ns)
5466 5469 {
5467 5470	/* Assume attr_mp is a chain of b_next-linked ip_recv_attr mblks. */
5468 5471 mblk_t *data_chain = NULL, *data_tail = NULL;
5469 5472 mblk_t *next;
5470 5473 mblk_t *data_mp;
5471 5474 ip_recv_attr_t iras;
5472 5475
5473 5476 while (attr_mp != NULL) {
5474 5477 ASSERT(ip_recv_attr_is_mblk(attr_mp));
5475 5478 next = attr_mp->b_next;
5476 5479 attr_mp->b_next = NULL; /* No tripping asserts. */
5477 5480
5478 5481 data_mp = attr_mp->b_cont;
5479 5482 attr_mp->b_cont = NULL;
5480 5483 if (!ip_recv_attr_from_mblk(attr_mp, &iras)) {
5481 5484 /* The ill or ip_stack_t disappeared on us */
5482 5485 freemsg(data_mp); /* ip_drop_packet?? */
5483 5486 ira_cleanup(&iras, B_TRUE);
5484 5487 goto fail;
5485 5488 }
5486 5489
5487 5490 /*
5488 5491 * Need IPPOL_REFHOLD(pol) for extras because
5489 5492 * ipsecin_policy does the refrele.
5490 5493 */
5491 5494 IPPOL_REFHOLD(pol);
5492 5495
5493 5496 data_mp = ipsec_check_ipsecin_policy(data_mp, pol, inner_ipv4,
5494 5497 inner_ipv6, pkt_unique, &iras, ns);
5495 5498 ira_cleanup(&iras, B_TRUE);
5496 5499
5497 5500 if (data_mp == NULL)
5498 5501 goto fail;
5499 5502
5500 5503 if (data_tail == NULL) {
5501 5504 /* First one */
5502 5505 data_chain = data_tail = data_mp;
5503 5506 } else {
5504 5507 data_tail->b_next = data_mp;
5505 5508 data_tail = data_mp;
5506 5509 }
5507 5510 attr_mp = next;
5508 5511 }
5509 5512 /*
5510 5513 * One last release because either the loop bumped it up, or we never
5511 5514 * called ipsec_check_ipsecin_policy().
5512 5515 */
5513 5516 IPPOL_REFRELE(pol);
5514 5517
5515 5518 /* data_chain is ready for return to tun module. */
5516 5519 return (data_chain);
5517 5520
5518 5521 fail:
5519 5522 /*
5520 5523 * Need to get rid of any extra pol
5521 5524 * references, and any remaining bits as well.
5522 5525 */
5523 5526 IPPOL_REFRELE(pol);
5524 5527 ipsec_freemsg_chain(data_chain);
5525 5528 ipsec_freemsg_chain(next); /* ipdrop stats? */
5526 5529 return (NULL);
5527 5530 }
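
The b_next bookkeeping above is a standard chain-walk: detach each message before processing so nothing downstream sees a stale link, then tail-append survivors to the result chain. Here is a simplified standalone sketch of the pattern, with a plain struct standing in for mblk_t (unlike the routine above, it skips a failed node rather than aborting the whole chain):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for mblk_t: only the b_next link and a payload. */
    typedef struct node {
        struct node *b_next;
        int val;
    } node_t;

    /* Stand-in for the per-message check: drop odd values. */
    static node_t *
    process(node_t *n)
    {
        if (n->val & 1) {
            free(n);
            return (NULL);
        }
        return (n);
    }

    int
    main(void)
    {
        node_t *in = NULL, **tp = &in;
        node_t *chain = NULL, *tail = NULL, *next, *n;
        int i;

        /* Build a small input chain holding 0..4. */
        for (i = 0; i < 5; i++) {
            n = calloc(1, sizeof (*n));
            n->val = i;
            *tp = n;
            tp = &n->b_next;
        }

        while (in != NULL) {
            next = in->b_next;
            in->b_next = NULL;    /* detach before processing */
            n = process(in);
            if (n != NULL) {
                if (tail == NULL) {
                    chain = tail = n;
                } else {
                    tail->b_next = n;
                    tail = n;
                }
            }
            in = next;
        }

        for (n = chain; n != NULL; n = n->b_next)
            printf("%d ", n->val);    /* prints: 0 2 4 */
        printf("\n");
        return (0);
    }
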
5528 5531
5529 5532 /*
5530 5533	 * Returns a message if the inbound packet passed an IPsec policy check. Returns
5531 5534 * NULL if it failed or if it is a fragment needing its friends before a
5532 5535 * policy check can be performed.
5533 5536 *
5534 5537 * Expects a non-NULL data_mp, and a non-NULL polhead.
5535 5538 * The returned mblk may be a b_next chain of packets if fragments
5536 5539	 * needed to be collected for a proper policy check.
5537 5540 *
5538 5541 * This function calls ip_drop_packet() on data_mp if need be.
5539 5542 *
5540 5543 * NOTE: outer_hdr_len is signed. If it's a negative value, the caller
5541 5544 * is inspecting an ICMP packet.
5542 5545 */
5543 5546 mblk_t *
5544 5547 ipsec_tun_inbound(ip_recv_attr_t *ira, mblk_t *data_mp, ipsec_tun_pol_t *itp,
5545 5548 ipha_t *inner_ipv4, ip6_t *inner_ipv6, ipha_t *outer_ipv4,
5546 5549 ip6_t *outer_ipv6, int outer_hdr_len, netstack_t *ns)
5547 5550 {
5548 5551 ipsec_policy_head_t *polhead;
5549 5552 ipsec_selector_t sel;
5550 5553 ipsec_policy_t *pol;
5551 5554 uint16_t tmpport;
5552 5555 selret_t rc;
5553 5556 boolean_t port_policy_present, is_icmp, global_present;
5554 5557 in6_addr_t tmpaddr;
5555 5558 ipaddr_t tmp4;
5556 5559 uint8_t flags, *inner_hdr;
5557 5560 ipsec_stack_t *ipss = ns->netstack_ipsec;
5558 5561
5559 5562 sel.ips_is_icmp_inv_acq = 0;
5560 5563
5561 5564 if (outer_ipv4 != NULL) {
5562 5565 ASSERT(outer_ipv6 == NULL);
5563 5566 global_present = ipss->ipsec_inbound_v4_policy_present;
5564 5567 } else {
5565 5568 ASSERT(outer_ipv6 != NULL);
5566 5569 global_present = ipss->ipsec_inbound_v6_policy_present;
5567 5570 }
5568 5571
5569 5572 ASSERT(inner_ipv4 != NULL && inner_ipv6 == NULL ||
5570 5573 inner_ipv4 == NULL && inner_ipv6 != NULL);
5571 5574
5572 5575 if (outer_hdr_len < 0) {
5573 5576 outer_hdr_len = (-outer_hdr_len);
5574 5577 is_icmp = B_TRUE;
5575 5578 } else {
5576 5579 is_icmp = B_FALSE;
5577 5580 }
5578 5581
5579 5582 if (itp != NULL && (itp->itp_flags & ITPF_P_ACTIVE)) {
5580 5583 mblk_t *mp = data_mp;
5581 5584
5582 5585 polhead = itp->itp_policy;
5583 5586 /*
5584 5587 * We need to perform full Tunnel-Mode enforcement,
5585 5588 * and we need to have inner-header data for such enforcement.
5586 5589 *
5587 5590 * See ipsec_init_inbound_sel() for the 0x80000000 on inbound
5588 5591 * and on return.
5589 5592 */
5590 5593
5591 5594 port_policy_present = ((itp->itp_flags &
5592 5595 ITPF_P_PER_PORT_SECURITY) ? B_TRUE : B_FALSE);
5593 5596 /*
5594 5597 * NOTE: Even if our policy is transport mode, set the
5595 5598 * SEL_TUNNEL_MODE flag so ipsec_init_inbound_sel() can
5596 5599 * do the right thing w.r.t. outer headers.
5597 5600 */
5598 5601 flags = ((port_policy_present ? SEL_PORT_POLICY : SEL_NONE) |
5599 5602 (is_icmp ? SEL_IS_ICMP : SEL_NONE) | SEL_TUNNEL_MODE);
5600 5603
5601 5604 rc = ipsec_init_inbound_sel(&sel, data_mp, inner_ipv4,
5602 5605 inner_ipv6, flags);
5603 5606
5604 5607 switch (rc) {
5605 5608 case SELRET_NOMEM:
5606 5609 ip_drop_packet(data_mp, B_TRUE, NULL,
5607 5610 DROPPER(ipss, ipds_spd_nomem),
5608 5611 &ipss->ipsec_spd_dropper);
5609 5612 return (NULL);
5610 5613 case SELRET_TUNFRAG:
5611 5614 /*
5612 5615	 * At this point, a cleartext fragment must not
5613 5616	 * proceed to fragment caching; drop it instead.
5614 5617 */
5615 5618 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
5616 5619 ip_drop_packet(data_mp, B_TRUE, NULL,
5617 5620 DROPPER(ipss, ipds_spd_got_clear),
5618 5621 &ipss->ipsec_spd_dropper);
5619 5622 return (NULL);
5620 5623 }
5621 5624
5622 5625 /*
5623 5626 * Inner and outer headers may not be contiguous.
5624 5627 * Pullup the data_mp now to satisfy assumptions of
5625 5628 * ipsec_fragcache_add()
5626 5629 */
5627 5630 if (data_mp->b_cont != NULL) {
5628 5631 mblk_t *nmp;
5629 5632
5630 5633 nmp = msgpullup(data_mp, -1);
5631 5634 if (nmp == NULL) {
5632 5635 ip_drop_packet(data_mp, B_TRUE, NULL,
5633 5636 DROPPER(ipss, ipds_spd_nomem),
5634 5637 &ipss->ipsec_spd_dropper);
5635 5638 return (NULL);
5636 5639 }
5637 5640 freemsg(data_mp);
5638 5641 data_mp = nmp;
5639 5642 if (outer_ipv4 != NULL)
5640 5643 outer_ipv4 =
5641 5644 (ipha_t *)data_mp->b_rptr;
5642 5645 else
5643 5646 outer_ipv6 =
5644 5647 (ip6_t *)data_mp->b_rptr;
5645 5648 if (inner_ipv4 != NULL) {
5646 5649 inner_ipv4 =
5647 5650 (ipha_t *)(data_mp->b_rptr +
5648 5651 outer_hdr_len);
5649 5652 } else {
5650 5653 inner_ipv6 =
5651 5654 (ip6_t *)(data_mp->b_rptr +
5652 5655 outer_hdr_len);
5653 5656 }
5654 5657 }
5655 5658
5656 5659 /*
5657 5660	 * If we need to queue the packet, first get an
5658 5661	 * mblk with the attributes. ipsec_fragcache_add
5659 5662 * will prepend that to the queued data and return
5660 5663 * a list of b_next messages each of which starts with
5661 5664 * the attribute mblk.
5662 5665 */
5663 5666 mp = ip_recv_attr_to_mblk(ira);
5664 5667 if (mp == NULL) {
5665 5668 ip_drop_packet(data_mp, B_TRUE, NULL,
5666 5669 DROPPER(ipss, ipds_spd_nomem),
5667 5670 &ipss->ipsec_spd_dropper);
5668 5671 return (NULL);
5669 5672 }
5670 5673
5671 5674 mp = ipsec_fragcache_add(&itp->itp_fragcache,
5672 5675 mp, data_mp, outer_hdr_len, ipss);
5673 5676
5674 5677 if (mp == NULL) {
5675 5678 /*
5676 5679 * Data is cached, fragment chain is not
5677 5680 * complete.
5678 5681 */
5679 5682 return (NULL);
5680 5683 }
5681 5684
5682 5685 /*
5683 5686 * If we get here, we have a full fragment chain.
5684 5687 * Reacquire headers and selectors from first fragment.
5685 5688 */
5686 5689 ASSERT(ip_recv_attr_is_mblk(mp));
5687 5690 data_mp = mp->b_cont;
5688 5691 inner_hdr = data_mp->b_rptr;
5689 5692 if (outer_ipv4 != NULL) {
5690 5693 inner_hdr += IPH_HDR_LENGTH(
5691 5694 (ipha_t *)data_mp->b_rptr);
5692 5695 } else {
5693 5696 inner_hdr += ip_hdr_length_v6(data_mp,
5694 5697 (ip6_t *)data_mp->b_rptr);
5695 5698 }
5696 5699 ASSERT(inner_hdr <= data_mp->b_wptr);
5697 5700
5698 5701 if (inner_ipv4 != NULL) {
5699 5702 inner_ipv4 = (ipha_t *)inner_hdr;
5700 5703 inner_ipv6 = NULL;
5701 5704 } else {
5702 5705 inner_ipv6 = (ip6_t *)inner_hdr;
5703 5706 inner_ipv4 = NULL;
5704 5707 }
5705 5708
5706 5709 /*
5707 5710 * Use SEL_TUNNEL_MODE to take into account the outer
5708 5711 * header. Use SEL_POST_FRAG so we always get ports.
5709 5712 */
5710 5713 rc = ipsec_init_inbound_sel(&sel, data_mp,
5711 5714 inner_ipv4, inner_ipv6,
5712 5715 SEL_TUNNEL_MODE | SEL_POST_FRAG);
5713 5716 switch (rc) {
5714 5717 case SELRET_SUCCESS:
5715 5718 /*
5716 5719 * Get to same place as first caller's
5717 5720 * SELRET_SUCCESS case.
5718 5721 */
5719 5722 break;
5720 5723 case SELRET_NOMEM:
5721 5724 ip_drop_packet_chain(mp, B_TRUE, NULL,
5722 5725 DROPPER(ipss, ipds_spd_nomem),
5723 5726 &ipss->ipsec_spd_dropper);
5724 5727 return (NULL);
5725 5728 case SELRET_BADPKT:
5726 5729 ip_drop_packet_chain(mp, B_TRUE, NULL,
5727 5730 DROPPER(ipss, ipds_spd_malformed_frag),
5728 5731 &ipss->ipsec_spd_dropper);
5729 5732 return (NULL);
5730 5733 case SELRET_TUNFRAG:
5731 5734 cmn_err(CE_WARN, "(TUNFRAG on 2nd call...)");
5732 5735 /* FALLTHRU */
5733 5736 default:
5734 5737 cmn_err(CE_WARN, "ipsec_init_inbound_sel(mark2)"
5735 5738 " returns bizarro 0x%x", rc);
5736 5739 /* Guaranteed panic! */
5737 5740 ASSERT(rc == SELRET_NOMEM);
5738 5741 return (NULL);
5739 5742 }
5740 5743 /* FALLTHRU */
5741 5744 case SELRET_SUCCESS:
5742 5745 /*
5743 5746 * Common case:
5744 5747 * No per-port policy or a non-fragment. Keep going.
5745 5748 */
5746 5749 break;
5747 5750 case SELRET_BADPKT:
5748 5751 /*
5749 5752 * We may receive ICMP (with IPv6 inner) packets that
5750 5753 * trigger this return value. Send 'em in for
5751 5754 * enforcement checking.
5752 5755 */
5753 5756 cmn_err(CE_NOTE, "ipsec_tun_inbound(): "
5754 5757 "sending 'bad packet' in for enforcement");
5755 5758 break;
5756 5759 default:
5757 5760 cmn_err(CE_WARN,
5758 5761 "ipsec_init_inbound_sel() returns bizarro 0x%x",
5759 5762 rc);
5760 5763 ASSERT(rc == SELRET_NOMEM); /* Guaranteed panic! */
5761 5764 return (NULL);
5762 5765 }
5763 5766
5764 5767 if (is_icmp) {
5765 5768 /*
5766 5769 * Swap local/remote because this is an ICMP packet.
5767 5770 */
5768 5771 tmpaddr = sel.ips_local_addr_v6;
5769 5772 sel.ips_local_addr_v6 = sel.ips_remote_addr_v6;
5770 5773 sel.ips_remote_addr_v6 = tmpaddr;
5771 5774 tmpport = sel.ips_local_port;
5772 5775 sel.ips_local_port = sel.ips_remote_port;
5773 5776 sel.ips_remote_port = tmpport;
5774 5777 }
5775 5778
5776 5779 /* find_policy_head() */
5777 5780 rw_enter(&polhead->iph_lock, RW_READER);
5778 5781 pol = ipsec_find_policy_head(NULL, polhead, IPSEC_TYPE_INBOUND,
5779 5782 &sel);
5780 5783 rw_exit(&polhead->iph_lock);
5781 5784 if (pol != NULL) {
5782 5785 uint64_t pkt_unique;
5783 5786
5784 5787 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
5785 5788 if (!pol->ipsp_act->ipa_allow_clear) {
5786 5789 /*
5787 5790 * XXX should never get here with
5788 5791 * tunnel reassembled fragments?
5789 5792 */
5790 5793 ASSERT(mp == data_mp);
5791 5794 ip_drop_packet(data_mp, B_TRUE, NULL,
5792 5795 DROPPER(ipss, ipds_spd_got_clear),
5793 5796 &ipss->ipsec_spd_dropper);
5794 5797 IPPOL_REFRELE(pol);
5795 5798 return (NULL);
5796 5799 } else {
5797 5800 IPPOL_REFRELE(pol);
5798 5801 return (mp);
5799 5802 }
5800 5803 }
5801 5804 pkt_unique = SA_UNIQUE_ID(sel.ips_remote_port,
5802 5805 sel.ips_local_port,
5803 5806 (inner_ipv4 == NULL) ? IPPROTO_IPV6 :
5804 5807 IPPROTO_ENCAP, sel.ips_protocol);
5805 5808
5806 5809 /*
5807 5810 * NOTE: The following releases pol's reference and
5808 5811 * calls ip_drop_packet() for me on NULL returns.
5809 5812 *
5810 5813 * "sel" is still good here, so let's use it!
5811 5814 */
5812 5815 if (data_mp == mp) {
5813 5816 /* A single packet without attributes */
5814 5817 data_mp = ipsec_check_ipsecin_policy(data_mp,
5815 5818 pol, inner_ipv4, inner_ipv6, pkt_unique,
5816 5819 ira, ns);
5817 5820 } else {
5818 5821 /*
5819 5822 * We pass in the b_next chain of attr_mp's
5820 5823 * and get back a b_next chain of data_mp's.
5821 5824 */
5822 5825 data_mp = ipsec_check_ipsecin_policy_reasm(mp,
5823 5826 pol, inner_ipv4, inner_ipv6, pkt_unique,
5824 5827 ns);
5825 5828 }
5826 5829 return (data_mp);
5827 5830 }
5828 5831
5829 5832 /*
5830 5833 * Else fallthru and check the global policy on the outer
5831 5834 * header(s) if this tunnel is an old-style transport-mode
5832 5835 * one. Drop the packet explicitly (no policy entry) for
5833 5836 * a new-style tunnel-mode tunnel.
5834 5837 */
5835 5838 if ((itp->itp_flags & ITPF_P_TUNNEL) && !is_icmp) {
5836 5839 ip_drop_packet_chain(data_mp, B_TRUE, NULL,
5837 5840 DROPPER(ipss, ipds_spd_explicit),
5838 5841 &ipss->ipsec_spd_dropper);
5839 5842 return (NULL);
5840 5843 }
5841 5844 }
5842 5845
5843 5846 /*
5844 5847 * NOTE: If we reach here, we will not have packet chains from
5845 5848 * fragcache_add(), because the only way I get chains is on a
5846 5849 * tunnel-mode tunnel, which either returns with a pass, or gets
5847 5850 * hit by the ip_drop_packet_chain() call right above here.
5848 5851 */
5849 5852 ASSERT(data_mp->b_next == NULL);
5850 5853
5851 5854 /* If no per-tunnel security, check global policy now. */
5852 5855 if ((ira->ira_flags & IRAF_IPSEC_SECURE) && !global_present) {
5853 5856 if (ira->ira_flags & IRAF_TRUSTED_ICMP) {
5854 5857 /*
5855 5858	 * This is an ICMP message that was generated locally.
5856 5859 * We should accept it.
5857 5860 */
5858 5861 return (data_mp);
5859 5862 }
5860 5863
5861 5864 ip_drop_packet(data_mp, B_TRUE, NULL,
5862 5865 DROPPER(ipss, ipds_spd_got_secure),
5863 5866 &ipss->ipsec_spd_dropper);
5864 5867 return (NULL);
5865 5868 }
5866 5869
5867 5870 if (is_icmp) {
5868 5871 /*
5869 5872 * For ICMP packets, "outer_ipvN" is set to the outer header
5870 5873 * that is *INSIDE* the ICMP payload. For global policy
5871 5874 * checking, we need to reverse src/dst on the payload in
5872 5875 * order to construct selectors appropriately. See "ripha"
5873 5876 * constructions in ip.c. To avoid a bug like 6478464 (see
5874 5877 * earlier in this file), we will actually exchange src/dst
5875 5878	 * in the packet, and reverse it after the call to
5876 5879 * ipsec_check_global_policy().
5877 5880 */
5878 5881 if (outer_ipv4 != NULL) {
5879 5882 tmp4 = outer_ipv4->ipha_src;
5880 5883 outer_ipv4->ipha_src = outer_ipv4->ipha_dst;
5881 5884 outer_ipv4->ipha_dst = tmp4;
5882 5885 } else {
5883 5886 ASSERT(outer_ipv6 != NULL);
5884 5887 tmpaddr = outer_ipv6->ip6_src;
5885 5888 outer_ipv6->ip6_src = outer_ipv6->ip6_dst;
5886 5889 outer_ipv6->ip6_dst = tmpaddr;
5887 5890 }
5888 5891 }
5889 5892
5890 5893 data_mp = ipsec_check_global_policy(data_mp, NULL, outer_ipv4,
5891 5894 outer_ipv6, ira, ns);
5892 5895 if (data_mp == NULL)
5893 5896 return (NULL);
5894 5897
5895 5898 if (is_icmp) {
5896 5899 /* Set things back to normal. */
5897 5900 if (outer_ipv4 != NULL) {
5898 5901 tmp4 = outer_ipv4->ipha_src;
5899 5902 outer_ipv4->ipha_src = outer_ipv4->ipha_dst;
5900 5903 outer_ipv4->ipha_dst = tmp4;
5901 5904 } else {
5902 5905 /* No need for ASSERT()s now. */
5903 5906 tmpaddr = outer_ipv6->ip6_src;
5904 5907 outer_ipv6->ip6_src = outer_ipv6->ip6_dst;
5905 5908 outer_ipv6->ip6_dst = tmpaddr;
5906 5909 }
5907 5910 }
5908 5911
5909 5912 /*
5910 5913 * At this point, we pretend it's a cleartext accepted
5911 5914 * packet.
5912 5915 */
5913 5916 return (data_mp);
5914 5917 }
5915 5918
5916 5919 /*
5917 5920 * AVL comparison routine for our list of tunnel polheads.
5918 5921 */
5919 5922 static int
5920 5923 tunnel_compare(const void *arg1, const void *arg2)
5921 5924 {
5922 5925 ipsec_tun_pol_t *left, *right;
5923 5926 int rc;
5924 5927
5925 5928 left = (ipsec_tun_pol_t *)arg1;
5926 5929 right = (ipsec_tun_pol_t *)arg2;
5927 5930
5928 5931 rc = strncmp(left->itp_name, right->itp_name, LIFNAMSIZ);
5929 5932 return (rc == 0 ? rc : (rc > 0 ? 1 : -1));
5930 5933 }
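
The odd-looking return expression matters: AVL comparators must return exactly -1, 0, or 1, so the raw strncmp() result has to be normalized. A standalone sketch of the same comparator (the LIFNAMSIZ value here is assumed for illustration):

    #include <stdio.h>
    #include <string.h>

    #define LIFNAMSIZ   32    /* assumed value, for illustration */

    static int
    name_compare(const char *a, const char *b)
    {
        int rc = strncmp(a, b, LIFNAMSIZ);

        /* Collapse strncmp()'s arbitrary magnitude to -1/0/1. */
        return (rc == 0 ? 0 : (rc > 0 ? 1 : -1));
    }

    int
    main(void)
    {
        printf("%d %d %d\n",
            name_compare("ip.tun0", "ip.tun1"),   /* -1 */
            name_compare("ip.tun0", "ip.tun0"),   /*  0 */
            name_compare("ip.tun1", "ip.tun0"));  /*  1 */
        return (0);
    }
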
5931 5934
5932 5935 /*
5933 5936 * Free a tunnel policy node.
5934 5937 */
5935 5938 void
5936 5939 itp_free(ipsec_tun_pol_t *node, netstack_t *ns)
5937 5940 {
5938 5941 if (node->itp_policy != NULL) {
5939 5942 IPPH_REFRELE(node->itp_policy, ns);
5940 5943 node->itp_policy = NULL;
5941 5944 }
5942 5945 if (node->itp_inactive != NULL) {
5943 5946 IPPH_REFRELE(node->itp_inactive, ns);
5944 5947 node->itp_inactive = NULL;
5945 5948 }
5946 5949 mutex_destroy(&node->itp_lock);
5947 5950 kmem_free(node, sizeof (*node));
5948 5951 }
5949 5952
5950 5953 void
5951 5954 itp_unlink(ipsec_tun_pol_t *node, netstack_t *ns)
5952 5955 {
5953 5956 ipsec_stack_t *ipss = ns->netstack_ipsec;
5954 5957
5955 5958 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_WRITER);
5956 5959 ipss->ipsec_tunnel_policy_gen++;
5957 5960 ipsec_fragcache_uninit(&node->itp_fragcache, ipss);
5958 5961 avl_remove(&ipss->ipsec_tunnel_policies, node);
5959 5962 rw_exit(&ipss->ipsec_tunnel_policy_lock);
5960 5963 ITP_REFRELE(node, ns);
5961 5964 }
5962 5965
5963 5966 /*
5964 5967 * Public interface to look up a tunnel security policy by name. Used by
5965 5968 * spdsock mostly. Returns "node" with a bumped refcnt.
5966 5969 */
5967 5970 ipsec_tun_pol_t *
5968 5971 get_tunnel_policy(char *name, netstack_t *ns)
5969 5972 {
5970 5973 ipsec_tun_pol_t *node, lookup;
5971 5974 ipsec_stack_t *ipss = ns->netstack_ipsec;
5972 5975
5973 5976 (void) strncpy(lookup.itp_name, name, LIFNAMSIZ);
5974 5977
5975 5978 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
5976 5979 node = (ipsec_tun_pol_t *)avl_find(&ipss->ipsec_tunnel_policies,
5977 5980 &lookup, NULL);
5978 5981 if (node != NULL) {
5979 5982 ITP_REFHOLD(node);
5980 5983 }
5981 5984 rw_exit(&ipss->ipsec_tunnel_policy_lock);
5982 5985
5983 5986 return (node);
5984 5987 }
5985 5988
5986 5989 /*
5987 5990	 * Public interface to walk all tunnel security policies. Useful for spdsock
5988 5991 * DUMP operations. iterator() will not consume a reference.
5989 5992 */
5990 5993 void
5991 5994 itp_walk(void (*iterator)(ipsec_tun_pol_t *, void *, netstack_t *),
5992 5995 void *arg, netstack_t *ns)
5993 5996 {
5994 5997 ipsec_tun_pol_t *node;
5995 5998 ipsec_stack_t *ipss = ns->netstack_ipsec;
5996 5999
5997 6000 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
5998 6001 for (node = avl_first(&ipss->ipsec_tunnel_policies); node != NULL;
5999 6002 node = AVL_NEXT(&ipss->ipsec_tunnel_policies, node)) {
6000 6003 iterator(node, arg, ns);
6001 6004 }
6002 6005 rw_exit(&ipss->ipsec_tunnel_policy_lock);
6003 6006 }
6004 6007
6005 6008 /*
6006 6009 * Initialize policy head. This can only fail if there's a memory problem.
6007 6010 */
6008 6011 static boolean_t
6009 6012 tunnel_polhead_init(ipsec_policy_head_t *iph, netstack_t *ns)
6010 6013 {
6011 6014 ipsec_stack_t *ipss = ns->netstack_ipsec;
6012 6015
6013 6016 rw_init(&iph->iph_lock, NULL, RW_DEFAULT, NULL);
6014 6017 iph->iph_refs = 1;
6015 6018 iph->iph_gen = 0;
6016 6019 if (ipsec_alloc_table(iph, ipss->ipsec_tun_spd_hashsize,
6017 6020 KM_SLEEP, B_FALSE, ns) != 0) {
6018 6021 ipsec_polhead_free_table(iph);
6019 6022 return (B_FALSE);
6020 6023 }
6021 6024 ipsec_polhead_init(iph, ipss->ipsec_tun_spd_hashsize);
6022 6025 return (B_TRUE);
6023 6026 }
6024 6027
6025 6028 /*
6026 6029	 * Create a tunnel policy node with "name". Sets *errno to
6027 6030	 * ENOMEM if there's a memory problem, and to EEXIST if there's
6028 6031	 * an existing node.
6029 6032 */
6030 6033 ipsec_tun_pol_t *
6031 6034 create_tunnel_policy(char *name, int *errno, uint64_t *gen, netstack_t *ns)
6032 6035 {
6033 6036 ipsec_tun_pol_t *newbie, *existing;
6034 6037 avl_index_t where;
6035 6038 ipsec_stack_t *ipss = ns->netstack_ipsec;
6036 6039
6037 6040 newbie = kmem_zalloc(sizeof (*newbie), KM_NOSLEEP);
6038 6041 if (newbie == NULL) {
6039 6042 *errno = ENOMEM;
6040 6043 return (NULL);
6041 6044 }
6042 6045 if (!ipsec_fragcache_init(&newbie->itp_fragcache)) {
6043 6046 kmem_free(newbie, sizeof (*newbie));
6044 6047 *errno = ENOMEM;
6045 6048 return (NULL);
6046 6049 }
6047 6050
6048 6051 (void) strncpy(newbie->itp_name, name, LIFNAMSIZ);
6049 6052
6050 6053 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_WRITER);
6051 6054 existing = (ipsec_tun_pol_t *)avl_find(&ipss->ipsec_tunnel_policies,
6052 6055 newbie, &where);
6053 6056 if (existing != NULL) {
6054 6057 itp_free(newbie, ns);
6055 6058 *errno = EEXIST;
6056 6059 rw_exit(&ipss->ipsec_tunnel_policy_lock);
6057 6060 return (NULL);
6058 6061 }
6059 6062 ipss->ipsec_tunnel_policy_gen++;
6060 6063 *gen = ipss->ipsec_tunnel_policy_gen;
6061 6064 newbie->itp_refcnt = 2; /* One for the caller, one for the tree. */
6062 6065 newbie->itp_next_policy_index = 1;
6063 6066 avl_insert(&ipss->ipsec_tunnel_policies, newbie, where);
6064 6067 mutex_init(&newbie->itp_lock, NULL, MUTEX_DEFAULT, NULL);
6065 6068 newbie->itp_policy = kmem_zalloc(sizeof (ipsec_policy_head_t),
6066 6069 KM_NOSLEEP);
6067 6070 if (newbie->itp_policy == NULL)
6068 6071 goto nomem;
6069 6072 newbie->itp_inactive = kmem_zalloc(sizeof (ipsec_policy_head_t),
6070 6073 KM_NOSLEEP);
6071 6074 if (newbie->itp_inactive == NULL) {
6072 6075 kmem_free(newbie->itp_policy, sizeof (ipsec_policy_head_t));
6073 6076 goto nomem;
6074 6077 }
6075 6078
6076 6079 if (!tunnel_polhead_init(newbie->itp_policy, ns)) {
6077 6080 kmem_free(newbie->itp_policy, sizeof (ipsec_policy_head_t));
6078 6081 kmem_free(newbie->itp_inactive, sizeof (ipsec_policy_head_t));
6079 6082 goto nomem;
6080 6083 } else if (!tunnel_polhead_init(newbie->itp_inactive, ns)) {
6081 6084 IPPH_REFRELE(newbie->itp_policy, ns);
6082 6085 kmem_free(newbie->itp_inactive, sizeof (ipsec_policy_head_t));
6083 6086 goto nomem;
6084 6087 }
6085 6088 rw_exit(&ipss->ipsec_tunnel_policy_lock);
6086 6089
6087 6090 return (newbie);
6088 6091 nomem:
6089 6092 *errno = ENOMEM;
6090 6093 kmem_free(newbie, sizeof (*newbie));
6091 6094 return (NULL);
6092 6095 }
6093 6096
6094 6097 /*
6095 6098 * Given two addresses, find a tunnel instance's IPsec policy heads.
6096 6099 * Returns NULL on failure.
6097 6100 */
6098 6101 ipsec_tun_pol_t *
6099 6102 itp_get_byaddr(uint32_t *laddr, uint32_t *faddr, int af, ip_stack_t *ipst)
6100 6103 {
6101 6104 conn_t *connp;
6102 6105 iptun_t *iptun;
6103 6106 ipsec_tun_pol_t *itp = NULL;
6104 6107
6105 6108	/* Classifiers expect "src" to be the foreign address. */
6106 6109 if (af == AF_INET) {
6107 6110 connp = ipcl_iptun_classify_v4((ipaddr_t *)faddr,
6108 6111 (ipaddr_t *)laddr, ipst);
6109 6112 } else {
6110 6113 ASSERT(af == AF_INET6);
6111 6114 ASSERT(!IN6_IS_ADDR_V4MAPPED((in6_addr_t *)laddr));
6112 6115 ASSERT(!IN6_IS_ADDR_V4MAPPED((in6_addr_t *)faddr));
6113 6116 connp = ipcl_iptun_classify_v6((in6_addr_t *)faddr,
6114 6117 (in6_addr_t *)laddr, ipst);
6115 6118 }
6116 6119
6117 6120 if (connp == NULL)
6118 6121 return (NULL);
6119 6122
6120 6123 if (IPCL_IS_IPTUN(connp)) {
6121 6124 iptun = connp->conn_iptun;
6122 6125 if (iptun != NULL) {
6123 6126 itp = iptun->iptun_itp;
6124 6127 if (itp != NULL) {
6125 6128 /* Braces due to the macro's nature... */
6126 6129 ITP_REFHOLD(itp);
6127 6130 }
6128 6131 } /* Else itp is already NULL. */
6129 6132 }
6130 6133
6131 6134 CONN_DEC_REF(connp);
6132 6135 return (itp);
6133 6136 }
6134 6137
6135 6138 /*
6136 6139 * Frag cache code, based on SunScreen 3.2 source
6137 6140 * screen/kernel/common/screen_fragcache.c
6138 6141 */
6139 6142
6140 6143 #define IPSEC_FRAG_TTL_MAX 5
6141 6144 /*
6142 6145 * Note that the following parameters create 256 hash buckets
6143 6146 * with 1024 free entries to be distributed. Things are cleaned
6144 6147 * periodically and are attempted to be cleaned when there is no
6145 6148 * free space, but this system errs on the side of dropping packets
6146 6149 * over creating memory exhaustion. We may decide to make hash
6147 6150 * factor a tunable if this proves to be a bad decision.
6148 6151 */
6149 6152 #define IPSEC_FRAG_HASH_SLOTS (1<<8)
6150 6153 #define IPSEC_FRAG_HASH_FACTOR 4
6151 6154 #define IPSEC_FRAG_HASH_SIZE (IPSEC_FRAG_HASH_SLOTS * IPSEC_FRAG_HASH_FACTOR)
6152 6155
6153 6156 #define IPSEC_FRAG_HASH_MASK (IPSEC_FRAG_HASH_SLOTS - 1)
6154 6157 #define IPSEC_FRAG_HASH_FUNC(id) (((id) & IPSEC_FRAG_HASH_MASK) ^ \
6155 6158 (((id) / \
6156 6159 (ushort_t)IPSEC_FRAG_HASH_SLOTS) & \
6157 6160 IPSEC_FRAG_HASH_MASK))
6158 6161
6159 6162	/* Maximum fragments per packet. 48-byte payloads x 1366 fragments > 64KB */
6160 6163 #define IPSEC_MAX_FRAGS 1366
6161 6164
6162 6165 #define V4_FRAG_OFFSET(ipha) ((ntohs(ipha->ipha_fragment_offset_and_flags) & \
6163 6166 IPH_OFFSET) << 3)
6164 6167 #define V4_MORE_FRAGS(ipha) (ntohs(ipha->ipha_fragment_offset_and_flags) & \
6165 6168 IPH_MF)
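
A standalone sketch of the hash and v4 offset arithmetic above; the constants mirror the macros, and the sample IP ID and fragment field are arbitrary:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    #define SLOTS   (1 << 8)          /* IPSEC_FRAG_HASH_SLOTS */
    #define MASK    (SLOTS - 1)       /* IPSEC_FRAG_HASH_MASK */
    #define HASH(id) (((id) & MASK) ^ (((id) / (uint16_t)SLOTS) & MASK))

    int
    main(void)
    {
        uint16_t ident = 0x1234;

        /* Fold the low byte with the next byte: 0x34 ^ 0x12 = bucket 38. */
        printf("bucket = %u\n", (unsigned)HASH(ident));

        /* v4 offset field: flags in the top 3 bits, offset in 8-byte units. */
        uint16_t off_and_flags = htons(0x2010); /* MF set, offset 16 units */
        uint16_t firstbyte =
            (ntohs(off_and_flags) & 0x1FFF) << 3; /* 0x1FFF == IPH_OFFSET */
        printf("first byte = %u\n", (unsigned)firstbyte); /* 16 * 8 = 128 */
        return (0);
    }
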
6166 6169
6167 6170 /*
6168 6171 * Initialize an ipsec fragcache instance.
6169 6172 * Returns B_FALSE if memory allocation fails.
6170 6173 */
6171 6174 boolean_t
6172 6175 ipsec_fragcache_init(ipsec_fragcache_t *frag)
6173 6176 {
6174 6177 ipsec_fragcache_entry_t *ftemp;
6175 6178 int i;
6176 6179
6177 6180 mutex_init(&frag->itpf_lock, NULL, MUTEX_DEFAULT, NULL);
6178 6181 frag->itpf_ptr = (ipsec_fragcache_entry_t **)
6179 6182 kmem_zalloc(sizeof (ipsec_fragcache_entry_t *) *
6180 6183 IPSEC_FRAG_HASH_SLOTS, KM_NOSLEEP);
6181 6184 if (frag->itpf_ptr == NULL)
6182 6185 return (B_FALSE);
6183 6186
6184 6187 ftemp = (ipsec_fragcache_entry_t *)
6185 6188 kmem_zalloc(sizeof (ipsec_fragcache_entry_t) *
6186 6189 IPSEC_FRAG_HASH_SIZE, KM_NOSLEEP);
6187 6190 if (ftemp == NULL) {
6188 6191 kmem_free(frag->itpf_ptr, sizeof (ipsec_fragcache_entry_t *) *
6189 6192 IPSEC_FRAG_HASH_SLOTS);
6190 6193 return (B_FALSE);
6191 6194 }
6192 6195
6193 6196 frag->itpf_freelist = NULL;
6194 6197
6195 6198 for (i = 0; i < IPSEC_FRAG_HASH_SIZE; i++) {
6196 6199 ftemp->itpfe_next = frag->itpf_freelist;
6197 6200 frag->itpf_freelist = ftemp;
6198 6201 ftemp++;
6199 6202 }
6200 6203
6201 6204 frag->itpf_expire_hint = 0;
6202 6205
6203 6206 return (B_TRUE);
6204 6207 }
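
The freelist construction above threads one contiguous allocation into a LIFO list by pushing each entry on the front; allocation later pops from the front, as ipsec_fragcache_add() does below. A minimal standalone sketch of the idiom:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct entry {
        struct entry *next;
        /* ... payload fields would live here ... */
    } entry_t;

    int
    main(void)
    {
        enum { NENTRIES = 8 };
        entry_t *pool, *freelist = NULL, *e;
        int i, count = 0;

        /* One allocation, threaded into a LIFO free list. */
        pool = calloc(NENTRIES, sizeof (entry_t));
        if (pool == NULL)
            return (1);
        for (i = 0; i < NENTRIES; i++) {
            pool[i].next = freelist;
            freelist = &pool[i];
        }

        /* Allocation is then a pop from the front. */
        e = freelist;
        freelist = e->next;

        for (e = freelist; e != NULL; e = e->next)
            count++;
        printf("%d entries left on the free list\n", count);    /* 7 */

        free(pool);
        return (0);
    }
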
6205 6208
6206 6209 void
6207 6210 ipsec_fragcache_uninit(ipsec_fragcache_t *frag, ipsec_stack_t *ipss)
6208 6211 {
6209 6212 ipsec_fragcache_entry_t *fep;
6210 6213 int i;
6211 6214
6212 6215 mutex_enter(&frag->itpf_lock);
6213 6216 if (frag->itpf_ptr) {
6214 6217 /* Delete any existing fragcache entry chains */
6215 6218 for (i = 0; i < IPSEC_FRAG_HASH_SLOTS; i++) {
6216 6219 fep = (frag->itpf_ptr)[i];
6217 6220 while (fep != NULL) {
6218 6221 /* Returned fep is next in chain or NULL */
6219 6222 fep = fragcache_delentry(i, fep, frag, ipss);
6220 6223 }
6221 6224 }
6222 6225 /*
6223 6226 * Chase the pointers back to the beginning
6224 6227 * of the memory allocation and then
6225 6228 * get rid of the allocated freelist
6226 6229 */
6227 6230 while (frag->itpf_freelist->itpfe_next != NULL)
6228 6231 frag->itpf_freelist = frag->itpf_freelist->itpfe_next;
6229 6232 /*
6230 6233 * XXX - If we ever dynamically grow the freelist
6231 6234 * then we'll have to free entries individually
6232 6235 * or determine how many entries or chunks we have
6233 6236 * grown since the initial allocation.
6234 6237 */
6235 6238 kmem_free(frag->itpf_freelist,
6236 6239 sizeof (ipsec_fragcache_entry_t) *
6237 6240 IPSEC_FRAG_HASH_SIZE);
6238 6241 /* Free the fragcache structure */
6239 6242 kmem_free(frag->itpf_ptr,
6240 6243 sizeof (ipsec_fragcache_entry_t *) *
6241 6244 IPSEC_FRAG_HASH_SLOTS);
6242 6245 }
6243 6246 mutex_exit(&frag->itpf_lock);
6244 6247 mutex_destroy(&frag->itpf_lock);
6245 6248 }
6246 6249
6247 6250 /*
6248 6251 * Add a fragment to the fragment cache. Consumes mp if NULL is returned.
6249 6252 * Returns mp if a whole fragment has been assembled, NULL otherwise
6250 6253 * The returned mp could be a b_next chain of fragments.
6251 6254 *
6252 6255 * The iramp argument is set on inbound; NULL if outbound.
6253 6256 */
6254 6257 mblk_t *
6255 6258 ipsec_fragcache_add(ipsec_fragcache_t *frag, mblk_t *iramp, mblk_t *mp,
6256 6259 int outer_hdr_len, ipsec_stack_t *ipss)
6257 6260 {
6258 6261 boolean_t is_v4;
6259 6262 time_t itpf_time;
6260 6263 ipha_t *iph;
6261 6264 ipha_t *oiph;
6262 6265 ip6_t *ip6h = NULL;
6263 6266 uint8_t v6_proto;
6264 6267 uint8_t *v6_proto_p;
6265 6268 uint16_t ip6_hdr_length;
6266 6269 ip_pkt_t ipp;
6267 6270 ip6_frag_t *fraghdr;
6268 6271 ipsec_fragcache_entry_t *fep;
6269 6272 int i;
6270 6273 mblk_t *nmp, *prevmp;
6271 6274 int firstbyte, lastbyte;
6272 6275 int offset;
6273 6276 int last;
6274 6277 boolean_t inbound = (iramp != NULL);
6275 6278
6276 6279 #ifdef FRAGCACHE_DEBUG
6277 6280 cmn_err(CE_WARN, "Fragcache: %s\n", inbound ? "INBOUND" : "OUTBOUND");
6278 6281 #endif
6279 6282 /*
6280 6283	 * You're on the slow path, so ensure that every packet in the
6281 6284 * cache is a single-mblk one.
6282 6285 */
6283 6286 if (mp->b_cont != NULL) {
6284 6287 nmp = msgpullup(mp, -1);
6285 6288 if (nmp == NULL) {
6286 6289 ip_drop_packet(mp, inbound, NULL,
6287 6290 DROPPER(ipss, ipds_spd_nomem),
6288 6291 &ipss->ipsec_spd_dropper);
6289 6292 if (inbound)
6290 6293 (void) ip_recv_attr_free_mblk(iramp);
6291 6294 return (NULL);
6292 6295 }
6293 6296 freemsg(mp);
6294 6297 mp = nmp;
6295 6298 }
6296 6299
6297 6300 mutex_enter(&frag->itpf_lock);
6298 6301
6299 6302 oiph = (ipha_t *)mp->b_rptr;
6300 6303 iph = (ipha_t *)(mp->b_rptr + outer_hdr_len);
6301 6304
6302 6305 if (IPH_HDR_VERSION(iph) == IPV4_VERSION) {
6303 6306 is_v4 = B_TRUE;
6304 6307 } else {
6305 6308 ASSERT(IPH_HDR_VERSION(iph) == IPV6_VERSION);
6306 6309 ip6h = (ip6_t *)(mp->b_rptr + outer_hdr_len);
6307 6310
6308 6311 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &ip6_hdr_length,
6309 6312 &v6_proto_p)) {
6310 6313 /*
6311 6314 * Find upper layer protocol.
6312 6315	 * If it fails, we have a malformed packet.
6313 6316 */
6314 6317 mutex_exit(&frag->itpf_lock);
6315 6318 ip_drop_packet(mp, inbound, NULL,
6316 6319 DROPPER(ipss, ipds_spd_malformed_packet),
6317 6320 &ipss->ipsec_spd_dropper);
6318 6321 if (inbound)
6319 6322 (void) ip_recv_attr_free_mblk(iramp);
6320 6323 return (NULL);
6321 6324 } else {
6322 6325 v6_proto = *v6_proto_p;
6323 6326 }
6324 6327
6325 6328
6326 6329 bzero(&ipp, sizeof (ipp));
6327 6330 (void) ip_find_hdr_v6(mp, ip6h, B_FALSE, &ipp, NULL);
6328 6331 if (!(ipp.ipp_fields & IPPF_FRAGHDR)) {
6329 6332 /*
6330 6333 * We think this is a fragment, but didn't find
6331 6334 * a fragment header. Something is wrong.
6332 6335 */
6333 6336 mutex_exit(&frag->itpf_lock);
6334 6337 ip_drop_packet(mp, inbound, NULL,
6335 6338 DROPPER(ipss, ipds_spd_malformed_frag),
6336 6339 &ipss->ipsec_spd_dropper);
6337 6340 if (inbound)
6338 6341 (void) ip_recv_attr_free_mblk(iramp);
6339 6342 return (NULL);
6340 6343 }
6341 6344 fraghdr = ipp.ipp_fraghdr;
6342 6345 is_v4 = B_FALSE;
6343 6346 }
6344 6347
6345 6348 /* Anything to cleanup? */
6346 6349
6347 6350 /*
6348 6351 * This cleanup call could be put in a timer loop
6349 6352 * but it may actually be just as reasonable a decision to
6350 6353 * leave it here. The disadvantage is this only gets called when
6351 6354 * frags are added. The advantage is that it is not
6352 6355 * susceptible to race conditions like a time-based cleanup
6353 6356 * may be.
6354 6357 */
6355 6358 itpf_time = gethrestime_sec();
6356 6359 if (itpf_time >= frag->itpf_expire_hint)
6357 6360 ipsec_fragcache_clean(frag, ipss);
6358 6361
6359 6362 /* Lookup to see if there is an existing entry */
6360 6363
6361 6364 if (is_v4)
6362 6365 i = IPSEC_FRAG_HASH_FUNC(iph->ipha_ident);
6363 6366 else
6364 6367 i = IPSEC_FRAG_HASH_FUNC(fraghdr->ip6f_ident);
6365 6368
6366 6369 for (fep = (frag->itpf_ptr)[i]; fep; fep = fep->itpfe_next) {
6367 6370 if (is_v4) {
6368 6371 ASSERT(iph != NULL);
6369 6372 if ((fep->itpfe_id == iph->ipha_ident) &&
6370 6373 (fep->itpfe_src == iph->ipha_src) &&
6371 6374 (fep->itpfe_dst == iph->ipha_dst) &&
6372 6375 (fep->itpfe_proto == iph->ipha_protocol))
6373 6376 break;
6374 6377 } else {
6375 6378 ASSERT(fraghdr != NULL);
6376 6379 ASSERT(fep != NULL);
6377 6380 if ((fep->itpfe_id == fraghdr->ip6f_ident) &&
6378 6381 IN6_ARE_ADDR_EQUAL(&fep->itpfe_src6,
6379 6382 &ip6h->ip6_src) &&
6380 6383 IN6_ARE_ADDR_EQUAL(&fep->itpfe_dst6,
6381 6384 &ip6h->ip6_dst) && (fep->itpfe_proto == v6_proto))
6382 6385 break;
6383 6386 }
6384 6387 }
6385 6388
6386 6389 if (is_v4) {
6387 6390 firstbyte = V4_FRAG_OFFSET(iph);
6388 6391 lastbyte = firstbyte + ntohs(iph->ipha_length) -
6389 6392 IPH_HDR_LENGTH(iph);
6390 6393 last = (V4_MORE_FRAGS(iph) == 0);
6391 6394 #ifdef FRAGCACHE_DEBUG
6392 6395 cmn_err(CE_WARN, "V4 fragcache: firstbyte = %d, lastbyte = %d, "
6393 6396 "is_last_frag = %d, id = %d, mp = %p\n", firstbyte,
6394 6397 lastbyte, last, iph->ipha_ident, mp);
6395 6398 #endif
6396 6399 } else {
6397 6400 firstbyte = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
6398 6401 lastbyte = firstbyte + ntohs(ip6h->ip6_plen) +
6399 6402 sizeof (ip6_t) - ip6_hdr_length;
6400 6403 last = (fraghdr->ip6f_offlg & IP6F_MORE_FRAG) == 0;
6401 6404 #ifdef FRAGCACHE_DEBUG
6402 6405 cmn_err(CE_WARN, "V6 fragcache: firstbyte = %d, lastbyte = %d, "
6403 6406 "is_last_frag = %d, id = %d, fraghdr = %p, mp = %p\n",
6404 6407 firstbyte, lastbyte, last, fraghdr->ip6f_ident, fraghdr,
6405 6408 mp);
6406 6409 #endif
6407 6410 }
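
Sanity-checking the arithmetic above with illustrative numbers: a v4 fragment with a 20-byte header, an ipha_length of 1500, and a fragment-offset field of 185 gives firstbyte = 185 * 8 = 1480 and lastbyte = 1480 + 1500 - 20 = 2960, i.e. it carries payload bytes [1480, 2960). On the v6 side, ip6_plen counts everything after the fixed 40-byte header, so the fragment's data length works out to plen + sizeof (ip6_t) - ip6_hdr_length: exactly the bytes that follow the fragment header.
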
6408 6411
6409 6412 	/* Offsets of 1-8 are bogus (tiny-fragment attack); drop and purge the entry */
6410 6413 if (firstbyte > 0 && firstbyte <= 8) {
6411 6414 if (fep != NULL)
6412 6415 (void) fragcache_delentry(i, fep, frag, ipss);
6413 6416 mutex_exit(&frag->itpf_lock);
6414 6417 ip_drop_packet(mp, inbound, NULL,
6415 6418 DROPPER(ipss, ipds_spd_malformed_frag),
6416 6419 &ipss->ipsec_spd_dropper);
6417 6420 if (inbound)
6418 6421 (void) ip_recv_attr_free_mblk(iramp);
6419 6422 return (NULL);
6420 6423 }
6421 6424
6422 6425 /* Not found, allocate a new entry */
6423 6426 if (fep == NULL) {
6424 6427 if (frag->itpf_freelist == NULL) {
6425 6428 /* see if there is some space */
6426 6429 ipsec_fragcache_clean(frag, ipss);
6427 6430 if (frag->itpf_freelist == NULL) {
6428 6431 mutex_exit(&frag->itpf_lock);
6429 6432 ip_drop_packet(mp, inbound, NULL,
6430 6433 DROPPER(ipss, ipds_spd_nomem),
6431 6434 &ipss->ipsec_spd_dropper);
6432 6435 if (inbound)
6433 6436 (void) ip_recv_attr_free_mblk(iramp);
6434 6437 return (NULL);
6435 6438 }
6436 6439 }
6437 6440
6438 6441 fep = frag->itpf_freelist;
6439 6442 frag->itpf_freelist = fep->itpfe_next;
6440 6443
6441 6444 if (is_v4) {
6442 6445 bcopy((caddr_t)&iph->ipha_src, (caddr_t)&fep->itpfe_src,
6443 6446 sizeof (struct in_addr));
6444 6447 bcopy((caddr_t)&iph->ipha_dst, (caddr_t)&fep->itpfe_dst,
6445 6448 sizeof (struct in_addr));
6446 6449 fep->itpfe_id = iph->ipha_ident;
6447 6450 fep->itpfe_proto = iph->ipha_protocol;
6448 6451 i = IPSEC_FRAG_HASH_FUNC(fep->itpfe_id);
6449 6452 } else {
6450 6453 bcopy((in6_addr_t *)&ip6h->ip6_src,
6451 6454 (in6_addr_t *)&fep->itpfe_src6,
6452 6455 sizeof (struct in6_addr));
6453 6456 bcopy((in6_addr_t *)&ip6h->ip6_dst,
6454 6457 (in6_addr_t *)&fep->itpfe_dst6,
6455 6458 sizeof (struct in6_addr));
6456 6459 fep->itpfe_id = fraghdr->ip6f_ident;
6457 6460 fep->itpfe_proto = v6_proto;
6458 6461 i = IPSEC_FRAG_HASH_FUNC(fep->itpfe_id);
6459 6462 }
6460 6463 itpf_time = gethrestime_sec();
6461 6464 fep->itpfe_exp = itpf_time + IPSEC_FRAG_TTL_MAX + 1;
6462 6465 fep->itpfe_last = 0;
6463 6466 fep->itpfe_fraglist = NULL;
6464 6467 fep->itpfe_depth = 0;
6465 6468 fep->itpfe_next = (frag->itpf_ptr)[i];
6466 6469 (frag->itpf_ptr)[i] = fep;
6467 6470
6468 6471 if (frag->itpf_expire_hint > fep->itpfe_exp)
6469 6472 frag->itpf_expire_hint = fep->itpfe_exp;
6470 6473
6471 6474 }
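
For readers without ipsec_impl.h open, these are the ipsec_fragcache_entry_t fields this path relies on, reconstructed from the uses above. Types and layout are guesses; the v4 and v6 addresses, for instance, are likely overlaid in a union in the real declaration:

    typedef struct demo_fragcache_entry {
    	uint32_t	itpfe_id;	 /* fragment identification */
    	ipaddr_t	itpfe_src;	 /* v4 source address */
    	ipaddr_t	itpfe_dst;	 /* v4 destination address */
    	in6_addr_t	itpfe_src6;	 /* v6 source address */
    	in6_addr_t	itpfe_dst6;	 /* v6 destination address */
    	uint8_t		itpfe_proto;	 /* upper-layer protocol */
    	time_t		itpfe_exp;	 /* expiration time */
    	int		itpfe_last;	 /* final fragment seen? */
    	mblk_t		*itpfe_fraglist; /* b_next chain, offset order */
    	int		itpfe_depth;	 /* fragments on the list */
    	struct demo_fragcache_entry *itpfe_next; /* hash or freelist */
    } demo_fragcache_entry_t;
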
6472 6475
6473 6476 /* Insert it in the frag list */
6474 6477 /* List is in order by starting offset of fragments */
6475 6478
6476 6479 prevmp = NULL;
6477 6480 for (nmp = fep->itpfe_fraglist; nmp; nmp = nmp->b_next) {
6478 6481 ipha_t *niph;
6479 6482 ipha_t *oniph;
6480 6483 ip6_t *nip6h;
6481 6484 ip_pkt_t nipp;
6482 6485 ip6_frag_t *nfraghdr;
6483 6486 uint16_t nip6_hdr_length;
6484 6487 uint8_t *nv6_proto_p;
6485 6488 int nfirstbyte, nlastbyte;
6486 6489 char *data, *ndata;
6487 6490 mblk_t *ndata_mp = (inbound ? nmp->b_cont : nmp);
6488 6491 int hdr_len;
6489 6492
6490 6493 oniph = (ipha_t *)mp->b_rptr;
6491 6494 nip6h = NULL;
6492 6495 niph = NULL;
6493 6496
6494 6497 /*
6495 6498 * Determine outer header type and length and set
6496 6499 * pointers appropriately
6497 6500 */
6498 6501
6499 6502 if (IPH_HDR_VERSION(oniph) == IPV4_VERSION) {
6500 6503 hdr_len = ((outer_hdr_len != 0) ?
6501 6504 IPH_HDR_LENGTH(oiph) : 0);
6502 6505 niph = (ipha_t *)(ndata_mp->b_rptr + hdr_len);
6503 6506 } else {
6504 6507 ASSERT(IPH_HDR_VERSION(oniph) == IPV6_VERSION);
6505 6508 ASSERT(ndata_mp->b_cont == NULL);
6506 6509 nip6h = (ip6_t *)ndata_mp->b_rptr;
6507 6510 (void) ip_hdr_length_nexthdr_v6(ndata_mp, nip6h,
6508 6511 &nip6_hdr_length, &v6_proto_p);
6509 6512 hdr_len = ((outer_hdr_len != 0) ? nip6_hdr_length : 0);
6510 6513 }
6511 6514
6512 6515 /*
6513 6516 * Determine inner header type and length and set
6514 6517 * pointers appropriately
6515 6518 */
6516 6519
6517 6520 if (is_v4) {
6518 6521 if (niph == NULL) {
6519 6522 /* Was v6 outer */
6520 6523 niph = (ipha_t *)(ndata_mp->b_rptr + hdr_len);
6521 6524 }
6522 6525 nfirstbyte = V4_FRAG_OFFSET(niph);
6523 6526 nlastbyte = nfirstbyte + ntohs(niph->ipha_length) -
6524 6527 IPH_HDR_LENGTH(niph);
6525 6528 } else {
6526 6529 ASSERT(ndata_mp->b_cont == NULL);
6527 6530 nip6h = (ip6_t *)(ndata_mp->b_rptr + hdr_len);
6528 6531 if (!ip_hdr_length_nexthdr_v6(ndata_mp, nip6h,
6529 6532 &nip6_hdr_length, &nv6_proto_p)) {
6530 6533 mutex_exit(&frag->itpf_lock);
6531 6534 ip_drop_packet_chain(nmp, inbound, NULL,
6532 6535 DROPPER(ipss, ipds_spd_malformed_frag),
6533 6536 &ipss->ipsec_spd_dropper);
6534 6537 ipsec_freemsg_chain(ndata_mp);
6535 6538 if (inbound)
6536 6539 (void) ip_recv_attr_free_mblk(iramp);
6537 6540 return (NULL);
6538 6541 }
6539 6542 bzero(&nipp, sizeof (nipp));
6540 6543 (void) ip_find_hdr_v6(ndata_mp, nip6h, B_FALSE, &nipp,
6541 6544 NULL);
6542 6545 nfraghdr = nipp.ipp_fraghdr;
6543 6546 nfirstbyte = ntohs(nfraghdr->ip6f_offlg &
6544 6547 IP6F_OFF_MASK);
6545 6548 nlastbyte = nfirstbyte + ntohs(nip6h->ip6_plen) +
6546 6549 sizeof (ip6_t) - nip6_hdr_length;
6547 6550 }
6548 6551
6549 6552 /* Check for overlapping fragments */
6550 6553 if (firstbyte >= nfirstbyte && firstbyte < nlastbyte) {
6551 6554 /*
6552 6555 * Overlap Check:
6553 6556 * ~~~~--------- # Check if the newly
6554 6557 * ~ ndata_mp| # received fragment
6555 6558 * ~~~~--------- # overlaps with the
6556 6559 * ---------~~~~~~ # current fragment.
6557 6560 * | mp ~
6558 6561 * ---------~~~~~~
6559 6562 */
6560 6563 if (is_v4) {
6561 6564 data = (char *)iph + IPH_HDR_LENGTH(iph) +
6562 6565 firstbyte - nfirstbyte;
6563 6566 ndata = (char *)niph + IPH_HDR_LENGTH(niph);
6564 6567 } else {
6565 6568 data = (char *)ip6h +
6566 6569 nip6_hdr_length + firstbyte -
6567 6570 nfirstbyte;
6568 6571 ndata = (char *)nip6h + nip6_hdr_length;
6569 6572 }
6570 6573 if (bcmp(data, ndata, MIN(lastbyte, nlastbyte) -
6571 6574 firstbyte)) {
6572 6575 /* Overlapping data does not match */
6573 6576 (void) fragcache_delentry(i, fep, frag, ipss);
6574 6577 mutex_exit(&frag->itpf_lock);
6575 6578 ip_drop_packet(mp, inbound, NULL,
6576 6579 DROPPER(ipss, ipds_spd_overlap_frag),
6577 6580 &ipss->ipsec_spd_dropper);
6578 6581 if (inbound)
6579 6582 (void) ip_recv_attr_free_mblk(iramp);
6580 6583 return (NULL);
6581 6584 }
6582 6585 /* Part of defense for jolt2.c fragmentation attack */
6583 6586 if (firstbyte >= nfirstbyte && lastbyte <= nlastbyte) {
6584 6587 /*
6585 6588 * Check for identical or subset fragments:
6586 6589 * ---------- ~~~~--------~~~~~
6587 6590 * | nmp | or ~ nmp ~
6588 6591 * ---------- ~~~~--------~~~~~
6589 6592 * ---------- ------
6590 6593 * | mp | | mp |
6591 6594 * ---------- ------
6592 6595 */
6593 6596 mutex_exit(&frag->itpf_lock);
6594 6597 ip_drop_packet(mp, inbound, NULL,
6595 6598 DROPPER(ipss, ipds_spd_evil_frag),
6596 6599 &ipss->ipsec_spd_dropper);
6597 6600 if (inbound)
6598 6601 (void) ip_recv_attr_free_mblk(iramp);
6599 6602 return (NULL);
6600 6603 }
6601 6604
6602 6605 }
6603 6606
6604 6607 /* Correct location for this fragment? */
6605 6608 if (firstbyte <= nfirstbyte) {
6606 6609 /*
6607 6610 * Check if the tail end of the new fragment overlaps
6608 6611 * with the head of the current fragment.
6609 6612 * --------~~~~~~~
6610 6613 * | nmp ~
6611 6614 * --------~~~~~~~
6612 6615 * ~~~~~--------
6613 6616 * ~ mp |
6614 6617 * ~~~~~--------
6615 6618 */
6616 6619 			if (lastbyte > nfirstbyte) {
6617 6620 				/* Fragments overlap */
6621 6624 if (is_v4) {
6622 6625 data = (char *)iph +
6623 6626 IPH_HDR_LENGTH(iph) + firstbyte -
6624 6627 nfirstbyte;
6625 6628 ndata = (char *)niph +
6626 6629 IPH_HDR_LENGTH(niph);
6627 6630 } else {
6628 6631 data = (char *)ip6h +
6629 6632 nip6_hdr_length + firstbyte -
6630 6633 nfirstbyte;
6631 6634 ndata = (char *)nip6h + nip6_hdr_length;
6632 6635 }
6633 6636 if (bcmp(data, ndata, MIN(lastbyte, nlastbyte)
6634 6637 - nfirstbyte)) {
6635 6638 /* Overlap mismatch */
6636 6639 (void) fragcache_delentry(i, fep, frag,
6637 6640 ipss);
6638 6641 mutex_exit(&frag->itpf_lock);
6639 6642 ip_drop_packet(mp, inbound, NULL,
6640 6643 DROPPER(ipss,
6641 6644 ipds_spd_overlap_frag),
6642 6645 &ipss->ipsec_spd_dropper);
6643 6646 if (inbound) {
6644 6647 (void) ip_recv_attr_free_mblk(
6645 6648 iramp);
6646 6649 }
6647 6650 return (NULL);
6648 6651 }
6649 6652 }
6650 6653
6651 6654 /*
6652 6655 * Fragment does not illegally overlap and can now
6653 6656 * be inserted into the chain
6654 6657 */
6655 6658 break;
6656 6659 }
6657 6660
6658 6661 prevmp = nmp;
6659 6662 }
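
Condensed, the two overlap checks in the loop above enforce a single rule: wherever two fragments cover the same bytes, those bytes must be identical, and a fragment that contributes nothing new (a duplicate or subset) is dropped outright. A self-contained sketch of that predicate; the names are invented here, not taken from this file:

    #include <strings.h>	/* bcmp() */

    /*
     * Compare a new fragment covering [first, last) against a cached
     * one covering [nfirst, nlast).  Returns 0 if compatible, -1 if
     * the overlapping bytes disagree (conflicting retransmission),
     * -2 if the new fragment is an exact duplicate or a subset.
     */
    static int
    demo_frag_overlap_check(int first, int last, const char *data,
        int nfirst, int nlast, const char *ndata)
    {
    	if (first < nlast && last > nfirst) {	/* ranges overlap */
    		int lo = (first > nfirst) ? first : nfirst;
    		int hi = (last < nlast) ? last : nlast;

    		if (bcmp(data + (lo - first), ndata + (lo - nfirst),
    		    hi - lo) != 0)
    			return (-1);
    		if (first >= nfirst && last <= nlast)
    			return (-2);
    	}
    	return (0);
    }
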
6660 6663 /* Prepend the attributes before we link it in */
6661 6664 if (iramp != NULL) {
6662 6665 ASSERT(iramp->b_cont == NULL);
6663 6666 iramp->b_cont = mp;
6664 6667 mp = iramp;
6665 6668 iramp = NULL;
6666 6669 }
6667 6670 mp->b_next = nmp;
6668 6671
6669 6672 if (prevmp == NULL) {
6670 6673 fep->itpfe_fraglist = mp;
6671 6674 } else {
6672 6675 prevmp->b_next = mp;
6673 6676 }
6674 6677 if (last)
6675 6678 fep->itpfe_last = 1;
6676 6679
6677 6680 /* Part of defense for jolt2.c fragmentation attack */
6678 6681 if (++(fep->itpfe_depth) > IPSEC_MAX_FRAGS) {
6679 6682 (void) fragcache_delentry(i, fep, frag, ipss);
6680 6683 mutex_exit(&frag->itpf_lock);
6681 6684 if (inbound)
6682 6685 mp = ip_recv_attr_free_mblk(mp);
6683 6686
6684 6687 ip_drop_packet(mp, inbound, NULL,
6685 6688 DROPPER(ipss, ipds_spd_max_frags),
6686 6689 &ipss->ipsec_spd_dropper);
6687 6690 return (NULL);
6688 6691 }
6689 6692
6690 6693 /* Check for complete packet */
6691 6694
6692 6695 if (!fep->itpfe_last) {
6693 6696 mutex_exit(&frag->itpf_lock);
6694 6697 #ifdef FRAGCACHE_DEBUG
6695 6698 cmn_err(CE_WARN, "Fragment cached, last not yet seen.\n");
6696 6699 #endif
6697 6700 return (NULL);
6698 6701 }
6699 6702
6700 6703 offset = 0;
6701 6704 for (mp = fep->itpfe_fraglist; mp; mp = mp->b_next) {
6702 6705 mblk_t *data_mp = (inbound ? mp->b_cont : mp);
6703 6706 int hdr_len;
6704 6707
6705 6708 oiph = (ipha_t *)data_mp->b_rptr;
6706 6709 ip6h = NULL;
6707 6710 iph = NULL;
6708 6711
6709 6712 if (IPH_HDR_VERSION(oiph) == IPV4_VERSION) {
6710 6713 hdr_len = ((outer_hdr_len != 0) ?
6711 6714 IPH_HDR_LENGTH(oiph) : 0);
6712 6715 iph = (ipha_t *)(data_mp->b_rptr + hdr_len);
6713 6716 } else {
6714 6717 ASSERT(IPH_HDR_VERSION(oiph) == IPV6_VERSION);
6715 6718 ASSERT(data_mp->b_cont == NULL);
6716 6719 ip6h = (ip6_t *)data_mp->b_rptr;
6717 6720 (void) ip_hdr_length_nexthdr_v6(data_mp, ip6h,
6718 6721 &ip6_hdr_length, &v6_proto_p);
6719 6722 hdr_len = ((outer_hdr_len != 0) ? ip6_hdr_length : 0);
6720 6723 }
6721 6724
6722 6725 /* Calculate current fragment start/end */
6723 6726 if (is_v4) {
6724 6727 if (iph == NULL) {
6725 6728 /* Was v6 outer */
6726 6729 iph = (ipha_t *)(data_mp->b_rptr + hdr_len);
6727 6730 }
6728 6731 firstbyte = V4_FRAG_OFFSET(iph);
6729 6732 lastbyte = firstbyte + ntohs(iph->ipha_length) -
6730 6733 IPH_HDR_LENGTH(iph);
6731 6734 } else {
6732 6735 ASSERT(data_mp->b_cont == NULL);
6733 6736 ip6h = (ip6_t *)(data_mp->b_rptr + hdr_len);
6734 6737 if (!ip_hdr_length_nexthdr_v6(data_mp, ip6h,
6735 6738 &ip6_hdr_length, &v6_proto_p)) {
6736 6739 mutex_exit(&frag->itpf_lock);
6737 6740 ip_drop_packet_chain(mp, inbound, NULL,
6738 6741 DROPPER(ipss, ipds_spd_malformed_frag),
6739 6742 &ipss->ipsec_spd_dropper);
6740 6743 return (NULL);
6741 6744 }
6742 6745 v6_proto = *v6_proto_p;
6743 6746 bzero(&ipp, sizeof (ipp));
6744 6747 (void) ip_find_hdr_v6(data_mp, ip6h, B_FALSE, &ipp,
6745 6748 NULL);
6746 6749 fraghdr = ipp.ipp_fraghdr;
6747 6750 firstbyte = ntohs(fraghdr->ip6f_offlg &
6748 6751 IP6F_OFF_MASK);
6749 6752 lastbyte = firstbyte + ntohs(ip6h->ip6_plen) +
6750 6753 sizeof (ip6_t) - ip6_hdr_length;
6751 6754 }
6752 6755
6753 6756 		/*
6754 6757 		 * If this fragment starts beyond the current offset,
6755 6758 		 * a fragment is still missing, so return NULL
6756 6759 		 */
6757 6760 if (firstbyte > offset) {
6758 6761 mutex_exit(&frag->itpf_lock);
6759 6762 #ifdef FRAGCACHE_DEBUG
6760 6763 /*
6761 6764 * Note, this can happen when the last frag
6762 6765 * gets sent through because it is smaller
6763 6766 * than the MTU. It is not necessarily an
6764 6767 * error condition.
6765 6768 */
6766 6769 cmn_err(CE_WARN, "Frag greater than offset! : "
6767 6770 "missing fragment: firstbyte = %d, offset = %d, "
6768 6771 "mp = %p\n", firstbyte, offset, mp);
6769 6772 #endif
6770 6773 return (NULL);
6771 6774 }
6772 6775 #ifdef FRAGCACHE_DEBUG
6773 6776 cmn_err(CE_WARN, "Frag offsets : "
6774 6777 "firstbyte = %d, offset = %d, mp = %p\n",
6775 6778 firstbyte, offset, mp);
6776 6779 #endif
6777 6780
6778 6781 /*
6779 6782 * If we are at the last fragment, we have the complete
6780 6783 * packet, so rechain things and return it to caller
6781 6784 * for processing
6782 6785 */
6783 6786
6784 6787 if ((is_v4 && !V4_MORE_FRAGS(iph)) ||
6785 6788 (!is_v4 && !(fraghdr->ip6f_offlg & IP6F_MORE_FRAG))) {
6786 6789 mp = fep->itpfe_fraglist;
6787 6790 fep->itpfe_fraglist = NULL;
6788 6791 (void) fragcache_delentry(i, fep, frag, ipss);
6789 6792 mutex_exit(&frag->itpf_lock);
6790 6793
6791 6794 if ((is_v4 && (firstbyte + ntohs(iph->ipha_length) >
6792 6795 65535)) || (!is_v4 && (firstbyte +
6793 6796 ntohs(ip6h->ip6_plen) > 65535))) {
6794 6797 				/* An invalid "ping-of-death" packet; */
6795 6798 				/* discard it */
6796 6799 ip_drop_packet_chain(mp, inbound, NULL,
6797 6800 DROPPER(ipss, ipds_spd_evil_frag),
6798 6801 &ipss->ipsec_spd_dropper);
6799 6802 return (NULL);
6800 6803 }
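
Concretely: a final v4 fragment at firstbyte 65528 carrying an ipha_length of 528 would reassemble to 65528 + 528 = 66056 bytes, which overflows the 16-bit IP length field; that is the classic ping-of-death, so the whole chain is discarded.
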
6801 6804 #ifdef FRAGCACHE_DEBUG
6802 6805 cmn_err(CE_WARN, "Fragcache returning mp = %p, "
6803 6806 "mp->b_next = %p", mp, mp->b_next);
6804 6807 #endif
6805 6808 /*
6806 6809 * For inbound case, mp has attrmp b_next'd chain
6807 6810 * For outbound case, it is just data mp chain
6808 6811 */
6809 6812 return (mp);
6810 6813 }
6811 6814
6812 6815 /*
6813 6816 * Update new ending offset if this
6814 6817 * fragment extends the packet
6815 6818 */
6816 6819 if (offset < lastbyte)
6817 6820 offset = lastbyte;
6818 6821 }
6819 6822
6820 6823 mutex_exit(&frag->itpf_lock);
6821 6824
6822 6825 /* Didn't find last fragment, so return NULL */
6823 6826 return (NULL);
6824 6827 }
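
Separated from the header parsing, the completeness scan above reduces to a walk over (first, last, more) triples: each fragment must start at or before the coverage accumulated so far, and success means reaching a fragment with the more-fragments flag clear before any gap appears. A minimal sketch with invented types:

    #include <sys/types.h>	/* boolean_t */

    typedef struct demo_frag {
    	int		df_first;	/* starting byte offset */
    	int		df_last;	/* one past the final byte */
    	boolean_t	df_more;	/* more-fragments flag set? */
    	struct demo_frag *df_next;	/* sorted by df_first */
    } demo_frag_t;

    /* B_TRUE iff the sorted list is gap-free up to a final fragment. */
    static boolean_t
    demo_frags_complete(const demo_frag_t *list)
    {
    	int offset = 0;
    	const demo_frag_t *f;

    	for (f = list; f != NULL; f = f->df_next) {
    		if (f->df_first > offset)
    			return (B_FALSE);	/* gap: fragment missing */
    		if (!f->df_more)
    			return (B_TRUE);	/* final fragment reached */
    		if (f->df_last > offset)
    			offset = f->df_last;	/* extend coverage */
    	}
    	return (B_FALSE);	/* final fragment not seen yet */
    }
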
6825 6828
6826 6829 static void
6827 6830 ipsec_fragcache_clean(ipsec_fragcache_t *frag, ipsec_stack_t *ipss)
6828 6831 {
6829 6832 ipsec_fragcache_entry_t *fep;
6830 6833 int i;
6831 6834 ipsec_fragcache_entry_t *earlyfep = NULL;
6832 6835 time_t itpf_time;
6833 6836 int earlyexp;
6834 6837 int earlyi = 0;
6835 6838
6836 6839 ASSERT(MUTEX_HELD(&frag->itpf_lock));
6837 6840
6838 6841 itpf_time = gethrestime_sec();
6839 6842 earlyexp = itpf_time + 10000;
6840 6843
6841 6844 for (i = 0; i < IPSEC_FRAG_HASH_SLOTS; i++) {
6842 6845 fep = (frag->itpf_ptr)[i];
6843 6846 while (fep) {
6844 6847 if (fep->itpfe_exp < itpf_time) {
6845 6848 /* found */
6846 6849 fep = fragcache_delentry(i, fep, frag, ipss);
6847 6850 } else {
6848 6851 if (fep->itpfe_exp < earlyexp) {
6849 6852 earlyfep = fep;
6850 6853 earlyexp = fep->itpfe_exp;
6851 6854 earlyi = i;
6852 6855 }
6853 6856 fep = fep->itpfe_next;
6854 6857 }
6855 6858 }
6856 6859 }
6857 6860
6858 6861 frag->itpf_expire_hint = earlyexp;
6859 6862
6860 6863 	/* If nothing has expired, evict the entry expiring soonest */
6861 6864 if (frag->itpf_freelist == NULL)
6862 6865 (void) fragcache_delentry(earlyi, earlyfep, frag, ipss);
6863 6866 }
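
The earlyexp/itpf_expire_hint bookkeeping here is what lets ipsec_fragcache_add() skip this sweep most of the time: the hint tracks the minimum itpfe_exp over live entries, so while gethrestime_sec() is still below it, nothing can have expired. A condensed sketch of the invariant, with demo names standing in for the real types:

    typedef struct demo_fragcache {
    	time_t	itpf_expire_hint;	/* min itpfe_exp over live entries */
    	/* ... hash slots, freelist ... */
    } demo_fragcache_t;

    extern void demo_fragcache_clean(demo_fragcache_t *);

    static void
    demo_maybe_clean(demo_fragcache_t *frag)
    {
    	/* "now < hint" proves no entry has expired yet. */
    	if (gethrestime_sec() >= frag->itpf_expire_hint)
    		demo_fragcache_clean(frag);	/* sweep; recompute hint */
    }
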
6864 6867
6865 6868 static ipsec_fragcache_entry_t *
6866 6869 fragcache_delentry(int slot, ipsec_fragcache_entry_t *fep,
6867 6870 ipsec_fragcache_t *frag, ipsec_stack_t *ipss)
6868 6871 {
6869 6872 ipsec_fragcache_entry_t *targp;
6870 6873 ipsec_fragcache_entry_t *nextp = fep->itpfe_next;
6871 6874
6872 6875 ASSERT(MUTEX_HELD(&frag->itpf_lock));
6873 6876
6874 6877 /* Free up any fragment list still in cache entry */
6875 6878 if (fep->itpfe_fraglist != NULL) {
6876 6879 ip_drop_packet_chain(fep->itpfe_fraglist,
6877 6880 ip_recv_attr_is_mblk(fep->itpfe_fraglist), NULL,
6878 6881 DROPPER(ipss, ipds_spd_expired_frags),
6879 6882 &ipss->ipsec_spd_dropper);
6880 6883 }
6881 6884 fep->itpfe_fraglist = NULL;
6882 6885
6883 6886 targp = (frag->itpf_ptr)[slot];
6884 6887 	ASSERT(targp != NULL);
6885 6888
6886 6889 if (targp == fep) {
6887 6890 /* unlink from head of hash chain */
6888 6891 (frag->itpf_ptr)[slot] = nextp;
6889 6892 /* link into free list */
6890 6893 fep->itpfe_next = frag->itpf_freelist;
6891 6894 frag->itpf_freelist = fep;
6892 6895 return (nextp);
6893 6896 }
6894 6897
6895 6898 	/* A doubly linked list would make this unlink faster; see below */
6896 6899 /* must be past front of chain */
6897 6900 while (targp) {
6898 6901 if (targp->itpfe_next == fep) {
6899 6902 /* unlink from hash chain */
6900 6903 targp->itpfe_next = nextp;
6901 6904 /* link into free list */
6902 6905 fep->itpfe_next = frag->itpf_freelist;
6903 6906 frag->itpf_freelist = fep;
6904 6907 return (nextp);
6905 6908 }
6906 6909 targp = targp->itpfe_next;
6907 6910 		ASSERT(targp != NULL);
6908 6911 }
6909 6912 /* NOTREACHED */
6910 6913 return (NULL);
6911 6914 }
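
Following up on the "doubly linked list" remark above: with a hypothetical itpfe_prev back pointer (it does not exist in the current structure), the linear search disappears and the unlink becomes O(1). A sketch, reusing the demo slot count from the earlier hash sketch:

    typedef struct demo_entry {
    	struct demo_entry *itpfe_next;
    	struct demo_entry *itpfe_prev;	/* hypothetical back pointer */
    } demo_entry_t;

    typedef struct demo_cache {
    	demo_entry_t	*itpf_ptr[DEMO_FRAG_HASH_SLOTS];
    	demo_entry_t	*itpf_freelist;
    } demo_cache_t;

    static void
    demo_delentry_o1(int slot, demo_entry_t *fep, demo_cache_t *frag)
    {
    	if (fep->itpfe_prev != NULL)
    		fep->itpfe_prev->itpfe_next = fep->itpfe_next;
    	else
    		frag->itpf_ptr[slot] = fep->itpfe_next;	/* chain head */
    	if (fep->itpfe_next != NULL)
    		fep->itpfe_next->itpfe_prev = fep->itpfe_prev;

    	/* The freelist can stay singly linked. */
    	fep->itpfe_next = frag->itpf_freelist;
    	frag->itpf_freelist = fep;
    }
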
|
↓ open down ↓ |
1779 lines elided |
↑ open up ↑ |