Bayard's initial drop; needs finishing, or at least testing.
--- old/usr/src/uts/common/inet/ipsec_impl.h
+++ new/usr/src/uts/common/inet/ipsec_impl.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 + * Copyright (c) 2012 Nexenta Systems, Inc. All rights reserved.
24 25 */
25 26
26 27 #ifndef _INET_IPSEC_IMPL_H
27 28 #define _INET_IPSEC_IMPL_H
28 29
29 30 #include <inet/ip.h>
30 31 #include <inet/ipdrop.h>
31 32
32 33 #ifdef __cplusplus
33 34 extern "C" {
34 35 #endif
35 36
36 37 #define IPSEC_CONF_SRC_ADDRESS 0 /* Source Address */
37 38 #define IPSEC_CONF_SRC_PORT 1 /* Source Port */
38 39 #define IPSEC_CONF_DST_ADDRESS 2 /* Dest Address */
39 40 #define IPSEC_CONF_DST_PORT 3 /* Dest Port */
40 41 #define IPSEC_CONF_SRC_MASK 4 /* Source Address Mask */
41 42 #define IPSEC_CONF_DST_MASK 5 /* Destination Address Mask */
42 43 #define IPSEC_CONF_ULP 6 /* Upper layer Port */
43 44 #define IPSEC_CONF_IPSEC_PROT 7 /* AH or ESP or AH_ESP */
44 45 #define IPSEC_CONF_IPSEC_AALGS 8 /* Auth Algorithms - MD5 etc. */
45 46 #define IPSEC_CONF_IPSEC_EALGS 9 /* Encr Algorithms - DES etc. */
46 47 #define IPSEC_CONF_IPSEC_EAALGS 10 /* Encr Algorithms - MD5 etc. */
47 48 #define IPSEC_CONF_IPSEC_SA 11 /* Shared or unique SA */
48 49 #define IPSEC_CONF_IPSEC_DIR 12 /* Direction of traffic */
49 50 #define IPSEC_CONF_ICMP_TYPE 13 /* ICMP type */
50 51 #define IPSEC_CONF_ICMP_CODE 14 /* ICMP code */
51 52 #define IPSEC_CONF_NEGOTIATE 15 /* Negotiation */
52 53 #define IPSEC_CONF_TUNNEL 16 /* Tunnel */
53 54
54 55 /* Type of an entry */
55 56
56 57 #define IPSEC_NTYPES 0x02
57 58 #define IPSEC_TYPE_OUTBOUND 0x00
58 59 #define IPSEC_TYPE_INBOUND 0x01
59 60
60 61 /* Policy */
61 62 #define IPSEC_POLICY_APPLY 0x01
62 63 #define IPSEC_POLICY_DISCARD 0x02
63 64 #define IPSEC_POLICY_BYPASS 0x03
64 65
65 66 /* Shared or unique SA */
66 67 #define IPSEC_SHARED_SA 0x01
67 68 #define IPSEC_UNIQUE_SA 0x02
68 69
69 70 /* IPsec protocols and combinations */
70 71 #define IPSEC_AH_ONLY 0x01
71 72 #define IPSEC_ESP_ONLY 0x02
72 73 #define IPSEC_AH_ESP 0x03
73 74
74 75 /*
75 76 * Internally defined "any" algorithm.
76 77 * Move to PF_KEY v3 when that RFC is released.
77 78 */
78 79 #define SADB_AALG_ANY 255
79 80
80 81 #ifdef _KERNEL
81 82
82 83 #include <inet/common.h>
83 84 #include <netinet/ip6.h>
84 85 #include <netinet/icmp6.h>
85 86 #include <net/pfkeyv2.h>
86 87 #include <inet/ip.h>
87 88 #include <inet/sadb.h>
88 89 #include <inet/ipsecah.h>
89 90 #include <inet/ipsecesp.h>
90 91 #include <sys/crypto/common.h>
91 92 #include <sys/crypto/api.h>
92 93 #include <sys/avl.h>
93 94
94 95 /*
95 96 * Maximum number of authentication algorithms (can be indexed by one byte
96 97 * per PF_KEY and the IKE IPsec DOI.
97 98 */
98 99 #define MAX_AALGS 256
99 100
100 101 /*
101 102 * IPsec task queue constants.
102 103 */
103 104 #define IPSEC_TASKQ_MIN 10
104 105 #define IPSEC_TASKQ_MAX 20
105 106
106 107 /*
107 108 * So we can access IPsec global variables that live in keysock.c.
108 109 */
109 110 extern boolean_t keysock_extended_reg(netstack_t *);
110 111 extern uint32_t keysock_next_seq(netstack_t *);
111 112
112 113 /*
113 114 * Locking for ipsec policy rules:
114 115 *
115 116 * policy heads: system policy is static; per-conn polheads are dynamic,
116 117 * and refcounted (and inherited); use atomic refcounts and "don't let
117 118 * go with both hands".
118 119 *
119 120 * policy: refcounted; references from polhead, ipsec_out
120 121 *
121 122 * actions: refcounted; referenced from: action hash table, policy, ipsec_out
122 123 * selectors: refcounted; referenced from: selector hash table, policy.
123 124 */
124 125
125 126 /*
126 127 * the following are inspired by, but not directly based on,
127 128 * some of the sys/queue.h type-safe pseudo-polymorphic macros
128 129 * found in BSD.
129 130 *
130 131 * XXX If we use these more generally, we'll have to make the names
131 132 * less generic (HASH_* will probably clobber other namespaces).
132 133 */
133 134
134 135 #define HASH_LOCK(table, hash) \
135 136 mutex_enter(&(table)[hash].hash_lock)
136 137 #define HASH_UNLOCK(table, hash) \
137 138 mutex_exit(&(table)[hash].hash_lock)
138 139
139 140 #define HASH_LOCKED(table, hash) \
140 141 MUTEX_HELD(&(table)[hash].hash_lock)
141 142
142 143 #define HASH_ITERATE(var, field, table, hash) \
143 144 var = table[hash].hash_head; var != NULL; var = var->field.hash_next
144 145
145 146 #define HASH_NEXT(var, field) \
146 147 (var)->field.hash_next
147 148
148 149 #define HASH_INSERT(var, field, table, hash) \
149 150 { \
150 151 ASSERT(HASH_LOCKED(table, hash)); \
151 152 (var)->field.hash_next = (table)[hash].hash_head; \
152 153 (var)->field.hash_pp = &(table)[hash].hash_head; \
153 154 (table)[hash].hash_head = var; \
154 155 if ((var)->field.hash_next != NULL) \
155 156 (var)->field.hash_next->field.hash_pp = \
156 157 &((var)->field.hash_next); \
157 158 }
158 159
159 160
160 161 #define HASH_UNCHAIN(var, field, table, hash) \
161 162 { \
162 163 ASSERT(MUTEX_HELD(&(table)[hash].hash_lock)); \
163 164 HASHLIST_UNCHAIN(var, field); \
164 165 }
165 166
166 167 #define HASHLIST_INSERT(var, field, head) \
167 168 { \
168 169 (var)->field.hash_next = head; \
169 170 (var)->field.hash_pp = &(head); \
170 171 head = var; \
171 172 if ((var)->field.hash_next != NULL) \
172 173 (var)->field.hash_next->field.hash_pp = \
173 174 &((var)->field.hash_next); \
174 175 }
175 176
176 177 #define HASHLIST_UNCHAIN(var, field) \
177 178 { \
178 179 *var->field.hash_pp = var->field.hash_next; \
179 180 if (var->field.hash_next) \
180 181 var->field.hash_next->field.hash_pp = \
181 182 var->field.hash_pp; \
182 183 HASH_NULL(var, field); \
183 184 }
184 185
185 186
186 187 #define HASH_NULL(var, field) \
187 188 { \
188 189 var->field.hash_next = NULL; \
189 190 var->field.hash_pp = NULL; \
190 191 }
191 192
192 193 #define HASH_LINK(fieldname, type) \
193 194 struct { \
194 195 type *hash_next; \
195 196 type **hash_pp; \
196 197 } fieldname
197 198
198 199
199 200 #define HASH_HEAD(tag) \
200 201 struct { \
201 202 struct tag *hash_head; \
202 203 kmutex_t hash_lock; \
203 204 }
204 205
205 206
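As a usage illustration, here is a minimal sketch of how the HASH_LINK / HASH_HEAD / HASH_LOCK / HASH_INSERT macros above are meant to fit together (foo_t, foo_table and foo_insert() are hypothetical names, not part of this header):

	typedef struct foo_s {
		HASH_LINK(foo_hash, struct foo_s); /* embedded next + back-pointer pair */
		int foo_key;
	} foo_t;

	typedef HASH_HEAD(foo_s) foo_bucket_t;	/* head pointer + per-bucket lock */

	static foo_bucket_t foo_table[16];

	static void
	foo_insert(foo_t *f, uint32_t hval)
	{
		HASH_LOCK(foo_table, hval & 15);
		HASH_INSERT(f, foo_hash, foo_table, hval & 15);
		HASH_UNLOCK(foo_table, hval & 15);
	}

The hash_pp back-pointer maintained by HASH_INSERT is what lets HASHLIST_UNCHAIN remove an entry without walking the bucket chain.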
206 207 typedef struct ipsec_policy_s ipsec_policy_t;
207 208
208 209 typedef HASH_HEAD(ipsec_policy_s) ipsec_policy_hash_t;
209 210
210 211 /*
211 212 * When adding new fields to ipsec_prot_t, make sure to update
212 213 * ipsec_in_to_out_action() as well as other code in spd.c
213 214 */
214 215
215 216 typedef struct ipsec_prot
216 217 {
217 218 unsigned int
218 219 ipp_use_ah : 1,
219 220 ipp_use_esp : 1,
220 221 ipp_use_se : 1,
221 222 ipp_use_unique : 1,
222 223 ipp_use_espa : 1,
223 224 ipp_pad : 27;
224 225 uint8_t ipp_auth_alg; /* DOI number */
225 226 uint8_t ipp_encr_alg; /* DOI number */
226 227 uint8_t ipp_esp_auth_alg; /* DOI number */
227 228 uint16_t ipp_ah_minbits; /* AH: min keylen */
228 229 uint16_t ipp_ah_maxbits; /* AH: max keylen */
229 230 uint16_t ipp_espe_minbits; /* ESP encr: min keylen */
230 231 uint16_t ipp_espe_maxbits; /* ESP encr: max keylen */
231 232 uint16_t ipp_espa_minbits; /* ESP auth: min keylen */
232 233 uint16_t ipp_espa_maxbits; /* ESP auth: max keylen */
233 234 uint32_t ipp_km_proto; /* key mgmt protocol */
234 235 uint32_t ipp_km_cookie; /* key mgmt cookie */
235 236 uint32_t ipp_replay_depth; /* replay window */
236 237 /* XXX add lifetimes */
237 238 } ipsec_prot_t;
238 239
239 240 #define IPSEC_MAX_KEYBITS (0xffff)
240 241
241 242 /*
242 243 * An individual policy action, possibly a member of a chain.
243 244 *
244 245 * Action chains may be shared between multiple policy rules.
245 246 *
246 247 * With one exception (IPSEC_POLICY_LOG), a chain consists of an
247 248 * ordered list of alternative ways to handle a packet.
248 249 *
249 250 * All actions are also "interned" into a hash table (to allow
250 251 * multiple rules with the same action chain to share one copy in
251 252 * memory).
252 253 */
253 254
254 255 typedef struct ipsec_act
255 256 {
256 257 uint8_t ipa_type;
257 258 uint8_t ipa_log;
258 259 union
259 260 {
260 261 ipsec_prot_t ipau_apply;
261 262 uint8_t ipau_reject_type;
262 263 uint32_t ipau_resolve_id; /* magic cookie */
263 264 uint8_t ipau_log_type;
264 265 } ipa_u;
265 266 #define ipa_apply ipa_u.ipau_apply
266 267 #define ipa_reject_type ipa_u.ipau_reject_type
267 268 #define ipa_log_type ipa_u.ipau_log_type
 268 269 #define	ipa_resolve_id	ipa_u.ipau_resolve_id
269 270 } ipsec_act_t;
270 271
271 272 #define IPSEC_ACT_APPLY 0x01 /* match IPSEC_POLICY_APPLY */
272 273 #define IPSEC_ACT_DISCARD 0x02 /* match IPSEC_POLICY_DISCARD */
273 274 #define IPSEC_ACT_BYPASS 0x03 /* match IPSEC_POLICY_BYPASS */
274 275 #define IPSEC_ACT_REJECT 0x04
275 276 #define IPSEC_ACT_CLEAR 0x05
276 277
277 278 typedef struct ipsec_action_s
278 279 {
279 280 HASH_LINK(ipa_hash, struct ipsec_action_s);
280 281 struct ipsec_action_s *ipa_next; /* next alternative */
281 282 uint32_t ipa_refs; /* refcount */
282 283 ipsec_act_t ipa_act;
283 284 /*
284 285 * The following bits are equivalent to an OR of bits included in the
285 286 * ipau_apply fields of this and subsequent actions in an
286 287 * action chain; this is an optimization for the sake of
287 288 * ipsec_out_process() in ip.c and a few other places.
288 289 */
289 290 unsigned int
290 291 ipa_hval: 8,
291 292 ipa_allow_clear:1, /* rule allows cleartext? */
292 293 ipa_want_ah:1, /* an action wants ah */
293 294 ipa_want_esp:1, /* an action wants esp */
294 295 ipa_want_se:1, /* an action wants se */
295 296 ipa_want_unique:1, /* want unique sa's */
296 297 ipa_pad:19;
297 298 uint32_t ipa_ovhd; /* per-packet encap ovhd */
298 299 } ipsec_action_t;
299 300
300 301 #define IPACT_REFHOLD(ipa) { \
301 302 atomic_add_32(&(ipa)->ipa_refs, 1); \
302 303 ASSERT((ipa)->ipa_refs != 0); \
303 304 }
304 305 #define IPACT_REFRELE(ipa) { \
305 306 ASSERT((ipa)->ipa_refs != 0); \
306 307 membar_exit(); \
307 308 if (atomic_add_32_nv(&(ipa)->ipa_refs, -1) == 0) \
308 309 ipsec_action_free(ipa); \
309 310 (ipa) = 0; \
310 311 }
311 312
312 313 /*
313 314 * For now, use a trivially sized hash table for actions.
314 315 * In the future we can add the structure canonicalization necessary
 315 316  * to get the hash function to behave correctly.
316 317 */
317 318 #define IPSEC_ACTION_HASH_SIZE 1
318 319
319 320 /*
320 321 * Merged address structure, for cheezy address-family independent
321 322 * matches in policy code.
322 323 */
323 324
324 325 typedef union ipsec_addr
325 326 {
326 327 in6_addr_t ipsad_v6;
327 328 in_addr_t ipsad_v4;
328 329 } ipsec_addr_t;
329 330
330 331 /*
331 332 * ipsec selector set, as used by the kernel policy structures.
 332 333  * Note that we specify "local" and "remote"
333 334 * rather than "source" and "destination", which allows the selectors
334 335 * for symmetric policy rules to be shared between inbound and
335 336 * outbound rules.
336 337 *
337 338 * "local" means "destination" on inbound, and "source" on outbound.
338 339 * "remote" means "source" on inbound, and "destination" on outbound.
339 340 * XXX if we add a fifth policy enforcement point for forwarded packets,
340 341 * what do we do?
341 342 *
342 343 * The ipsl_valid mask is not done as a bitfield; this is so we
343 344 * can use "ffs()" to find the "most interesting" valid tag.
344 345 *
345 346 * XXX should we have multiple types for space-conservation reasons?
346 347 * (v4 vs v6? prefix vs. range)?
347 348 */
348 349
349 350 typedef struct ipsec_selkey
350 351 {
351 352 uint32_t ipsl_valid; /* bitmask of valid entries */
352 353 #define IPSL_REMOTE_ADDR 0x00000001
353 354 #define IPSL_LOCAL_ADDR 0x00000002
354 355 #define IPSL_REMOTE_PORT 0x00000004
355 356 #define IPSL_LOCAL_PORT 0x00000008
356 357 #define IPSL_PROTOCOL 0x00000010
357 358 #define IPSL_ICMP_TYPE 0x00000020
358 359 #define IPSL_ICMP_CODE 0x00000040
359 360 #define IPSL_IPV6 0x00000080
360 361 #define IPSL_IPV4 0x00000100
361 362
362 363 #define IPSL_WILDCARD 0x0000007f
363 364
364 365 ipsec_addr_t ipsl_local;
365 366 ipsec_addr_t ipsl_remote;
366 367 uint16_t ipsl_lport;
367 368 uint16_t ipsl_rport;
368 369 /*
 369 370  * ICMP type and code selectors. Both have an end value to
 370 371  * specify ranges; for a single value, * and *_end are
 371 372  * equal.
372 373 */
373 374 uint8_t ipsl_icmp_type;
374 375 uint8_t ipsl_icmp_type_end;
375 376 uint8_t ipsl_icmp_code;
376 377 uint8_t ipsl_icmp_code_end;
377 378
378 379 uint8_t ipsl_proto; /* ip payload type */
379 380 uint8_t ipsl_local_pfxlen; /* #bits of prefix */
380 381 uint8_t ipsl_remote_pfxlen; /* #bits of prefix */
381 382 uint8_t ipsl_mbz;
382 383
383 384 /* Insert new elements above this line */
384 385 uint32_t ipsl_pol_hval;
385 386 uint32_t ipsl_sel_hval;
386 387 } ipsec_selkey_t;
387 388
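Because IPSL_REMOTE_ADDR carries the lowest bit value, ffs() applied to ipsl_valid (per the comment above) reports the remote address as the "most interesting" populated selector first, then the local address, and so on up the bit positions. A minimal sketch, assuming a userland-style ffs() from <strings.h> and a hypothetical helper name:

	#include <strings.h>	/* ffs() */

	/* Return the lowest-numbered valid selector bit, or 0 if none are set. */
	static uint32_t
	ipsl_first_valid(const ipsec_selkey_t *key)
	{
		int bit = ffs((int)key->ipsl_valid);

		return (bit == 0 ? 0 : 1U << (bit - 1));
	}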
388 389 typedef struct ipsec_sel
389 390 {
390 391 HASH_LINK(ipsl_hash, struct ipsec_sel);
391 392 uint32_t ipsl_refs; /* # refs to this sel */
392 393 ipsec_selkey_t ipsl_key; /* actual selector guts */
393 394 } ipsec_sel_t;
394 395
395 396 /*
396 397 * One policy rule. This will be linked into a single hash chain bucket in
397 398 * the parent rule structure. If the selector is simple enough to
398 399 * allow hashing, it gets filed under ipsec_policy_root_t->ipr_hash.
399 400 * Otherwise it goes onto a linked list in ipsec_policy_root_t->ipr_nonhash[af]
400 401 *
401 402 * In addition, we file the rule into an avl tree keyed by the rule index.
402 403 * (Duplicate rules are permitted; the comparison function breaks ties).
403 404 */
404 405 struct ipsec_policy_s
405 406 {
406 407 HASH_LINK(ipsp_hash, struct ipsec_policy_s);
407 408 avl_node_t ipsp_byid;
408 409 uint64_t ipsp_index; /* unique id */
409 410 uint32_t ipsp_prio; /* rule priority */
410 411 uint32_t ipsp_refs;
411 412 ipsec_sel_t *ipsp_sel; /* selector set (shared) */
412 413 ipsec_action_t *ipsp_act; /* action (may be shared) */
413 414 netstack_t *ipsp_netstack; /* No netstack_hold */
414 415 };
415 416
416 417 #define IPPOL_REFHOLD(ipp) { \
417 418 atomic_add_32(&(ipp)->ipsp_refs, 1); \
418 419 ASSERT((ipp)->ipsp_refs != 0); \
419 420 }
420 421 #define IPPOL_REFRELE(ipp) { \
421 422 ASSERT((ipp)->ipsp_refs != 0); \
422 423 membar_exit(); \
423 424 if (atomic_add_32_nv(&(ipp)->ipsp_refs, -1) == 0) \
424 425 ipsec_policy_free(ipp); \
425 426 (ipp) = 0; \
426 427 }
427 428
428 429 #define IPPOL_UNCHAIN(php, ip) \
429 430 HASHLIST_UNCHAIN((ip), ipsp_hash); \
430 431 avl_remove(&(php)->iph_rulebyid, (ip)); \
431 432 IPPOL_REFRELE(ip);
432 433
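Tying these macros back to the refcount rules in the locking comment near the top of the kernel section ("don't let go with both hands"), a minimal usage sketch with hypothetical helpers: a caller that publishes a policy pointer for some other context takes its own hold before publishing and releases only after the pointer is withdrawn.

	/* Hypothetical: stash a reference to pol for later use elsewhere. */
	static void
	stash_policy(ipsec_policy_t **slot, ipsec_policy_t *pol)
	{
		IPPOL_REFHOLD(pol);	/* new reference owned by *slot */
		*slot = pol;
	}

	static void
	unstash_policy(ipsec_policy_t **slot)
	{
		ipsec_policy_t *pol = *slot;

		*slot = NULL;
		IPPOL_REFRELE(pol);	/* may free on last reference */
	}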
433 434 /*
434 435 * Policy ruleset. One per (protocol * direction) for system policy.
435 436 */
436 437
437 438 #define IPSEC_AF_V4 0
438 439 #define IPSEC_AF_V6 1
439 440 #define IPSEC_NAF 2
440 441
441 442 typedef struct ipsec_policy_root_s
442 443 {
443 444 ipsec_policy_t *ipr_nonhash[IPSEC_NAF];
444 445 int ipr_nchains;
445 446 ipsec_policy_hash_t *ipr_hash;
446 447 } ipsec_policy_root_t;
447 448
448 449 /*
449 450 * Policy head. One for system policy; there may also be one present
450 451 * on ill_t's with interface-specific policy, as well as one present
451 452 * for sockets with per-socket policy allocated.
452 453 */
453 454
454 455 typedef struct ipsec_policy_head_s
455 456 {
456 457 uint32_t iph_refs;
457 458 krwlock_t iph_lock;
458 459 uint64_t iph_gen; /* generation number */
459 460 ipsec_policy_root_t iph_root[IPSEC_NTYPES];
460 461 avl_tree_t iph_rulebyid;
461 462 } ipsec_policy_head_t;
462 463
463 464 #define IPPH_REFHOLD(iph) { \
464 465 atomic_add_32(&(iph)->iph_refs, 1); \
465 466 ASSERT((iph)->iph_refs != 0); \
466 467 }
467 468 #define IPPH_REFRELE(iph, ns) { \
468 469 ASSERT((iph)->iph_refs != 0); \
469 470 membar_exit(); \
470 471 if (atomic_add_32_nv(&(iph)->iph_refs, -1) == 0) \
471 472 ipsec_polhead_free(iph, ns); \
472 473 (iph) = 0; \
473 474 }
474 475
475 476 /*
476 477 * IPsec fragment related structures
477 478 */
478 479
479 480 typedef struct ipsec_fragcache_entry {
480 481 struct ipsec_fragcache_entry *itpfe_next; /* hash list chain */
481 482 mblk_t *itpfe_fraglist; /* list of fragments */
482 483 time_t itpfe_exp; /* time when entry is stale */
483 484 int itpfe_depth; /* # of fragments in list */
484 485 ipsec_addr_t itpfe_frag_src;
485 486 ipsec_addr_t itpfe_frag_dst;
486 487 #define itpfe_src itpfe_frag_src.ipsad_v4
487 488 #define itpfe_src6 itpfe_frag_src.ipsad_v6
488 489 #define itpfe_dst itpfe_frag_dst.ipsad_v4
489 490 #define itpfe_dst6 itpfe_frag_dst.ipsad_v6
490 491 uint32_t itpfe_id; /* IP datagram ID */
491 492 uint8_t itpfe_proto; /* IP Protocol */
492 493 uint8_t itpfe_last; /* Last packet */
493 494 } ipsec_fragcache_entry_t;
494 495
495 496 typedef struct ipsec_fragcache {
496 497 kmutex_t itpf_lock;
497 498 struct ipsec_fragcache_entry **itpf_ptr;
498 499 struct ipsec_fragcache_entry *itpf_freelist;
499 500 time_t itpf_expire_hint; /* time when oldest entry is stale */
500 501 } ipsec_fragcache_t;
501 502
502 503 /*
 503 504  * Tunnel policies. We keep a miniature of the transport-mode/global policy
 504 505  * for each tunnel instance.
505 506 *
506 507 * People who need both an itp held down AND one of its polheads need to
 507 508  * first lock the itp, THEN the polhead; otherwise deadlock WILL occur.
508 509 */
509 510 typedef struct ipsec_tun_pol_s {
510 511 avl_node_t itp_node;
511 512 kmutex_t itp_lock;
512 513 uint64_t itp_next_policy_index;
513 514 ipsec_policy_head_t *itp_policy;
514 515 ipsec_policy_head_t *itp_inactive;
515 516 uint32_t itp_flags;
516 517 uint32_t itp_refcnt;
517 518 char itp_name[LIFNAMSIZ];
518 519 ipsec_fragcache_t itp_fragcache;
519 520 } ipsec_tun_pol_t;
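A minimal sketch (hypothetical function, not in this header) of the itp-before-polhead lock ordering required above:

	static void
	itp_update_active(ipsec_tun_pol_t *itp)
	{
		mutex_enter(&itp->itp_lock);			/* 1: the itp itself */
		rw_enter(&itp->itp_policy->iph_lock, RW_WRITER); /* 2: then its polhead */
		/* ... modify the active polhead ... */
		rw_exit(&itp->itp_policy->iph_lock);
		mutex_exit(&itp->itp_lock);
	}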
520 521 /* NOTE - Callers (tun code) synchronize their own instances for these flags. */
521 522 #define ITPF_P_ACTIVE 0x1 /* Are we using IPsec right now? */
522 523 #define ITPF_P_TUNNEL 0x2 /* Negotiate tunnel-mode */
523 524 /* Optimization -> Do we have per-port security entries in this polhead? */
524 525 #define ITPF_P_PER_PORT_SECURITY 0x4
525 526 #define ITPF_PFLAGS 0x7
526 527 #define ITPF_SHIFT 3
527 528
528 529 #define ITPF_I_ACTIVE 0x8 /* Is the inactive using IPsec right now? */
529 530 #define ITPF_I_TUNNEL 0x10 /* Negotiate tunnel-mode (on inactive) */
530 531 /* Optimization -> Do we have per-port security entries in this polhead? */
531 532 #define ITPF_I_PER_PORT_SECURITY 0x20
532 533 #define ITPF_IFLAGS 0x38
533 534
534 535 /* NOTE: f cannot be an expression. */
535 536 #define ITPF_CLONE(f) (f) = (((f) & ITPF_PFLAGS) | \
536 537 (((f) & ITPF_PFLAGS) << ITPF_SHIFT));
537 538 #define ITPF_SWAP(f) (f) = ((((f) & ITPF_PFLAGS) << ITPF_SHIFT) | \
538 539 (((f) & ITPF_IFLAGS) >> ITPF_SHIFT))
539 540
540 541 #define ITP_P_ISACTIVE(itp, iph) ((itp)->itp_flags & \
541 542 (((itp)->itp_policy == (iph)) ? ITPF_P_ACTIVE : ITPF_I_ACTIVE))
542 543
543 544 #define ITP_P_ISTUNNEL(itp, iph) ((itp)->itp_flags & \
544 545 (((itp)->itp_policy == (iph)) ? ITPF_P_TUNNEL : ITPF_I_TUNNEL))
545 546
546 547 #define ITP_P_ISPERPORT(itp, iph) ((itp)->itp_flags & \
547 548 (((itp)->itp_policy == (iph)) ? ITPF_P_PER_PORT_SECURITY : \
548 549 ITPF_I_PER_PORT_SECURITY))
549 550
550 551 #define ITP_REFHOLD(itp) { \
551 552 atomic_add_32(&((itp)->itp_refcnt), 1); \
552 553 ASSERT((itp)->itp_refcnt != 0); \
553 554 }
554 555
555 556 #define ITP_REFRELE(itp, ns) { \
556 557 ASSERT((itp)->itp_refcnt != 0); \
557 558 membar_exit(); \
558 559 if (atomic_add_32_nv(&((itp)->itp_refcnt), -1) == 0) \
559 560 itp_free(itp, ns); \
560 561 }
561 562
562 563 /*
563 564 * Certificate identity.
564 565 */
565 566
566 567 typedef struct ipsid_s
567 568 {
568 569 struct ipsid_s *ipsid_next;
569 570 struct ipsid_s **ipsid_ptpn;
570 571 uint32_t ipsid_refcnt;
571 572 int ipsid_type; /* id type */
572 573 char *ipsid_cid; /* certificate id string */
573 574 } ipsid_t;
574 575
575 576 /*
576 577 * ipsid_t reference hold/release macros, just like ipsa versions.
577 578 */
578 579
579 580 #define IPSID_REFHOLD(ipsid) { \
580 581 atomic_add_32(&(ipsid)->ipsid_refcnt, 1); \
581 582 ASSERT((ipsid)->ipsid_refcnt != 0); \
582 583 }
583 584
584 585 /*
585 586 * Decrement the reference count on the ID. Someone else will clean up
586 587 * after us later.
587 588 */
588 589
589 590 #define IPSID_REFRELE(ipsid) { \
590 591 membar_exit(); \
591 592 atomic_add_32(&(ipsid)->ipsid_refcnt, -1); \
592 593 }
593 594
594 595 /*
595 596 * Following are the estimates of what the maximum AH and ESP header size
596 597 * would be. This is used to tell the upper layer the right value of MSS
597 598 * it should use without consulting AH/ESP. If the size is something
598 599 * different from this, ULP will learn the right one through
599 600 * ICMP_FRAGMENTATION_NEEDED messages generated locally.
600 601 *
601 602 * AH : 12 bytes of constant header + 32 bytes of ICV checksum (SHA-512).
602 603 */
603 604 #define IPSEC_MAX_AH_HDR_SIZE (44)
604 605
605 606 /*
606 607 * ESP : Is a bit more complex...
607 608 *
608 609 * A system of one inequality and one equation MUST be solved for proper ESP
609 610 * overhead. The inequality is:
610 611 *
611 612 * MTU - sizeof (IP header + options) >=
612 613 * sizeof (esph_t) + sizeof (IV or ctr) + data-size + 2 + ICV
613 614 *
614 615 * IV or counter is almost always the cipher's block size. The equation is:
615 616 *
616 617 * data-size % block-size = (block-size - 2)
617 618 *
618 619 * so we can put as much data into the datagram as possible. If we are
619 620 * pessimistic and include our largest overhead cipher (AES) and hash
620 621 * (SHA-512), and assume 1500-byte MTU minus IPv4 overhead of 20 bytes, we get:
621 622 *
622 623 * 1480 >= 8 + 16 + data-size + 2 + 32
623 624 * 1480 >= 58 + data-size
624 625 * 1422 >= data-size, 1422 % 16 = 14, so 58 is the overhead!
625 626 *
626 627 * But, let's re-run the numbers with the same algorithms, but with an IPv6
627 628 * header:
628 629 *
629 630 * 1460 >= 58 + data-size
630 631 * 1402 >= data-size, 1402 % 16 = 10, meaning shrink to 1390 to get 14,
631 632 *
632 633 * which means the overhead is now 70.
633 634 *
634 635 * Hmmm... IPv4 headers can never be anything other than multiples of 4-bytes,
635 636 * and IPv6 ones can never be anything other than multiples of 8-bytes. We've
636 637 * seen overheads of 58 and 70. 58 % 16 == 10, and 70 % 16 == 6. IPv4 could
637 638 * force us to have 62 ( % 16 == 14) or 66 ( % 16 == 2), or IPv6 could force us
638 639 * to have 78 ( % 16 = 14). Let's compute IPv6 + 8-bytes of options:
639 640 *
640 641 * 1452 >= 58 + data-size
641 642 * 1394 >= data-size, 1394 % 16 = 2, meaning shrink to 1390 to get 14,
642 643 *
643 644 * Aha! The "ESP overhead" shrinks to 62 (70 - 8). This is good. Let's try
644 645 * IPv4 + 8 bytes of IPv4 options:
645 646 *
646 647 * 1472 >= 58 + data-size
647 648 * 1414 >= data-size, 1414 % 16 = 6, meaning shrink to 1406,
648 649 *
649 650 * meaning 66 is the overhead. Let's try 12 bytes:
650 651 *
651 652 * 1468 >= 58 + data-size
652 653 * 1410 >= data-size, 1410 % 16 = 2, meaning also shrink to 1406,
653 654 *
654 655 * meaning 62 is the overhead. How about 16 bytes?
655 656 *
656 657 * 1464 >= 58 + data-size
 657 658  * 1406 >= data-size, 1406 % 16 = 14, which is great!
658 659 *
659 660 * this means 58 is the overhead. If I wrap and add 20 bytes, it looks just
660 661 * like IPv6's 70 bytes. If I add 24, we go back to 66 bytes.
661 662 *
662 663 * So picking 70 is a sensible, conservative default. Optimal calculations
663 664 * will depend on knowing pre-ESP header length (called "divpoint" in the ESP
664 665 * code), which could be cached in the conn_t for connected endpoints, or
665 666 * which must be computed on every datagram otherwise.
666 667 */
667 668 #define IPSEC_MAX_ESP_HDR_SIZE (70)
668 669
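The derivation above can be condensed into a small sketch (the esp_overhead() helper is hypothetical, not part of this header): given the MTU, the pre-ESP header length ("divpoint"), the cipher block size and the ICV length, solve the inequality and the data-size equation for the per-packet overhead.

	static uint_t
	esp_overhead(uint_t mtu, uint_t divpoint, uint_t blksz, uint_t icvlen)
	{
		/* esph_t (8) + IV/counter (blksz) + pad-len/next-header (2) + ICV */
		uint_t fixed = 8 + blksz + 2 + icvlen;
		uint_t data = mtu - divpoint - fixed;	/* largest candidate payload */

		/* Shrink until data-size % block-size == block-size - 2. */
		while ((data % blksz) != (blksz - 2))
			data--;

		return (mtu - divpoint - data);
	}

With mtu = 1500, blksz = 16 (AES) and icvlen = 32 (SHA-512) this reproduces the numbers worked out above: 58 for a bare IPv4 header (divpoint 20) and 70 for a bare IPv6 header (divpoint 40).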
669 670 /*
670 671 * Alternate, when we know the crypto block size via the SA. Assume an ICV on
671 672 * the SA. Use:
672 673 *
673 674 * sizeof (esph_t) + 2 * (sizeof (IV/counter)) - 2 + sizeof (ICV). The "-2"
674 675 * discounts the overhead of the pad + padlen that gets swallowed up by the
675 676 * second (theoretically all-pad) cipher-block. If you use our examples of
676 677 * AES and SHA512, you get:
677 678 *
678 679 * 8 + 32 - 2 + 32 == 70.
679 680 *
680 681 * Which is our pre-computed maximum above.
681 682 */
682 683 #include <inet/ipsecesp.h>
683 684 #define IPSEC_BASE_ESP_HDR_SIZE(sa) \
684 685 (sizeof (esph_t) + ((sa)->ipsa_iv_len << 1) - 2 + (sa)->ipsa_mac_len)
685 686
686 687 /*
687 688 * Identity hash table.
688 689 *
689 690 * Identities are refcounted and "interned" into the hash table.
690 691 * Only references coming from other objects (SA's, latching state)
691 692 * are counted in ipsid_refcnt.
692 693 *
693 694 * Locking: IPSID_REFHOLD is safe only when (a) the object's hash bucket
694 695 * is locked, (b) we know that the refcount must be > 0.
695 696 *
696 697 * The ipsid_next and ipsid_ptpn fields are only to be referenced or
697 698 * modified when the bucket lock is held; in particular, we only
698 699 * delete objects while holding the bucket lock, and we only increase
699 700 * the refcount from 0 to 1 while the bucket lock is held.
700 701 */
701 702
702 703 #define IPSID_HASHSIZE 64
703 704
704 705 typedef struct ipsif_s
705 706 {
706 707 ipsid_t *ipsif_head;
707 708 kmutex_t ipsif_lock;
708 709 } ipsif_t;
709 710
710 711 /*
711 712 * For call to the kernel crypto framework. State needed during
712 713 * the execution of a crypto request.
713 714 */
714 715 typedef struct ipsec_crypto_s {
715 716 size_t ic_skip_len; /* len to skip for AH auth */
716 717 crypto_data_t ic_crypto_data; /* single op crypto data */
717 718 crypto_dual_data_t ic_crypto_dual_data; /* for dual ops */
718 719 crypto_data_t ic_crypto_mac; /* to store the MAC */
719 720 ipsa_cm_mech_t ic_cmm;
720 721 } ipsec_crypto_t;
721 722
722 723 /*
723 724 * IPsec stack instances
724 725 */
725 726 struct ipsec_stack {
726 727 netstack_t *ipsec_netstack; /* Common netstack */
727 728
728 729 /* Packet dropper for IP IPsec processing failures */
729 730 ipdropper_t ipsec_dropper;
730 731
731 732 /* From spd.c */
732 733 /*
733 734 * Policy rule index generator. We assume this won't wrap in the
734 735 * lifetime of a system. If we make 2^20 policy changes per second,
735 736 * this will last 2^44 seconds, or roughly 500,000 years, so we don't
736 737 * have to worry about reusing policy index values.
737 738 */
738 739 uint64_t ipsec_next_policy_index;
739 740
740 741 HASH_HEAD(ipsec_action_s) ipsec_action_hash[IPSEC_ACTION_HASH_SIZE];
741 742 HASH_HEAD(ipsec_sel) *ipsec_sel_hash;
742 743 uint32_t ipsec_spd_hashsize;
743 744
744 745 ipsif_t ipsec_ipsid_buckets[IPSID_HASHSIZE];
745 746
746 747 /*
747 748 * Active & Inactive system policy roots
748 749 */
749 750 ipsec_policy_head_t ipsec_system_policy;
750 751 ipsec_policy_head_t ipsec_inactive_policy;
751 752
752 753 /* Packet dropper for generic SPD drops. */
753 754 ipdropper_t ipsec_spd_dropper;
754 755
755 756 /* ipdrop.c */
756 757 kstat_t *ipsec_ip_drop_kstat;
757 758 struct ip_dropstats *ipsec_ip_drop_types;
758 759
759 760 /* spd.c */
760 761 /*
761 762 * Have a counter for every possible policy message in
762 763 * ipsec_policy_failure_msgs
763 764 */
764 765 uint32_t ipsec_policy_failure_count[IPSEC_POLICY_MAX];
765 766 /* Time since last ipsec policy failure that printed a message. */
766 767 hrtime_t ipsec_policy_failure_last;
767 768
768 769 /* ip_spd.c */
769 770 /* stats */
770 771 kstat_t *ipsec_ksp;
771 772 struct ipsec_kstats_s *ipsec_kstats;
772 773
773 774 /* sadb.c */
774 775 /* Packet dropper for generic SADB drops. */
775 776 ipdropper_t ipsec_sadb_dropper;
776 777
777 778 /* spd.c */
778 779 boolean_t ipsec_inbound_v4_policy_present;
779 780 boolean_t ipsec_outbound_v4_policy_present;
780 781 boolean_t ipsec_inbound_v6_policy_present;
781 782 boolean_t ipsec_outbound_v6_policy_present;
782 783
783 784 /* spd.c */
784 785 /*
785 786 * Because policy needs to know what algorithms are supported, keep the
786 787 * lists of algorithms here.
787 788 */
788 - kmutex_t ipsec_alg_lock;
789 + krwlock_t ipsec_alg_lock;
789 790
790 791 uint8_t ipsec_nalgs[IPSEC_NALGTYPES];
791 792 ipsec_alginfo_t *ipsec_alglists[IPSEC_NALGTYPES][IPSEC_MAX_ALGS];
792 793
793 794 uint8_t ipsec_sortlist[IPSEC_NALGTYPES][IPSEC_MAX_ALGS];
794 795
795 796 int ipsec_algs_exec_mode[IPSEC_NALGTYPES];
796 797
797 798 uint32_t ipsec_tun_spd_hashsize;
798 799 /*
799 800 * Tunnel policies - AVL tree indexed by tunnel name.
800 801 */
801 802 krwlock_t ipsec_tunnel_policy_lock;
802 803 uint64_t ipsec_tunnel_policy_gen;
803 804 avl_tree_t ipsec_tunnel_policies;
804 805
805 806 /* ipsec_loader.c */
806 807 kmutex_t ipsec_loader_lock;
807 808 int ipsec_loader_state;
808 809 int ipsec_loader_sig;
809 810 kt_did_t ipsec_loader_tid;
810 811 kcondvar_t ipsec_loader_sig_cv; /* For loader_sig conditions. */
811 812
812 813 };
813 814 typedef struct ipsec_stack ipsec_stack_t;
814 815
815 816 /* Handle the kstat_create in ip_drop_init() failing */
816 817 #define DROPPER(_ipss, _dropper) \
817 818 (((_ipss)->ipsec_ip_drop_types == NULL) ? NULL : \
818 819 &((_ipss)->ipsec_ip_drop_types->_dropper))
819 820
820 821 /*
 821 822  * Loader states.
822 823 */
823 824 #define IPSEC_LOADER_WAIT 0
824 825 #define IPSEC_LOADER_FAILED -1
825 826 #define IPSEC_LOADER_SUCCEEDED 1
826 827
827 828 /*
828 829 * ipsec_loader entrypoints.
829 830 */
830 831 extern void ipsec_loader_init(ipsec_stack_t *);
831 832 extern void ipsec_loader_start(ipsec_stack_t *);
832 833 extern void ipsec_loader_destroy(ipsec_stack_t *);
833 834 extern void ipsec_loader_loadnow(ipsec_stack_t *);
834 835 extern boolean_t ipsec_loader_wait(queue_t *q, ipsec_stack_t *);
835 836 extern boolean_t ipsec_loaded(ipsec_stack_t *);
836 837 extern boolean_t ipsec_failed(ipsec_stack_t *);
837 838
838 839 /*
839 840 * ipsec policy entrypoints (spd.c)
840 841 */
841 842
842 843 extern void ipsec_policy_g_destroy(void);
843 844 extern void ipsec_policy_g_init(void);
844 845
845 846 extern mblk_t *ipsec_add_crypto_data(mblk_t *, ipsec_crypto_t **);
846 847 extern mblk_t *ipsec_remove_crypto_data(mblk_t *, ipsec_crypto_t **);
847 848 extern mblk_t *ipsec_free_crypto_data(mblk_t *);
848 849 extern int ipsec_alloc_table(ipsec_policy_head_t *, int, int, boolean_t,
849 850 netstack_t *);
850 851 extern void ipsec_polhead_init(ipsec_policy_head_t *, int);
851 852 extern void ipsec_polhead_destroy(ipsec_policy_head_t *);
852 853 extern void ipsec_polhead_free_table(ipsec_policy_head_t *);
853 854 extern mblk_t *ipsec_check_global_policy(mblk_t *, conn_t *, ipha_t *,
854 855 ip6_t *, ip_recv_attr_t *, netstack_t *ns);
855 856 extern mblk_t *ipsec_check_inbound_policy(mblk_t *, conn_t *, ipha_t *, ip6_t *,
856 857 ip_recv_attr_t *);
857 858
858 859 extern boolean_t ipsec_in_to_out(ip_recv_attr_t *, ip_xmit_attr_t *,
859 860 mblk_t *, ipha_t *, ip6_t *);
860 861 extern void ipsec_in_release_refs(ip_recv_attr_t *);
861 862 extern void ipsec_out_release_refs(ip_xmit_attr_t *);
862 863 extern void ipsec_log_policy_failure(int, char *, ipha_t *, ip6_t *, boolean_t,
863 864 netstack_t *);
864 865 extern boolean_t ipsec_inbound_accept_clear(mblk_t *, ipha_t *, ip6_t *);
865 866 extern int ipsec_conn_cache_policy(conn_t *, boolean_t);
866 867 extern void ipsec_cache_outbound_policy(const conn_t *, const in6_addr_t *,
867 868 const in6_addr_t *, in_port_t, ip_xmit_attr_t *);
868 869 extern boolean_t ipsec_outbound_policy_current(ip_xmit_attr_t *);
869 870 extern ipsec_action_t *ipsec_in_to_out_action(ip_recv_attr_t *);
870 871 extern void ipsec_latch_inbound(conn_t *connp, ip_recv_attr_t *ira);
871 872
872 873 extern void ipsec_policy_free(ipsec_policy_t *);
873 874 extern void ipsec_action_free(ipsec_action_t *);
874 875 extern void ipsec_polhead_free(ipsec_policy_head_t *, netstack_t *);
875 876 extern ipsec_policy_head_t *ipsec_polhead_split(ipsec_policy_head_t *,
876 877 netstack_t *);
877 878 extern ipsec_policy_head_t *ipsec_polhead_create(void);
878 879 extern ipsec_policy_head_t *ipsec_system_policy(netstack_t *);
879 880 extern ipsec_policy_head_t *ipsec_inactive_policy(netstack_t *);
880 881 extern void ipsec_swap_policy(ipsec_policy_head_t *, ipsec_policy_head_t *,
881 882 netstack_t *);
882 883 extern void ipsec_swap_global_policy(netstack_t *);
883 884
884 885 extern int ipsec_clone_system_policy(netstack_t *);
885 886 extern ipsec_policy_t *ipsec_policy_create(ipsec_selkey_t *,
886 887 const ipsec_act_t *, int, int, uint64_t *, netstack_t *);
887 888 extern boolean_t ipsec_policy_delete(ipsec_policy_head_t *,
888 889 ipsec_selkey_t *, int, netstack_t *);
889 890 extern int ipsec_policy_delete_index(ipsec_policy_head_t *, uint64_t,
890 891 netstack_t *);
891 892 extern boolean_t ipsec_polhead_insert(ipsec_policy_head_t *, ipsec_act_t *,
892 893 uint_t, int, int, netstack_t *);
893 894 extern void ipsec_polhead_flush(ipsec_policy_head_t *, netstack_t *);
894 895 extern int ipsec_copy_polhead(ipsec_policy_head_t *, ipsec_policy_head_t *,
895 896 netstack_t *);
896 897 extern void ipsec_actvec_from_req(const ipsec_req_t *, ipsec_act_t **, uint_t *,
897 898 netstack_t *);
898 899 extern void ipsec_actvec_free(ipsec_act_t *, uint_t);
899 900 extern int ipsec_req_from_head(ipsec_policy_head_t *, ipsec_req_t *, int);
900 901 extern mblk_t *ipsec_construct_inverse_acquire(sadb_msg_t *, sadb_ext_t **,
901 902 netstack_t *);
902 903 extern ipsec_policy_t *ipsec_find_policy(int, const conn_t *,
903 904 ipsec_selector_t *, netstack_t *);
904 905 extern ipsid_t *ipsid_lookup(int, char *, netstack_t *);
905 906 extern boolean_t ipsid_equal(ipsid_t *, ipsid_t *);
906 907 extern void ipsid_gc(netstack_t *);
907 908 extern void ipsec_latch_ids(ipsec_latch_t *, ipsid_t *, ipsid_t *);
908 909
909 910 extern void ipsec_config_flush(netstack_t *);
910 911 extern boolean_t ipsec_check_policy(ipsec_policy_head_t *, ipsec_policy_t *,
911 912 int);
912 913 extern void ipsec_enter_policy(ipsec_policy_head_t *, ipsec_policy_t *, int,
913 914 netstack_t *);
914 915 extern boolean_t ipsec_check_action(ipsec_act_t *, int *, netstack_t *);
915 916
916 917 extern void iplatch_free(ipsec_latch_t *);
917 918 extern ipsec_latch_t *iplatch_create(void);
918 919 extern int ipsec_set_req(cred_t *, conn_t *, ipsec_req_t *);
919 920
920 921 extern void ipsec_insert_always(avl_tree_t *tree, void *new_node);
921 922
922 923 extern int32_t ipsec_act_ovhd(const ipsec_act_t *act);
923 924 extern mblk_t *sadb_whack_label(mblk_t *, ipsa_t *, ip_xmit_attr_t *,
924 925 kstat_named_t *, ipdropper_t *);
925 926 extern mblk_t *sadb_whack_label_v4(mblk_t *, ipsa_t *, kstat_named_t *,
926 927 ipdropper_t *);
927 928 extern mblk_t *sadb_whack_label_v6(mblk_t *, ipsa_t *, kstat_named_t *,
928 929 ipdropper_t *);
929 930 extern boolean_t update_iv(uint8_t *, queue_t *, ipsa_t *, ipsecesp_stack_t *);
930 931
931 932 /*
932 933 * Tunnel-support SPD functions and variables.
933 934 */
934 935 struct iptun_s; /* Defined in inet/iptun/iptun_impl.h. */
935 936 extern mblk_t *ipsec_tun_inbound(ip_recv_attr_t *, mblk_t *, ipsec_tun_pol_t *,
936 937 ipha_t *, ip6_t *, ipha_t *, ip6_t *, int, netstack_t *);
937 938 extern mblk_t *ipsec_tun_outbound(mblk_t *, struct iptun_s *, ipha_t *,
938 939 ip6_t *, ipha_t *, ip6_t *, int, ip_xmit_attr_t *);
939 940 extern void itp_free(ipsec_tun_pol_t *, netstack_t *);
940 941 extern ipsec_tun_pol_t *create_tunnel_policy(char *, int *, uint64_t *,
941 942 netstack_t *);
942 943 extern ipsec_tun_pol_t *get_tunnel_policy(char *, netstack_t *);
943 944 extern void itp_unlink(ipsec_tun_pol_t *, netstack_t *);
944 945 extern void itp_walk(void (*)(ipsec_tun_pol_t *, void *, netstack_t *),
945 946 void *, netstack_t *);
946 947
947 948 extern ipsec_tun_pol_t *itp_get_byaddr(uint32_t *, uint32_t *, int,
948 949 ip_stack_t *);
949 950
950 951 /*
951 952 * IPsec AH/ESP functions called from IP or the common SADB code in AH.
952 953 */
953 954
954 955 extern void ipsecah_in_assocfailure(mblk_t *, char, ushort_t, char *,
955 956 uint32_t, void *, int, ip_recv_attr_t *ira);
956 957 extern void ipsecesp_in_assocfailure(mblk_t *, char, ushort_t, char *,
957 958 uint32_t, void *, int, ip_recv_attr_t *ira);
958 959 extern void ipsecesp_send_keepalive(ipsa_t *);
959 960
960 961 /*
961 962 * Algorithm management helper functions.
962 963 */
963 964 extern boolean_t ipsec_valid_key_size(uint16_t, ipsec_alginfo_t *);
964 965
965 966 /*
966 967 * Per-socket policy, for now, takes precedence... this priority value
 967 968  * ensures it.
968 969 */
969 970 #define IPSEC_PRIO_SOCKET 0x1000000
970 971
971 972 /* DDI initialization functions. */
972 973 extern boolean_t ipsecesp_ddi_init(void);
973 974 extern boolean_t ipsecah_ddi_init(void);
974 975 extern boolean_t keysock_ddi_init(void);
975 976 extern boolean_t spdsock_ddi_init(void);
976 977
977 978 extern void ipsecesp_ddi_destroy(void);
978 979 extern void ipsecah_ddi_destroy(void);
979 980 extern void keysock_ddi_destroy(void);
980 981 extern void spdsock_ddi_destroy(void);
981 982
982 983 /*
983 984 * AH- and ESP-specific functions that are called directly by other modules.
984 985 */
985 986 extern void ipsecah_fill_defs(struct sadb_x_ecomb *, netstack_t *);
986 987 extern void ipsecesp_fill_defs(struct sadb_x_ecomb *, netstack_t *);
987 988 extern void ipsecah_algs_changed(netstack_t *);
988 989 extern void ipsecesp_algs_changed(netstack_t *);
989 990 extern void ipsecesp_init_funcs(ipsa_t *);
990 991 extern void ipsecah_init_funcs(ipsa_t *);
991 992 extern mblk_t *ipsecah_icmp_error(mblk_t *, ip_recv_attr_t *);
992 993 extern mblk_t *ipsecesp_icmp_error(mblk_t *, ip_recv_attr_t *);
993 994
994 995 /*
995 996 * spdsock functions that are called directly by IP.
996 997 */
997 998 extern void spdsock_update_pending_algs(netstack_t *);
998 999
999 1000 /*
1000 1001 * IP functions that are called from AH and ESP.
1001 1002 */
1002 1003 extern boolean_t ipsec_outbound_sa(mblk_t *, ip_xmit_attr_t *, uint_t);
1003 1004 extern mblk_t *ipsec_inbound_esp_sa(mblk_t *, ip_recv_attr_t *, esph_t **);
1004 1005 extern mblk_t *ipsec_inbound_ah_sa(mblk_t *, ip_recv_attr_t *, ah_t **);
1005 1006 extern ipsec_policy_t *ipsec_find_policy_head(ipsec_policy_t *,
1006 1007 ipsec_policy_head_t *, int, ipsec_selector_t *);
1007 1008
1008 1009 /*
1009 1010 * IP dropper init/destroy.
1010 1011 */
1011 1012 void ip_drop_init(ipsec_stack_t *);
1012 1013 void ip_drop_destroy(ipsec_stack_t *);
1013 1014
1014 1015 /*
1015 1016 * Common functions
1016 1017 */
1017 1018 extern boolean_t ip_addr_match(uint8_t *, int, in6_addr_t *);
1018 1019 extern boolean_t ipsec_label_match(ts_label_t *, ts_label_t *);
1019 1020
1020 1021 /*
1021 1022  * AH and ESP counter types.
1022 1023 */
1023 1024 typedef uint32_t ah_counter;
1024 1025 typedef uint32_t esp_counter;
1025 1026
1026 1027 #endif /* _KERNEL */
1027 1028
1028 1029 #ifdef __cplusplus
1029 1030 }
1030 1031 #endif
1031 1032
1032 1033 #endif /* _INET_IPSEC_IMPL_H */