Bayard's initial drop; needs finishing, or at least testing.
--- old/usr/src/uts/common/inet/ip/spdsock.c
+++ new/usr/src/uts/common/inet/ip/spdsock.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
23 + * Copyright (c) 2012 Nexenta Systems, Inc. All rights reserved.
23 24 */
24 25
25 26 #include <sys/param.h>
26 27 #include <sys/types.h>
27 28 #include <sys/stream.h>
28 29 #include <sys/strsubr.h>
29 30 #include <sys/strsun.h>
30 31 #include <sys/stropts.h>
31 32 #include <sys/zone.h>
32 33 #include <sys/vnode.h>
33 34 #include <sys/sysmacros.h>
34 35 #define _SUN_TPI_VERSION 2
35 36 #include <sys/tihdr.h>
36 37 #include <sys/ddi.h>
37 38 #include <sys/sunddi.h>
38 39 #include <sys/mkdev.h>
39 40 #include <sys/debug.h>
40 41 #include <sys/kmem.h>
41 42 #include <sys/cmn_err.h>
42 43 #include <sys/suntpi.h>
43 44 #include <sys/policy.h>
44 45 #include <sys/dls.h>
45 46
46 47 #include <sys/socket.h>
47 48 #include <netinet/in.h>
48 49 #include <net/pfkeyv2.h>
49 50 #include <net/pfpolicy.h>
50 51
51 52 #include <inet/common.h>
52 53 #include <netinet/ip6.h>
53 54 #include <inet/ip.h>
54 55 #include <inet/ip6.h>
55 56 #include <inet/mi.h>
56 57 #include <inet/proto_set.h>
57 58 #include <inet/nd.h>
58 59 #include <inet/ip_if.h>
59 60 #include <inet/optcom.h>
60 61 #include <inet/ipsec_impl.h>
61 62 #include <inet/spdsock.h>
62 63 #include <inet/sadb.h>
63 64 #include <inet/iptun.h>
64 65 #include <inet/iptun/iptun_impl.h>
65 66
66 67 #include <sys/isa_defs.h>
67 68
68 69 #include <c2/audit.h>
69 70
70 71 /*
71 72 * This is a transport provider for the PF_POLICY IPsec policy
72 73 * management socket, which provides a management interface into the
73 74 * SPD, allowing policy rules to be added, deleted, and queried.
74 75 *
75 76 * This effectively replaces the old private SIOC*IPSECONFIG ioctls
76 77 * with an extensible interface which will hopefully be public some
77 78 * day.
78 79 *
79 80 * See <net/pfpolicy.h> for more details on the protocol.
80 81 *
81 82 * We link against drv/ip and call directly into it to manipulate the
82 83 * SPD; see ipsec_impl.h for the policy data structures and spd.c for
83 84 * the code which maintains them.
84 85 *
85 86 * The MT model of this is QPAIR with the addition of some explicit
86 87 * locking to protect system-wide policy data structures.
87 88 */
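[Review aside] For reviewers unfamiliar with PF_POLICY, here is a minimal userland sketch of a consumer driving this provider. It is illustrative only and not part of this change; the socket(PF_POLICY, SOCK_RAW, PF_POLICY_V1) entry point and the simple write/close flow are assumptions about how a management client would typically use the socket.

	/* Hypothetical userland consumer -- not part of this webrev. */
	#include <sys/types.h>
	#include <sys/socket.h>
	#include <net/pfpolicy.h>
	#include <strings.h>
	#include <unistd.h>

	static int
	flush_active_spd(void)
	{
		spd_msg_t msg;
		int fd;

		/* Assumed entry point for the PF_POLICY management socket. */
		fd = socket(PF_POLICY, SOCK_RAW, PF_POLICY_V1);
		if (fd == -1)
			return (-1);

		bzero(&msg, sizeof (msg));
		msg.spd_msg_version = PF_POLICY_V1;
		msg.spd_msg_type = SPD_FLUSH;	/* Flush all rules... */
		msg.spd_msg_spdid = SPD_ACTIVE;	/* ...in the active SPD. */
		msg.spd_msg_len = SPD_8TO64(sizeof (msg));
		msg.spd_msg_seq = 1;
		msg.spd_msg_pid = getpid();

		if (write(fd, &msg, sizeof (msg)) != sizeof (msg)) {
			(void) close(fd);
			return (-1);
		}
		/* A real consumer would read back the echoed reply here. */
		(void) close(fd);
		return (0);
	}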
88 89
89 90 static vmem_t *spdsock_vmem; /* for minor numbers. */
90 91
91 92 #define ALIGNED64(x) IS_P2ALIGNED((x), sizeof (uint64_t))
92 93
93 94 /* Default structure copied into T_INFO_ACK messages (from rts.c...) */
94 95 static struct T_info_ack spdsock_g_t_info_ack = {
95 96 T_INFO_ACK,
96 97 T_INFINITE, /* TSDU_size. Maximum size messages. */
97 98 T_INVALID, /* ETSDU_size. No expedited data. */
98 99 T_INVALID, /* CDATA_size. No connect data. */
99 100 T_INVALID, /* DDATA_size. No disconnect data. */
100 101 0, /* ADDR_size. */
101 102 0, /* OPT_size. No user-settable options */
102 103 64 * 1024, /* TIDU_size. spdsock allows maximum size messages. */
103 104 T_COTS, /* SERV_type. spdsock supports connection oriented. */
104 105 TS_UNBND, /* CURRENT_state. This is set from spdsock_state. */
105 106 (XPG4_1) /* Provider flags */
106 107 };
107 108
108 109 /* Named Dispatch Parameter Management Structure */
109 110 typedef struct spdsockparam_s {
110 111 uint_t spdsock_param_min;
111 112 uint_t spdsock_param_max;
112 113 uint_t spdsock_param_value;
113 114 char *spdsock_param_name;
114 115 } spdsockparam_t;
115 116
116 117 /*
117 118 * Table of NDD variables supported by spdsock. These are loaded into
118 119 * spdsock_g_nd in spdsock_init_nd.
119 120 * All of these are alterable, within the min/max values given, at run time.
120 121 */
121 122 static spdsockparam_t lcl_param_arr[] = {
122 123 /* min max value name */
123 124 { 4096, 65536, 8192, "spdsock_xmit_hiwat"},
124 125 { 0, 65536, 1024, "spdsock_xmit_lowat"},
125 126 { 4096, 65536, 8192, "spdsock_recv_hiwat"},
126 127 { 65536, 1024*1024*1024, 256*1024, "spdsock_max_buf"},
127 128 { 0, 3, 0, "spdsock_debug"},
128 129 };
129 130 #define spds_xmit_hiwat spds_params[0].spdsock_param_value
130 131 #define spds_xmit_lowat spds_params[1].spdsock_param_value
131 132 #define spds_recv_hiwat spds_params[2].spdsock_param_value
132 133 #define spds_max_buf spds_params[3].spdsock_param_value
133 134 #define spds_debug spds_params[4].spdsock_param_value
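[Review aside] The table above is alterable at run time through the named-dispatch interface; a hypothetical invocation, assuming the usual /dev/spdsock device node and ndd(1M) handling of these variables, would look like the comment below. This is illustrative only and not part of this change.

	/*
	 * Hypothetical run-time tuning, assuming /dev/spdsock and ndd(1M):
	 *
	 *	# ndd -get /dev/spdsock spdsock_debug
	 *	# ndd -set /dev/spdsock spdsock_debug 2
	 */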
134 135
135 136 #define ss0dbg(a) printf a
136 137 /* NOTE: != 0 instead of > 0 so lint doesn't complain. */
137 138 #define ss1dbg(spds, a) if (spds->spds_debug != 0) printf a
138 139 #define ss2dbg(spds, a) if (spds->spds_debug > 1) printf a
139 140 #define ss3dbg(spds, a) if (spds->spds_debug > 2) printf a
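[Review aside] Usage sketch (illustrative fragment, assuming a spdsock_t *ss and spd_stack_t *spds in scope): because each macro's single `a' parameter carries the entire printf() argument list, callers wrap the arguments in an extra set of parentheses.

	/* Hypothetical debug call -- not part of this change. */
	ss2dbg(spds, ("spdsock: dumped %d rules so far.\n",
	    ss->spdsock_dump_count));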
140 141
141 142 #define RESET_SPDSOCK_DUMP_POLHEAD(ss, iph) { \
142 143 ASSERT(RW_READ_HELD(&(iph)->iph_lock)); \
143 144 (ss)->spdsock_dump_head = (iph); \
144 145 (ss)->spdsock_dump_gen = (iph)->iph_gen; \
145 146 (ss)->spdsock_dump_cur_type = 0; \
146 147 (ss)->spdsock_dump_cur_af = IPSEC_AF_V4; \
147 148 (ss)->spdsock_dump_cur_rule = NULL; \
148 149 (ss)->spdsock_dump_count = 0; \
149 150 (ss)->spdsock_dump_cur_chain = 0; \
150 151 }
151 152
152 153 static int spdsock_close(queue_t *);
153 154 static int spdsock_open(queue_t *, dev_t *, int, int, cred_t *);
154 155 static void spdsock_wput(queue_t *, mblk_t *);
155 156 static void spdsock_wsrv(queue_t *);
156 157 static void spdsock_rsrv(queue_t *);
157 158 static void *spdsock_stack_init(netstackid_t stackid, netstack_t *ns);
158 159 static void spdsock_stack_shutdown(netstackid_t stackid, void *arg);
159 160 static void spdsock_stack_fini(netstackid_t stackid, void *arg);
160 161 static void spdsock_loadcheck(void *);
161 162 static void spdsock_merge_algs(spd_stack_t *);
162 163 static void spdsock_flush_one(ipsec_policy_head_t *, netstack_t *);
163 164 static mblk_t *spdsock_dump_next_record(spdsock_t *);
164 165 static void update_iptun_policy(ipsec_tun_pol_t *);
165 166
166 167 static struct module_info info = {
167 168 5138, "spdsock", 1, INFPSZ, 512, 128
168 169 };
169 170
170 171 static struct qinit rinit = {
171 172 NULL, (pfi_t)spdsock_rsrv, spdsock_open, spdsock_close,
172 173 NULL, &info
173 174 };
174 175
175 176 static struct qinit winit = {
176 177 (pfi_t)spdsock_wput, (pfi_t)spdsock_wsrv, NULL, NULL, NULL, &info
177 178 };
178 179
179 180 struct streamtab spdsockinfo = {
180 181 &rinit, &winit
181 182 };
182 183
183 184 /* mapping from alg type to protocol number, as per RFC 2407 */
184 185 static const uint_t algproto[] = {
185 186 PROTO_IPSEC_AH,
186 187 PROTO_IPSEC_ESP,
187 188 };
188 189
189 190 #define NALGPROTOS (sizeof (algproto) / sizeof (algproto[0]))
190 191
191 192 /* mapping from kernel exec mode to spdsock exec mode */
192 193 static const uint_t execmodes[] = {
193 194 SPD_ALG_EXEC_MODE_SYNC,
194 195 SPD_ALG_EXEC_MODE_ASYNC
195 196 };
196 197
197 198 #define NEXECMODES (sizeof (execmodes) / sizeof (execmodes[0]))
198 199
199 200 #define ALL_ACTIVE_POLHEADS ((ipsec_policy_head_t *)-1)
200 201 #define ALL_INACTIVE_POLHEADS ((ipsec_policy_head_t *)-2)
201 202
202 203 #define ITP_NAME(itp) (itp != NULL ? itp->itp_name : NULL)
203 204
204 205 /* ARGSUSED */
205 206 static int
206 207 spdsock_param_get(q, mp, cp, cr)
207 208 queue_t *q;
208 209 mblk_t *mp;
209 210 caddr_t cp;
210 211 cred_t *cr;
211 212 {
212 213 spdsockparam_t *spdsockpa = (spdsockparam_t *)cp;
213 214 uint_t value;
214 215 spdsock_t *ss = (spdsock_t *)q->q_ptr;
215 216 spd_stack_t *spds = ss->spdsock_spds;
216 217
217 218 mutex_enter(&spds->spds_param_lock);
218 219 value = spdsockpa->spdsock_param_value;
219 220 mutex_exit(&spds->spds_param_lock);
220 221
221 222 (void) mi_mpprintf(mp, "%u", value);
222 223 return (0);
223 224 }
224 225
225 226 /* This routine sets an NDD variable in a spdsockparam_t structure. */
226 227 /* ARGSUSED */
227 228 static int
228 229 spdsock_param_set(q, mp, value, cp, cr)
229 230 queue_t *q;
230 231 mblk_t *mp;
231 232 char *value;
232 233 caddr_t cp;
233 234 cred_t *cr;
234 235 {
235 236 ulong_t new_value;
236 237 spdsockparam_t *spdsockpa = (spdsockparam_t *)cp;
237 238 spdsock_t *ss = (spdsock_t *)q->q_ptr;
238 239 spd_stack_t *spds = ss->spdsock_spds;
239 240
240 241 /* Convert the value from a string into a long integer. */
241 242 if (ddi_strtoul(value, NULL, 10, &new_value) != 0)
242 243 return (EINVAL);
243 244
244 245 mutex_enter(&spds->spds_param_lock);
245 246 /*
246 247 * Fail the request if the new value does not lie within the
247 248 * required bounds.
248 249 */
249 250 if (new_value < spdsockpa->spdsock_param_min ||
250 251 new_value > spdsockpa->spdsock_param_max) {
251 252 mutex_exit(&spds->spds_param_lock);
252 253 return (EINVAL);
253 254 }
254 255
255 256 /* Set the new value */
256 257 spdsockpa->spdsock_param_value = new_value;
257 258 mutex_exit(&spds->spds_param_lock);
258 259
259 260 return (0);
260 261 }
261 262
262 263 /*
263 264 * Initialize at module load time
264 265 */
265 266 boolean_t
266 267 spdsock_ddi_init(void)
267 268 {
268 269 spdsock_max_optsize = optcom_max_optsize(
269 270 spdsock_opt_obj.odb_opt_des_arr, spdsock_opt_obj.odb_opt_arr_cnt);
270 271
271 272 spdsock_vmem = vmem_create("spdsock", (void *)1, MAXMIN, 1,
272 273 NULL, NULL, NULL, 1, VM_SLEEP | VMC_IDENTIFIER);
273 274
274 275 /*
275 276 * We want to be informed each time a stack is created or
276 277 * destroyed in the kernel, so we can maintain the
277 278 * set of spd_stack_t's.
278 279 */
279 280 netstack_register(NS_SPDSOCK, spdsock_stack_init,
280 281 spdsock_stack_shutdown, spdsock_stack_fini);
281 282
282 283 return (B_TRUE);
283 284 }
284 285
285 286 /*
286 287 * Walk through the param array specified registering each element with the
287 288 * named dispatch handler.
288 289 */
289 290 static boolean_t
290 291 spdsock_param_register(IDP *ndp, spdsockparam_t *ssp, int cnt)
291 292 {
292 293 for (; cnt-- > 0; ssp++) {
293 294 if (ssp->spdsock_param_name != NULL &&
294 295 ssp->spdsock_param_name[0]) {
295 296 if (!nd_load(ndp,
296 297 ssp->spdsock_param_name,
297 298 spdsock_param_get, spdsock_param_set,
298 299 (caddr_t)ssp)) {
299 300 nd_free(ndp);
300 301 return (B_FALSE);
301 302 }
302 303 }
303 304 }
304 305 return (B_TRUE);
305 306 }
306 307
307 308 /*
308 309 * Initialize for each stack instance
309 310 */
310 311 /* ARGSUSED */
311 312 static void *
312 313 spdsock_stack_init(netstackid_t stackid, netstack_t *ns)
313 314 {
314 315 spd_stack_t *spds;
315 316 spdsockparam_t *ssp;
316 317
317 318 spds = (spd_stack_t *)kmem_zalloc(sizeof (*spds), KM_SLEEP);
318 319 spds->spds_netstack = ns;
319 320
320 321 ASSERT(spds->spds_g_nd == NULL);
321 322
322 323 ssp = (spdsockparam_t *)kmem_alloc(sizeof (lcl_param_arr), KM_SLEEP);
323 324 spds->spds_params = ssp;
324 325 bcopy(lcl_param_arr, ssp, sizeof (lcl_param_arr));
325 326
326 327 (void) spdsock_param_register(&spds->spds_g_nd, ssp,
327 328 A_CNT(lcl_param_arr));
328 329
329 330 mutex_init(&spds->spds_param_lock, NULL, MUTEX_DEFAULT, NULL);
330 331 mutex_init(&spds->spds_alg_lock, NULL, MUTEX_DEFAULT, NULL);
331 332
332 333 return (spds);
333 334 }
334 335
335 336 void
336 337 spdsock_ddi_destroy(void)
337 338 {
338 339 vmem_destroy(spdsock_vmem);
339 340
340 341 netstack_unregister(NS_SPDSOCK);
341 342 }
342 343
343 344 /*
344 345 * Do pre-removal cleanup.
345 346 */
346 347 /* ARGSUSED */
347 348 static void
348 349 spdsock_stack_shutdown(netstackid_t stackid, void *arg)
349 350 {
350 351 spd_stack_t *spds = (spd_stack_t *)arg;
351 352
352 353 if (spds->spds_mp_algs != NULL) {
353 354 freemsg(spds->spds_mp_algs);
354 355 spds->spds_mp_algs = NULL;
355 356 }
356 357 }
357 358
358 359 /* ARGSUSED */
359 360 static void
360 361 spdsock_stack_fini(netstackid_t stackid, void *arg)
361 362 {
362 363 spd_stack_t *spds = (spd_stack_t *)arg;
363 364
364 365 ASSERT(spds->spds_mp_algs == NULL);
365 366 mutex_destroy(&spds->spds_param_lock);
366 367 mutex_destroy(&spds->spds_alg_lock);
367 368 nd_free(&spds->spds_g_nd);
368 369 kmem_free(spds->spds_params, sizeof (lcl_param_arr));
369 370 spds->spds_params = NULL;
370 371
371 372 kmem_free(spds, sizeof (*spds));
372 373 }
373 374
374 375 /*
375 376 * NOTE: large quantities of this should be shared with keysock.
376 377 * Would be nice to combine some of this into a common module, but
377 378 * not possible given time pressures.
378 379 */
379 380
380 381 /*
381 382 * High-level reality checking of extensions.
382 383 */
383 384 /* ARGSUSED */ /* XXX */
384 385 static boolean_t
385 386 ext_check(spd_ext_t *ext)
386 387 {
387 388 spd_if_t *tunname = (spd_if_t *)ext;
388 389 int i;
389 390 char *idstr;
390 391
391 392 if (ext->spd_ext_type == SPD_EXT_TUN_NAME) {
392 393 /* (NOTE: Modified from SADB_EXT_IDENTITY..) */
393 394
394 395 /*
395 396 * Make sure the strings in these identities are
396 397 * null-terminated. Let's "proactively" null-terminate the
397 398 * string at the last byte if it's not terminated sooner.
398 399 */
399 400 i = SPD_64TO8(tunname->spd_if_len) - sizeof (spd_if_t);
400 401 idstr = (char *)(tunname + 1);
401 402 while (*idstr != '\0' && i > 0) {
402 403 i--;
403 404 idstr++;
404 405 }
405 406 if (i == 0) {
406 407 /*
407 408 * I.e., if the bozo user didn't NULL-terminate the
408 409 * string...
409 410 */
410 411 idstr--;
411 412 *idstr = '\0';
412 413 }
413 414 }
414 415 return (B_TRUE); /* For now... */
415 416 }
416 417
417 418
418 419
419 420 /* Return values for spdsock_get_ext(). */
420 421 #define KGE_OK 0
421 422 #define KGE_DUP 1
422 423 #define KGE_UNK 2
423 424 #define KGE_LEN 3
424 425 #define KGE_CHK 4
425 426
426 427 /*
427 428 * Parse basic extension headers and return in the passed-in pointer vector.
428 429 * Return values include:
429 430 *
430 431 * KGE_OK Everything's nice and parsed out.
431 432 * If there are no extensions, place NULL in extv[0].
432 433 * KGE_DUP There is a duplicate extension.
433 434 * First instance in appropriate bin. First duplicate in
434 435 * extv[0].
435 436 * KGE_UNK Unknown extension type encountered. extv[0] contains
436 437 * unknown header.
437 438 * KGE_LEN Extension length error.
438 439 * KGE_CHK High-level reality check failed on specific extension.
439 440 *
440 441 * My apologies for some of the pointer arithmetic in here. I'm thinking
441 442 * like an assembly programmer, yet trying to make the compiler happy.
442 443 */
443 444 static int
444 445 spdsock_get_ext(spd_ext_t *extv[], spd_msg_t *basehdr, uint_t msgsize)
445 446 {
446 447 bzero(extv, sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));
447 448
448 449 /* Use extv[0] as the "current working pointer". */
449 450
450 451 extv[0] = (spd_ext_t *)(basehdr + 1);
451 452
452 453 while (extv[0] < (spd_ext_t *)(((uint8_t *)basehdr) + msgsize)) {
453 454 /* Check for unknown headers. */
454 455 if (extv[0]->spd_ext_type == 0 ||
455 456 extv[0]->spd_ext_type > SPD_EXT_MAX)
456 457 return (KGE_UNK);
457 458
458 459 /*
459 460 * Check length. Use uint64_t because extlen is in units
460 461 * of 64-bit words. If length goes beyond the msgsize,
461 462 * return an error. (Zero length also qualifies here.)
462 463 */
463 464 if (extv[0]->spd_ext_len == 0 ||
464 465 (void *)((uint64_t *)extv[0] + extv[0]->spd_ext_len) >
465 466 (void *)((uint8_t *)basehdr + msgsize))
466 467 return (KGE_LEN);
467 468
468 469 /* Check for redundant headers. */
469 470 if (extv[extv[0]->spd_ext_type] != NULL)
470 471 return (KGE_DUP);
471 472
472 473 /*
473 474 * Reality check the extension if possible at the spdsock
474 475 * level.
475 476 */
476 477 if (!ext_check(extv[0]))
477 478 return (KGE_CHK);
478 479
479 480 /* If I make it here, assign the appropriate bin. */
480 481 extv[extv[0]->spd_ext_type] = extv[0];
481 482
482 483 /* Advance pointer (See above for uint64_t ptr reasoning.) */
483 484 extv[0] = (spd_ext_t *)
484 485 ((uint64_t *)extv[0] + extv[0]->spd_ext_len);
485 486 }
486 487
487 488 /* Everything's cool. */
488 489
489 490 /*
490 491 * If extv[0] == NULL, then there are no extension headers in this
491 492 * message. Ensure that this is the case.
492 493 */
493 494 if (extv[0] == (spd_ext_t *)(basehdr + 1))
494 495 extv[0] = NULL;
495 496
496 497 return (KGE_OK);
497 498 }
498 499
499 500 static const int bad_ext_diag[] = {
500 501 SPD_DIAGNOSTIC_MALFORMED_LCLPORT,
501 502 SPD_DIAGNOSTIC_MALFORMED_REMPORT,
502 503 SPD_DIAGNOSTIC_MALFORMED_PROTO,
503 504 SPD_DIAGNOSTIC_MALFORMED_LCLADDR,
504 505 SPD_DIAGNOSTIC_MALFORMED_REMADDR,
505 506 SPD_DIAGNOSTIC_MALFORMED_ACTION,
506 507 SPD_DIAGNOSTIC_MALFORMED_RULE,
507 508 SPD_DIAGNOSTIC_MALFORMED_RULESET,
508 509 SPD_DIAGNOSTIC_MALFORMED_ICMP_TYPECODE
509 510 };
510 511
511 512 static const int dup_ext_diag[] = {
512 513 SPD_DIAGNOSTIC_DUPLICATE_LCLPORT,
513 514 SPD_DIAGNOSTIC_DUPLICATE_REMPORT,
514 515 SPD_DIAGNOSTIC_DUPLICATE_PROTO,
515 516 SPD_DIAGNOSTIC_DUPLICATE_LCLADDR,
516 517 SPD_DIAGNOSTIC_DUPLICATE_REMADDR,
517 518 SPD_DIAGNOSTIC_DUPLICATE_ACTION,
518 519 SPD_DIAGNOSTIC_DUPLICATE_RULE,
519 520 SPD_DIAGNOSTIC_DUPLICATE_RULESET,
520 521 SPD_DIAGNOSTIC_DUPLICATE_ICMP_TYPECODE
521 522 };
522 523
523 524 /*
524 525 * Transmit a PF_POLICY error message to the instance either pointed to
525 526 * by ks, the instance with serial number serial, or more, depending.
526 527 *
527 528 * The faulty message (or a reasonable facsimile thereof) is in mp.
528 529 * This function will free mp or recycle it for delivery, thereby causing
529 530 * the stream head to free it.
530 531 */
531 532 static void
532 533 spdsock_error(queue_t *q, mblk_t *mp, int error, int diagnostic)
533 534 {
534 535 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
535 536
536 537 ASSERT(mp->b_datap->db_type == M_DATA);
537 538
538 539 if (spmsg->spd_msg_type < SPD_MIN ||
539 540 spmsg->spd_msg_type > SPD_MAX)
540 541 spmsg->spd_msg_type = SPD_RESERVED;
541 542
542 543 /*
543 544 * Strip out extension headers.
544 545 */
545 546 ASSERT(mp->b_rptr + sizeof (*spmsg) <= mp->b_datap->db_lim);
546 547 mp->b_wptr = mp->b_rptr + sizeof (*spmsg);
547 548 spmsg->spd_msg_len = SPD_8TO64(sizeof (spd_msg_t));
548 549 spmsg->spd_msg_errno = (uint8_t)error;
549 550 spmsg->spd_msg_diagnostic = (uint16_t)diagnostic;
550 551
551 552 qreply(q, mp);
552 553 }
553 554
554 555 static void
555 556 spdsock_diag(queue_t *q, mblk_t *mp, int diagnostic)
556 557 {
557 558 spdsock_error(q, mp, EINVAL, diagnostic);
558 559 }
559 560
560 561 static void
561 562 spd_echo(queue_t *q, mblk_t *mp)
562 563 {
563 564 qreply(q, mp);
564 565 }
565 566
566 567 /*
567 568 * Do NOT consume a reference to itp.
568 569 */
569 570 /*ARGSUSED*/
570 571 static void
571 572 spdsock_flush_node(ipsec_tun_pol_t *itp, void *cookie, netstack_t *ns)
572 573 {
573 574 boolean_t active = (boolean_t)cookie;
574 575 ipsec_policy_head_t *iph;
575 576
576 577 iph = active ? itp->itp_policy : itp->itp_inactive;
577 578 IPPH_REFHOLD(iph);
578 579 mutex_enter(&itp->itp_lock);
579 580 spdsock_flush_one(iph, ns); /* Releases iph refhold. */
580 581 if (active)
581 582 itp->itp_flags &= ~ITPF_PFLAGS;
582 583 else
583 584 itp->itp_flags &= ~ITPF_IFLAGS;
584 585 mutex_exit(&itp->itp_lock);
585 586 /* SPD_FLUSH is worth a tunnel MTU check. */
586 587 update_iptun_policy(itp);
587 588 }
588 589
589 590 /*
590 591 * Clear out one polhead.
591 592 */
592 593 static void
593 594 spdsock_flush_one(ipsec_policy_head_t *iph, netstack_t *ns)
594 595 {
595 596 rw_enter(&iph->iph_lock, RW_WRITER);
596 597 ipsec_polhead_flush(iph, ns);
597 598 rw_exit(&iph->iph_lock);
598 599 IPPH_REFRELE(iph, ns);
599 600 }
600 601
601 602 static void
602 603 spdsock_flush(queue_t *q, ipsec_policy_head_t *iph, ipsec_tun_pol_t *itp,
603 604 mblk_t *mp)
604 605 {
605 606 boolean_t active;
606 607 spdsock_t *ss = (spdsock_t *)q->q_ptr;
607 608 netstack_t *ns = ss->spdsock_spds->spds_netstack;
608 609 uint32_t auditing = AU_AUDITING();
609 610
610 611 if (iph != ALL_ACTIVE_POLHEADS && iph != ALL_INACTIVE_POLHEADS) {
611 612 spdsock_flush_one(iph, ns);
612 613 if (auditing) {
613 614 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
614 615 cred_t *cr;
615 616 pid_t cpid;
616 617
617 618 cr = msg_getcred(mp, &cpid);
618 619 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
619 620 audit_pf_policy(SPD_FLUSH, cr, ns,
620 621 ITP_NAME(itp), active, 0, cpid);
621 622 }
622 623 } else {
623 624 active = (iph == ALL_ACTIVE_POLHEADS);
624 625
625 626 /* First flush the global policy. */
626 627 spdsock_flush_one(active ? ipsec_system_policy(ns) :
627 628 ipsec_inactive_policy(ns), ns);
628 629 if (auditing) {
629 630 cred_t *cr;
630 631 pid_t cpid;
631 632
632 633 cr = msg_getcred(mp, &cpid);
633 634 audit_pf_policy(SPD_FLUSH, cr, ns, NULL,
634 635 active, 0, cpid);
635 636 }
636 637 /* Then flush every tunnel's appropriate one. */
637 638 itp_walk(spdsock_flush_node, (void *)active, ns);
638 639 if (auditing) {
639 640 cred_t *cr;
640 641 pid_t cpid;
641 642
642 643 cr = msg_getcred(mp, &cpid);
643 644 audit_pf_policy(SPD_FLUSH, cr, ns,
644 645 "all tunnels", active, 0, cpid);
645 646 }
646 647 }
647 648
648 649 spd_echo(q, mp);
649 650 }
650 651
651 652 static boolean_t
652 653 spdsock_ext_to_sel(spd_ext_t **extv, ipsec_selkey_t *sel, int *diag)
653 654 {
654 655 bzero(sel, sizeof (*sel));
655 656
656 657 if (extv[SPD_EXT_PROTO] != NULL) {
657 658 struct spd_proto *pr =
658 659 (struct spd_proto *)extv[SPD_EXT_PROTO];
659 660 sel->ipsl_proto = pr->spd_proto_number;
660 661 sel->ipsl_valid |= IPSL_PROTOCOL;
661 662 }
662 663 if (extv[SPD_EXT_LCLPORT] != NULL) {
663 664 struct spd_portrange *pr =
664 665 (struct spd_portrange *)extv[SPD_EXT_LCLPORT];
665 666 sel->ipsl_lport = pr->spd_ports_minport;
666 667 sel->ipsl_valid |= IPSL_LOCAL_PORT;
667 668 }
668 669 if (extv[SPD_EXT_REMPORT] != NULL) {
669 670 struct spd_portrange *pr =
670 671 (struct spd_portrange *)extv[SPD_EXT_REMPORT];
671 672 sel->ipsl_rport = pr->spd_ports_minport;
672 673 sel->ipsl_valid |= IPSL_REMOTE_PORT;
673 674 }
674 675
675 676 if (extv[SPD_EXT_ICMP_TYPECODE] != NULL) {
676 677 struct spd_typecode *tc=
677 678 (struct spd_typecode *)extv[SPD_EXT_ICMP_TYPECODE];
678 679
679 680 sel->ipsl_valid |= IPSL_ICMP_TYPE;
680 681 sel->ipsl_icmp_type = tc->spd_typecode_type;
681 682 if (tc->spd_typecode_type_end < tc->spd_typecode_type)
682 683 sel->ipsl_icmp_type_end = tc->spd_typecode_type;
683 684 else
684 685 sel->ipsl_icmp_type_end = tc->spd_typecode_type_end;
685 686
686 687 if (tc->spd_typecode_code != 255) {
687 688 sel->ipsl_valid |= IPSL_ICMP_CODE;
688 689 sel->ipsl_icmp_code = tc->spd_typecode_code;
689 690 if (tc->spd_typecode_code_end < tc->spd_typecode_code)
690 691 sel->ipsl_icmp_code_end = tc->spd_typecode_code;
691 692 else
692 693 sel->ipsl_icmp_code_end =
693 694 tc->spd_typecode_code_end;
694 695 }
695 696 }
696 697 #define ADDR2SEL(sel, extv, field, pfield, extn, bit) \
697 698 if ((extv)[(extn)] != NULL) { \
698 699 uint_t addrlen; \
699 700 struct spd_address *ap = \
700 701 (struct spd_address *)((extv)[(extn)]); \
701 702 addrlen = (ap->spd_address_af == AF_INET6) ? \
702 703 IPV6_ADDR_LEN : IP_ADDR_LEN; \
703 704 if (SPD_64TO8(ap->spd_address_len) < \
704 705 (addrlen + sizeof (*ap))) { \
705 706 *diag = SPD_DIAGNOSTIC_BAD_ADDR_LEN; \
706 707 return (B_FALSE); \
707 708 } \
708 709 bcopy((ap+1), &((sel)->field), addrlen); \
709 710 (sel)->pfield = ap->spd_address_prefixlen; \
710 711 (sel)->ipsl_valid |= (bit); \
711 712 (sel)->ipsl_valid |= (ap->spd_address_af == AF_INET6) ? \
712 713 IPSL_IPV6 : IPSL_IPV4; \
713 714 }
714 715
715 716 ADDR2SEL(sel, extv, ipsl_local, ipsl_local_pfxlen,
716 717 SPD_EXT_LCLADDR, IPSL_LOCAL_ADDR);
717 718 ADDR2SEL(sel, extv, ipsl_remote, ipsl_remote_pfxlen,
718 719 SPD_EXT_REMADDR, IPSL_REMOTE_ADDR);
719 720
720 721 if ((sel->ipsl_valid & (IPSL_IPV6|IPSL_IPV4)) ==
721 722 (IPSL_IPV6|IPSL_IPV4)) {
722 723 *diag = SPD_DIAGNOSTIC_MIXED_AF;
723 724 return (B_FALSE);
724 725 }
725 726
726 727 #undef ADDR2SEL
727 728
728 729 return (B_TRUE);
729 730 }
730 731
731 732 static boolean_t
732 733 spd_convert_type(uint32_t type, ipsec_act_t *act)
733 734 {
734 735 switch (type) {
735 736 case SPD_ACTTYPE_DROP:
736 737 act->ipa_type = IPSEC_ACT_DISCARD;
737 738 return (B_TRUE);
738 739
739 740 case SPD_ACTTYPE_PASS:
740 741 act->ipa_type = IPSEC_ACT_CLEAR;
741 742 return (B_TRUE);
742 743
743 744 case SPD_ACTTYPE_IPSEC:
744 745 act->ipa_type = IPSEC_ACT_APPLY;
745 746 return (B_TRUE);
746 747 }
747 748 return (B_FALSE);
748 749 }
749 750
750 751 static boolean_t
751 752 spd_convert_flags(uint32_t flags, ipsec_act_t *act)
752 753 {
753 754 /*
754 755 * Note use of !! for boolean canonicalization.
755 756 */
756 757 act->ipa_apply.ipp_use_ah = !!(flags & SPD_APPLY_AH);
757 758 act->ipa_apply.ipp_use_esp = !!(flags & SPD_APPLY_ESP);
758 759 act->ipa_apply.ipp_use_espa = !!(flags & SPD_APPLY_ESPA);
759 760 act->ipa_apply.ipp_use_se = !!(flags & SPD_APPLY_SE);
760 761 act->ipa_apply.ipp_use_unique = !!(flags & SPD_APPLY_UNIQUE);
761 762 return (B_TRUE);
762 763 }
763 764
764 765 static void
765 766 spdsock_reset_act(ipsec_act_t *act)
766 767 {
767 768 bzero(act, sizeof (*act));
768 769 act->ipa_apply.ipp_espe_maxbits = IPSEC_MAX_KEYBITS;
769 770 act->ipa_apply.ipp_espa_maxbits = IPSEC_MAX_KEYBITS;
770 771 act->ipa_apply.ipp_ah_maxbits = IPSEC_MAX_KEYBITS;
771 772 }
772 773
773 774 /*
774 775 * Sanity check action against reality, and shrink-wrap key sizes..
775 776 */
776 777 static boolean_t
777 778 spdsock_check_action(ipsec_act_t *act, boolean_t tunnel_polhead, int *diag,
778 779 spd_stack_t *spds)
779 780 {
780 781 if (tunnel_polhead && act->ipa_apply.ipp_use_unique) {
781 782 *diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
782 783 return (B_FALSE);
783 784 }
784 785 if ((act->ipa_type != IPSEC_ACT_APPLY) &&
785 786 (act->ipa_apply.ipp_use_ah ||
786 787 act->ipa_apply.ipp_use_esp ||
787 788 act->ipa_apply.ipp_use_espa ||
788 789 act->ipa_apply.ipp_use_se ||
789 790 act->ipa_apply.ipp_use_unique)) {
790 791 *diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
791 792 return (B_FALSE);
792 793 }
793 794 if ((act->ipa_type == IPSEC_ACT_APPLY) &&
794 795 !act->ipa_apply.ipp_use_ah &&
795 796 !act->ipa_apply.ipp_use_esp) {
796 797 *diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
797 798 return (B_FALSE);
798 799 }
799 800 return (ipsec_check_action(act, diag, spds->spds_netstack));
800 801 }
801 802
802 803 /*
803 804 * We may be short a few error checks here..
804 805 */
805 806 static boolean_t
806 807 spdsock_ext_to_actvec(spd_ext_t **extv, ipsec_act_t **actpp, uint_t *nactp,
807 808 int *diag, spd_stack_t *spds)
808 809 {
809 810 struct spd_ext_actions *sactp =
810 811 (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
811 812 ipsec_act_t act, *actp, *endactp;
812 813 struct spd_attribute *attrp, *endattrp;
813 814 uint64_t *endp;
814 815 int nact;
815 816 boolean_t tunnel_polhead;
816 817
817 818 tunnel_polhead = (extv[SPD_EXT_TUN_NAME] != NULL &&
818 819 (((struct spd_rule *)extv[SPD_EXT_RULE])->spd_rule_flags &
819 820 SPD_RULE_FLAG_TUNNEL));
820 821
821 822 *actpp = NULL;
822 823 *nactp = 0;
823 824
824 825 if (sactp == NULL) {
825 826 *diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
826 827 return (B_FALSE);
827 828 }
828 829
829 830 /*
830 831 * Parse the "action" extension and convert into an action chain.
831 832 */
832 833
833 834 nact = sactp->spd_actions_count;
834 835
835 836 endp = (uint64_t *)sactp;
836 837 endp += sactp->spd_actions_len;
837 838 endattrp = (struct spd_attribute *)endp;
838 839
839 840 actp = kmem_alloc(sizeof (*actp) * nact, KM_NOSLEEP);
840 841 if (actp == NULL) {
841 842 *diag = SPD_DIAGNOSTIC_ADD_NO_MEM;
842 843 return (B_FALSE);
843 844 }
844 845 *actpp = actp;
845 846 *nactp = nact;
846 847 endactp = actp + nact;
847 848
848 849 spdsock_reset_act(&act);
849 850 attrp = (struct spd_attribute *)(&sactp[1]);
850 851
851 852 for (; attrp < endattrp; attrp++) {
852 853 switch (attrp->spd_attr_tag) {
853 854 case SPD_ATTR_NOP:
854 855 break;
855 856
856 857 case SPD_ATTR_EMPTY:
857 858 spdsock_reset_act(&act);
858 859 break;
859 860
860 861 case SPD_ATTR_END:
861 862 attrp = endattrp;
862 863 /* FALLTHRU */
863 864 case SPD_ATTR_NEXT:
864 865 if (actp >= endactp) {
865 866 *diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
866 867 goto fail;
867 868 }
868 869 if (!spdsock_check_action(&act, tunnel_polhead,
869 870 diag, spds))
870 871 goto fail;
871 872 *actp++ = act;
872 873 spdsock_reset_act(&act);
873 874 break;
874 875
875 876 case SPD_ATTR_TYPE:
876 877 if (!spd_convert_type(attrp->spd_attr_value, &act)) {
877 878 *diag = SPD_DIAGNOSTIC_ADD_BAD_TYPE;
878 879 goto fail;
879 880 }
880 881 break;
881 882
882 883 case SPD_ATTR_FLAGS:
883 884 if (!tunnel_polhead && extv[SPD_EXT_TUN_NAME] != NULL) {
884 885 /*
885 886 * Set "sa unique" for transport-mode
886 887 * tunnels whether we want to or not.
887 888 */
888 889 attrp->spd_attr_value |= SPD_APPLY_UNIQUE;
889 890 }
890 891 if (!spd_convert_flags(attrp->spd_attr_value, &act)) {
891 892 *diag = SPD_DIAGNOSTIC_ADD_BAD_FLAGS;
892 893 goto fail;
893 894 }
894 895 break;
895 896
896 897 case SPD_ATTR_AH_AUTH:
897 898 if (attrp->spd_attr_value == 0) {
898 899 *diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
899 900 goto fail;
900 901 }
901 902 act.ipa_apply.ipp_auth_alg = attrp->spd_attr_value;
902 903 break;
903 904
904 905 case SPD_ATTR_ESP_ENCR:
905 906 if (attrp->spd_attr_value == 0) {
906 907 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
907 908 goto fail;
908 909 }
909 910 act.ipa_apply.ipp_encr_alg = attrp->spd_attr_value;
910 911 break;
911 912
912 913 case SPD_ATTR_ESP_AUTH:
913 914 if (attrp->spd_attr_value == 0) {
914 915 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
915 916 goto fail;
916 917 }
917 918 act.ipa_apply.ipp_esp_auth_alg = attrp->spd_attr_value;
918 919 break;
919 920
920 921 case SPD_ATTR_ENCR_MINBITS:
921 922 act.ipa_apply.ipp_espe_minbits = attrp->spd_attr_value;
922 923 break;
923 924
924 925 case SPD_ATTR_ENCR_MAXBITS:
925 926 act.ipa_apply.ipp_espe_maxbits = attrp->spd_attr_value;
926 927 break;
927 928
928 929 case SPD_ATTR_AH_MINBITS:
929 930 act.ipa_apply.ipp_ah_minbits = attrp->spd_attr_value;
930 931 break;
931 932
932 933 case SPD_ATTR_AH_MAXBITS:
933 934 act.ipa_apply.ipp_ah_maxbits = attrp->spd_attr_value;
934 935 break;
935 936
936 937 case SPD_ATTR_ESPA_MINBITS:
937 938 act.ipa_apply.ipp_espa_minbits = attrp->spd_attr_value;
938 939 break;
939 940
940 941 case SPD_ATTR_ESPA_MAXBITS:
941 942 act.ipa_apply.ipp_espa_maxbits = attrp->spd_attr_value;
942 943 break;
943 944
944 945 case SPD_ATTR_LIFE_SOFT_TIME:
945 946 case SPD_ATTR_LIFE_HARD_TIME:
946 947 case SPD_ATTR_LIFE_SOFT_BYTES:
947 948 case SPD_ATTR_LIFE_HARD_BYTES:
948 949 break;
949 950
950 951 case SPD_ATTR_KM_PROTO:
951 952 act.ipa_apply.ipp_km_proto = attrp->spd_attr_value;
952 953 break;
953 954
954 955 case SPD_ATTR_KM_COOKIE:
955 956 act.ipa_apply.ipp_km_cookie = attrp->spd_attr_value;
956 957 break;
957 958
958 959 case SPD_ATTR_REPLAY_DEPTH:
959 960 act.ipa_apply.ipp_replay_depth = attrp->spd_attr_value;
960 961 break;
961 962 }
962 963 }
963 964 if (actp != endactp) {
964 965 *diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
965 966 goto fail;
966 967 }
967 968
968 969 return (B_TRUE);
969 970 fail:
970 971 ipsec_actvec_free(*actpp, nact);
971 972 *actpp = NULL;
972 973 return (B_FALSE);
973 974 }
974 975
975 976 typedef struct
976 977 {
977 978 ipsec_policy_t *pol;
978 979 int dir;
979 980 } tmprule_t;
980 981
981 982 static int
982 983 mkrule(ipsec_policy_head_t *iph, struct spd_rule *rule,
983 984 ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t af,
984 985 tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
985 986 {
986 987 ipsec_policy_t *pol;
987 988
988 989 sel->ipsl_valid &= ~(IPSL_IPV6|IPSL_IPV4);
989 990 sel->ipsl_valid |= af;
990 991
991 992 pol = ipsec_policy_create(sel, actp, nact, rule->spd_rule_priority,
992 993 index, spds->spds_netstack);
993 994 if (pol == NULL)
994 995 return (ENOMEM);
995 996
996 997 (*rp)->pol = pol;
997 998 (*rp)->dir = dir;
998 999 (*rp)++;
999 1000
1000 1001 if (!ipsec_check_policy(iph, pol, dir))
1001 1002 return (EEXIST);
1002 1003
1003 1004 rule->spd_rule_index = pol->ipsp_index;
1004 1005 return (0);
1005 1006 }
1006 1007
1007 1008 static int
1008 1009 mkrulepair(ipsec_policy_head_t *iph, struct spd_rule *rule,
1009 1010 ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t afs,
1010 1011 tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
1011 1012 {
1012 1013 int error;
1013 1014
1014 1015 if (afs & IPSL_IPV4) {
1015 1016 error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV4, rp,
1016 1017 index, spds);
1017 1018 if (error != 0)
1018 1019 return (error);
1019 1020 }
1020 1021 if (afs & IPSL_IPV6) {
1021 1022 error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV6, rp,
1022 1023 index, spds);
1023 1024 if (error != 0)
1024 1025 return (error);
1025 1026 }
1026 1027 return (0);
1027 1028 }
1028 1029
1029 1030
1030 1031 static void
1031 1032 spdsock_addrule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
1032 1033 spd_ext_t **extv, ipsec_tun_pol_t *itp)
1033 1034 {
1034 1035 ipsec_selkey_t sel;
1035 1036 ipsec_act_t *actp;
1036 1037 uint_t nact;
1037 1038 int diag = 0, error, afs;
1038 1039 struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
1039 1040 tmprule_t rules[4], *rulep = &rules[0];
1040 1041 boolean_t tunnel_mode, empty_itp, active;
1041 1042 uint64_t *index = (itp == NULL) ? NULL : &itp->itp_next_policy_index;
1042 1043 spdsock_t *ss = (spdsock_t *)q->q_ptr;
1043 1044 spd_stack_t *spds = ss->spdsock_spds;
1044 1045 uint32_t auditing = AU_AUDITING();
1045 1046
1046 1047 if (rule == NULL) {
1047 1048 spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
1048 1049 if (auditing) {
1049 1050 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
1050 1051 cred_t *cr;
1051 1052 pid_t cpid;
1052 1053
1053 1054 cr = msg_getcred(mp, &cpid);
1054 1055 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
1055 1056 audit_pf_policy(SPD_ADDRULE, cr,
1056 1057 spds->spds_netstack, ITP_NAME(itp), active,
1057 1058 SPD_DIAGNOSTIC_NO_RULE_EXT, cpid);
1058 1059 }
1059 1060 return;
1060 1061 }
1061 1062
1062 1063 tunnel_mode = (rule->spd_rule_flags & SPD_RULE_FLAG_TUNNEL);
1063 1064
1064 1065 if (itp != NULL) {
1065 1066 mutex_enter(&itp->itp_lock);
1066 1067 ASSERT(itp->itp_policy == iph || itp->itp_inactive == iph);
1067 1068 active = (itp->itp_policy == iph);
1068 1069 if (ITP_P_ISACTIVE(itp, iph)) {
1069 1070 /* Check for mix-and-match of tunnel/transport. */
1070 1071 if ((tunnel_mode && !ITP_P_ISTUNNEL(itp, iph)) ||
1071 1072 (!tunnel_mode && ITP_P_ISTUNNEL(itp, iph))) {
1072 1073 mutex_exit(&itp->itp_lock);
1073 1074 spdsock_error(q, mp, EBUSY, 0);
1074 1075 return;
1075 1076 }
1076 1077 empty_itp = B_FALSE;
1077 1078 } else {
1078 1079 empty_itp = B_TRUE;
1079 1080 itp->itp_flags = active ? ITPF_P_ACTIVE : ITPF_I_ACTIVE;
1080 1081 if (tunnel_mode)
1081 1082 itp->itp_flags |= active ? ITPF_P_TUNNEL :
1082 1083 ITPF_I_TUNNEL;
1083 1084 }
1084 1085 } else {
1085 1086 empty_itp = B_FALSE;
1086 1087 }
1087 1088
1088 1089 if (rule->spd_rule_index != 0) {
1089 1090 diag = SPD_DIAGNOSTIC_INVALID_RULE_INDEX;
1090 1091 error = EINVAL;
1091 1092 goto fail2;
1092 1093 }
1093 1094
1094 1095 if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
1095 1096 error = EINVAL;
1096 1097 goto fail2;
1097 1098 }
1098 1099
1099 1100 if (itp != NULL) {
1100 1101 if (tunnel_mode) {
1101 1102 if (sel.ipsl_valid &
1102 1103 (IPSL_REMOTE_PORT | IPSL_LOCAL_PORT)) {
1103 1104 itp->itp_flags |= active ?
1104 1105 ITPF_P_PER_PORT_SECURITY :
1105 1106 ITPF_I_PER_PORT_SECURITY;
1106 1107 }
1107 1108 } else {
1108 1109 /*
1109 1110 * For now, we don't allow transport-mode on a tunnel
1110 1111 * with ANY specific selectors. Bail if we have such
1111 1112 * a request.
1112 1113 */
1113 1114 if (sel.ipsl_valid & IPSL_WILDCARD) {
1114 1115 diag = SPD_DIAGNOSTIC_NO_TUNNEL_SELECTORS;
1115 1116 error = EINVAL;
1116 1117 goto fail2;
1117 1118 }
1118 1119 }
1119 1120 }
1120 1121
1121 1122 if (!spdsock_ext_to_actvec(extv, &actp, &nact, &diag, spds)) {
1122 1123 error = EINVAL;
1123 1124 goto fail2;
1124 1125 }
1125 1126 /*
1126 1127 * If no addresses were specified, add both.
1127 1128 */
1128 1129 afs = sel.ipsl_valid & (IPSL_IPV6|IPSL_IPV4);
1129 1130 if (afs == 0)
1130 1131 afs = (IPSL_IPV6|IPSL_IPV4);
1131 1132
1132 1133 rw_enter(&iph->iph_lock, RW_WRITER);
1133 1134
1134 1135 if (rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) {
1135 1136 error = mkrulepair(iph, rule, &sel, actp, nact,
1136 1137 IPSEC_TYPE_OUTBOUND, afs, &rulep, index, spds);
1137 1138 if (error != 0)
1138 1139 goto fail;
1139 1140 }
1140 1141
1141 1142 if (rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) {
1142 1143 error = mkrulepair(iph, rule, &sel, actp, nact,
1143 1144 IPSEC_TYPE_INBOUND, afs, &rulep, index, spds);
1144 1145 if (error != 0)
1145 1146 goto fail;
1146 1147 }
1147 1148
1148 1149 while ((--rulep) >= &rules[0]) {
1149 1150 ipsec_enter_policy(iph, rulep->pol, rulep->dir,
1150 1151 spds->spds_netstack);
1151 1152 }
1152 1153 rw_exit(&iph->iph_lock);
1153 1154 if (itp != NULL)
1154 1155 mutex_exit(&itp->itp_lock);
1155 1156
1156 1157 ipsec_actvec_free(actp, nact);
1157 1158 spd_echo(q, mp);
1158 1159 if (auditing) {
1159 1160 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
1160 1161 cred_t *cr;
1161 1162 pid_t cpid;
1162 1163
1163 1164 cr = msg_getcred(mp, &cpid);
1164 1165 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
1165 1166 audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
1166 1167 ITP_NAME(itp), active, 0, cpid);
1167 1168 }
1168 1169 return;
1169 1170
1170 1171 fail:
1171 1172 rw_exit(&iph->iph_lock);
1172 1173 while ((--rulep) >= &rules[0])
1173 1174 IPPOL_REFRELE(rulep->pol);
1174 1175 ipsec_actvec_free(actp, nact);
1175 1176 fail2:
1176 1177 if (itp != NULL) {
1177 1178 if (empty_itp)
1178 1179 itp->itp_flags = 0;
1179 1180 mutex_exit(&itp->itp_lock);
1180 1181 }
1181 1182 spdsock_error(q, mp, error, diag);
1182 1183 if (auditing) {
1183 1184 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
1184 1185 cred_t *cr;
1185 1186 pid_t cpid;
1186 1187
1187 1188 cr = msg_getcred(mp, &cpid);
1188 1189 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
1189 1190 audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
1190 1191 ITP_NAME(itp), active, error, cpid);
1191 1192 }
1192 1193 }
1193 1194
1194 1195 void
1195 1196 spdsock_deleterule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
1196 1197 spd_ext_t **extv, ipsec_tun_pol_t *itp)
1197 1198 {
1198 1199 ipsec_selkey_t sel;
1199 1200 struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
1200 1201 int err, diag = 0;
1201 1202 spdsock_t *ss = (spdsock_t *)q->q_ptr;
1202 1203 netstack_t *ns = ss->spdsock_spds->spds_netstack;
1203 1204 uint32_t auditing = AU_AUDITING();
1204 1205
1205 1206 if (rule == NULL) {
1206 1207 spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
1207 1208 if (auditing) {
1208 1209 boolean_t active;
1209 1210 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
1210 1211 cred_t *cr;
1211 1212 pid_t cpid;
1212 1213
1213 1214 cr = msg_getcred(mp, &cpid);
1214 1215 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
1215 1216 audit_pf_policy(SPD_DELETERULE, cr, ns,
1216 1217 ITP_NAME(itp), active, SPD_DIAGNOSTIC_NO_RULE_EXT,
1217 1218 cpid);
1218 1219 }
1219 1220 return;
1220 1221 }
1221 1222
1222 1223 /*
1223 1224 * Must enter itp_lock first to avoid deadlock. See tun.c's
1224 1225 * set_sec_simple() for the other case of itp_lock and iph_lock.
1225 1226 */
1226 1227 if (itp != NULL)
1227 1228 mutex_enter(&itp->itp_lock);
1228 1229
1229 1230 if (rule->spd_rule_index != 0) {
1230 1231 if (ipsec_policy_delete_index(iph, rule->spd_rule_index, ns) !=
1231 1232 0) {
1232 1233 err = ESRCH;
1233 1234 goto fail;
1234 1235 }
1235 1236 } else {
1236 1237 if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
1237 1238 err = EINVAL; /* diag already set... */
1238 1239 goto fail;
1239 1240 }
1240 1241
1241 1242 if ((rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) &&
1242 1243 !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_INBOUND, ns)) {
1243 1244 err = ESRCH;
1244 1245 goto fail;
1245 1246 }
1246 1247
1247 1248 if ((rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) &&
1248 1249 !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_OUTBOUND, ns)) {
1249 1250 err = ESRCH;
1250 1251 goto fail;
1251 1252 }
1252 1253 }
1253 1254
1254 1255 if (itp != NULL) {
1255 1256 ASSERT(iph == itp->itp_policy || iph == itp->itp_inactive);
1256 1257 rw_enter(&iph->iph_lock, RW_READER);
1257 1258 if (avl_numnodes(&iph->iph_rulebyid) == 0) {
1258 1259 if (iph == itp->itp_policy)
1259 1260 itp->itp_flags &= ~ITPF_PFLAGS;
1260 1261 else
1261 1262 itp->itp_flags &= ~ITPF_IFLAGS;
1262 1263 }
1263 1264 /* Can exit locks in any order. */
1264 1265 rw_exit(&iph->iph_lock);
1265 1266 mutex_exit(&itp->itp_lock);
1266 1267 }
1267 1268 spd_echo(q, mp);
1268 1269 if (auditing) {
1269 1270 boolean_t active;
1270 1271 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
1271 1272 cred_t *cr;
1272 1273 pid_t cpid;
1273 1274
1274 1275 cr = msg_getcred(mp, &cpid);
1275 1276 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
1276 1277 audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
1277 1278 active, 0, cpid);
1278 1279 }
1279 1280 return;
1280 1281 fail:
1281 1282 if (itp != NULL)
1282 1283 mutex_exit(&itp->itp_lock);
1283 1284 spdsock_error(q, mp, err, diag);
1284 1285 if (auditing) {
1285 1286 boolean_t active;
1286 1287 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
1287 1288 cred_t *cr;
1288 1289 pid_t cpid;
1289 1290
1290 1291 cr = msg_getcred(mp, &cpid);
1291 1292 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
1292 1293 audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
1293 1294 active, err, cpid);
1294 1295 }
1295 1296 }
1296 1297
1297 1298 /* Do NOT consume a reference to itp. */
1298 1299 /* ARGSUSED */
1299 1300 static void
1300 1301 spdsock_flip_node(ipsec_tun_pol_t *itp, void *ignoreme, netstack_t *ns)
1301 1302 {
1302 1303 mutex_enter(&itp->itp_lock);
1303 1304 ITPF_SWAP(itp->itp_flags);
1304 1305 ipsec_swap_policy(itp->itp_policy, itp->itp_inactive, ns);
1305 1306 mutex_exit(&itp->itp_lock);
1306 1307 /* SPD_FLIP is worth a tunnel MTU check. */
1307 1308 update_iptun_policy(itp);
1308 1309 }
1309 1310
1310 1311 void
1311 1312 spdsock_flip(queue_t *q, mblk_t *mp, spd_if_t *tunname)
1312 1313 {
1313 1314 char *tname;
1314 1315 ipsec_tun_pol_t *itp;
1315 1316 spdsock_t *ss = (spdsock_t *)q->q_ptr;
1316 1317 netstack_t *ns = ss->spdsock_spds->spds_netstack;
1317 1318 uint32_t auditing = AU_AUDITING();
1318 1319
1319 1320 if (tunname != NULL) {
1320 1321 tname = (char *)tunname->spd_if_name;
1321 1322 if (*tname == '\0') {
1322 1323 /* can't fail */
1323 1324 ipsec_swap_global_policy(ns);
1324 1325 if (auditing) {
1325 1326 boolean_t active;
1326 1327 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
1327 1328 cred_t *cr;
1328 1329 pid_t cpid;
1329 1330
1330 1331 cr = msg_getcred(mp, &cpid);
1331 1332 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
1332 1333 audit_pf_policy(SPD_FLIP, cr, ns,
1333 1334 NULL, active, 0, cpid);
1334 1335 }
1335 1336 itp_walk(spdsock_flip_node, NULL, ns);
1336 1337 if (auditing) {
1337 1338 boolean_t active;
1338 1339 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
1339 1340 cred_t *cr;
1340 1341 pid_t cpid;
1341 1342
1342 1343 cr = msg_getcred(mp, &cpid);
1343 1344 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
1344 1345 audit_pf_policy(SPD_FLIP, cr, ns,
1345 1346 "all tunnels", active, 0, cpid);
1346 1347 }
1347 1348 } else {
1348 1349 itp = get_tunnel_policy(tname, ns);
1349 1350 if (itp == NULL) {
1350 1351 /* Better idea for "tunnel not found"? */
1351 1352 spdsock_error(q, mp, ESRCH, 0);
1352 1353 if (auditing) {
1353 1354 boolean_t active;
1354 1355 spd_msg_t *spmsg =
1355 1356 (spd_msg_t *)mp->b_rptr;
1356 1357 cred_t *cr;
1357 1358 pid_t cpid;
1358 1359
1359 1360 cr = msg_getcred(mp, &cpid);
1360 1361 active = (spmsg->spd_msg_spdid ==
1361 1362 SPD_ACTIVE);
1362 1363 audit_pf_policy(SPD_FLIP, cr, ns,
1363 1364 ITP_NAME(itp), active,
1364 1365 ESRCH, cpid);
1365 1366 }
1366 1367 return;
1367 1368 }
1368 1369 spdsock_flip_node(itp, NULL, ns);
1369 1370 if (auditing) {
1370 1371 boolean_t active;
1371 1372 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
1372 1373 cred_t *cr;
1373 1374 pid_t cpid;
1374 1375
1375 1376 cr = msg_getcred(mp, &cpid);
1376 1377 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
1377 1378 audit_pf_policy(SPD_FLIP, cr, ns,
1378 1379 ITP_NAME(itp), active, 0, cpid);
1379 1380 }
1380 1381 ITP_REFRELE(itp, ns);
1381 1382 }
1382 1383 } else {
1383 1384 ipsec_swap_global_policy(ns); /* can't fail */
1384 1385 if (auditing) {
1385 1386 boolean_t active;
1386 1387 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
1387 1388 cred_t *cr;
1388 1389 pid_t cpid;
1389 1390
1390 1391 cr = msg_getcred(mp, &cpid);
1391 1392 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
1392 1393 audit_pf_policy(SPD_FLIP, cr,
1393 1394 ns, NULL, active, 0, cpid);
1394 1395 }
1395 1396 }
1396 1397 spd_echo(q, mp);
1397 1398 }
1398 1399
1399 1400 /*
1400 1401 * Unimplemented feature
1401 1402 */
1402 1403 /* ARGSUSED */
1403 1404 static void
1404 1405 spdsock_lookup(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
1405 1406 spd_ext_t **extv, ipsec_tun_pol_t *itp)
1406 1407 {
1407 1408 spdsock_error(q, mp, EINVAL, 0);
1408 1409 }
1409 1410
1410 1411
1411 1412 static mblk_t *
1412 1413 spdsock_dump_ruleset(mblk_t *req, ipsec_policy_head_t *iph,
1413 1414 uint32_t count, uint16_t error)
1414 1415 {
1415 1416 size_t len = sizeof (spd_ruleset_ext_t) + sizeof (spd_msg_t);
1416 1417 spd_msg_t *msg;
1417 1418 spd_ruleset_ext_t *ruleset;
1418 1419 mblk_t *m = allocb(len, BPRI_HI);
1419 1420
1420 1421 ASSERT(RW_READ_HELD(&iph->iph_lock));
1421 1422
1422 1423 if (m == NULL) {
1423 1424 return (NULL);
1424 1425 }
1425 1426 msg = (spd_msg_t *)m->b_rptr;
1426 1427 ruleset = (spd_ruleset_ext_t *)(&msg[1]);
1427 1428
1428 1429 m->b_wptr = (uint8_t *)&ruleset[1];
1429 1430
1430 1431 *msg = *(spd_msg_t *)(req->b_rptr);
1431 1432 msg->spd_msg_len = SPD_8TO64(len);
1432 1433 msg->spd_msg_errno = error;
1433 1434
1434 1435 ruleset->spd_ruleset_len = SPD_8TO64(sizeof (*ruleset));
1435 1436 ruleset->spd_ruleset_type = SPD_EXT_RULESET;
1436 1437 ruleset->spd_ruleset_count = count;
1437 1438 ruleset->spd_ruleset_version = iph->iph_gen;
1438 1439 return (m);
1439 1440 }
1440 1441
1441 1442 static mblk_t *
1442 1443 spdsock_dump_finish(spdsock_t *ss, int error)
1443 1444 {
1444 1445 mblk_t *m;
1445 1446 ipsec_policy_head_t *iph = ss->spdsock_dump_head;
1446 1447 mblk_t *req = ss->spdsock_dump_req;
1447 1448 netstack_t *ns = ss->spdsock_spds->spds_netstack;
1448 1449
1449 1450 rw_enter(&iph->iph_lock, RW_READER);
1450 1451 m = spdsock_dump_ruleset(req, iph, ss->spdsock_dump_count, error);
1451 1452 rw_exit(&iph->iph_lock);
1452 1453 IPPH_REFRELE(iph, ns);
1453 1454 if (ss->spdsock_itp != NULL) {
1454 1455 ITP_REFRELE(ss->spdsock_itp, ns);
1455 1456 ss->spdsock_itp = NULL;
1456 1457 }
1457 1458 ss->spdsock_dump_req = NULL;
1458 1459 freemsg(req);
1459 1460
1460 1461 return (m);
1461 1462 }
1462 1463
1463 1464 /*
1464 1465 * Rule encoding functions.
1465 1466 * We do a two-pass encode.
1466 1467 * If base != NULL, fill in encoded rule part starting at base+offset.
1467 1468 * Always return "offset" plus length of to-be-encoded data.
1468 1469 */
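[Review aside] A sketch of the intended two-pass calling pattern (hypothetical fragment; the real caller, spdsock_encode_rule() further below, follows this shape): pass base == NULL to size the encoding, allocate, then encode into the allocated buffer.

	/*
	 * Illustrative only -- assumes mreq, rule, dir, af, name, tunnel,
	 * plus uint_t len and mblk_t *m, are already declared.
	 */
	len = spdsock_encode_rule_head(NULL, 0, mreq, rule, dir, af,
	    name, tunnel);
	m = allocb(len, BPRI_HI);
	if (m != NULL) {
		m->b_wptr = m->b_rptr + len;
		(void) spdsock_encode_rule_head(m->b_rptr, 0, mreq, rule,
		    dir, af, name, tunnel);
	}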
1469 1470 static uint_t
1470 1471 spdsock_encode_typecode(uint8_t *base, uint_t offset, uint8_t type,
1471 1472 uint8_t type_end, uint8_t code, uint8_t code_end)
1472 1473 {
1473 1474 struct spd_typecode *tcp;
1474 1475
1475 1476 ASSERT(ALIGNED64(offset));
1476 1477
1477 1478 if (base != NULL) {
1478 1479 tcp = (struct spd_typecode *)(base + offset);
1479 1480 tcp->spd_typecode_len = SPD_8TO64(sizeof (*tcp));
1480 1481 tcp->spd_typecode_exttype = SPD_EXT_ICMP_TYPECODE;
1481 1482 tcp->spd_typecode_code = code;
1482 1483 tcp->spd_typecode_type = type;
1483 1484 tcp->spd_typecode_type_end = type_end;
1484 1485 tcp->spd_typecode_code_end = code_end;
1485 1486 }
1486 1487 offset += sizeof (*tcp);
1487 1488
1488 1489 ASSERT(ALIGNED64(offset));
1489 1490
1490 1491 return (offset);
1491 1492 }
1492 1493
1493 1494 static uint_t
1494 1495 spdsock_encode_proto(uint8_t *base, uint_t offset, uint8_t proto)
1495 1496 {
1496 1497 struct spd_proto *spp;
1497 1498
1498 1499 ASSERT(ALIGNED64(offset));
1499 1500
1500 1501 if (base != NULL) {
1501 1502 spp = (struct spd_proto *)(base + offset);
1502 1503 spp->spd_proto_len = SPD_8TO64(sizeof (*spp));
1503 1504 spp->spd_proto_exttype = SPD_EXT_PROTO;
1504 1505 spp->spd_proto_number = proto;
1505 1506 spp->spd_proto_reserved1 = 0;
1506 1507 spp->spd_proto_reserved2 = 0;
1507 1508 }
1508 1509 offset += sizeof (*spp);
1509 1510
1510 1511 ASSERT(ALIGNED64(offset));
1511 1512
1512 1513 return (offset);
1513 1514 }
1514 1515
1515 1516 static uint_t
1516 1517 spdsock_encode_port(uint8_t *base, uint_t offset, uint16_t ext, uint16_t port)
1517 1518 {
1518 1519 struct spd_portrange *spp;
1519 1520
1520 1521 ASSERT(ALIGNED64(offset));
1521 1522
1522 1523 if (base != NULL) {
1523 1524 spp = (struct spd_portrange *)(base + offset);
1524 1525 spp->spd_ports_len = SPD_8TO64(sizeof (*spp));
1525 1526 spp->spd_ports_exttype = ext;
1526 1527 spp->spd_ports_minport = port;
1527 1528 spp->spd_ports_maxport = port;
1528 1529 }
1529 1530 offset += sizeof (*spp);
1530 1531
1531 1532 ASSERT(ALIGNED64(offset));
1532 1533
1533 1534 return (offset);
1534 1535 }
1535 1536
1536 1537 static uint_t
1537 1538 spdsock_encode_addr(uint8_t *base, uint_t offset, uint16_t ext,
1538 1539 const ipsec_selkey_t *sel, const ipsec_addr_t *addr, uint_t pfxlen)
1539 1540 {
1540 1541 struct spd_address *sae;
1541 1542 ipsec_addr_t *spdaddr;
1542 1543 uint_t start = offset;
1543 1544 uint_t addrlen;
1544 1545 uint_t af;
1545 1546
1546 1547 if (sel->ipsl_valid & IPSL_IPV4) {
1547 1548 af = AF_INET;
1548 1549 addrlen = IP_ADDR_LEN;
1549 1550 } else {
1550 1551 af = AF_INET6;
1551 1552 addrlen = IPV6_ADDR_LEN;
1552 1553 }
1553 1554
1554 1555 ASSERT(ALIGNED64(offset));
1555 1556
1556 1557 if (base != NULL) {
1557 1558 sae = (struct spd_address *)(base + offset);
1558 1559 sae->spd_address_exttype = ext;
1559 1560 sae->spd_address_af = af;
1560 1561 sae->spd_address_prefixlen = pfxlen;
1561 1562 sae->spd_address_reserved2 = 0;
1562 1563
1563 1564 spdaddr = (ipsec_addr_t *)(&sae[1]);
1564 1565 bcopy(addr, spdaddr, addrlen);
1565 1566 }
1566 1567 offset += sizeof (*sae);
1567 1568 addrlen = roundup(addrlen, sizeof (uint64_t));
1568 1569 offset += addrlen;
1569 1570
1570 1571 ASSERT(ALIGNED64(offset));
1571 1572
1572 1573 if (base != NULL)
1573 1574 sae->spd_address_len = SPD_8TO64(offset - start);
1574 1575 return (offset);
1575 1576 }
1576 1577
1577 1578 static uint_t
1578 1579 spdsock_encode_sel(uint8_t *base, uint_t offset, const ipsec_sel_t *sel)
1579 1580 {
1580 1581 const ipsec_selkey_t *selkey = &sel->ipsl_key;
1581 1582
1582 1583 if (selkey->ipsl_valid & IPSL_PROTOCOL)
1583 1584 offset = spdsock_encode_proto(base, offset, selkey->ipsl_proto);
1584 1585 if (selkey->ipsl_valid & IPSL_LOCAL_PORT)
1585 1586 offset = spdsock_encode_port(base, offset, SPD_EXT_LCLPORT,
1586 1587 selkey->ipsl_lport);
1587 1588 if (selkey->ipsl_valid & IPSL_REMOTE_PORT)
1588 1589 offset = spdsock_encode_port(base, offset, SPD_EXT_REMPORT,
1589 1590 selkey->ipsl_rport);
1590 1591 if (selkey->ipsl_valid & IPSL_REMOTE_ADDR)
1591 1592 offset = spdsock_encode_addr(base, offset, SPD_EXT_REMADDR,
1592 1593 selkey, &selkey->ipsl_remote, selkey->ipsl_remote_pfxlen);
1593 1594 if (selkey->ipsl_valid & IPSL_LOCAL_ADDR)
1594 1595 offset = spdsock_encode_addr(base, offset, SPD_EXT_LCLADDR,
1595 1596 selkey, &selkey->ipsl_local, selkey->ipsl_local_pfxlen);
1596 1597 if (selkey->ipsl_valid & IPSL_ICMP_TYPE) {
1597 1598 offset = spdsock_encode_typecode(base, offset,
1598 1599 selkey->ipsl_icmp_type, selkey->ipsl_icmp_type_end,
1599 1600 (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
1600 1601 selkey->ipsl_icmp_code : 255,
1601 1602 (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
1602 1603 selkey->ipsl_icmp_code_end : 255);
1603 1604 }
1604 1605 return (offset);
1605 1606 }
1606 1607
1607 1608 static uint_t
1608 1609 spdsock_encode_actattr(uint8_t *base, uint_t offset, uint32_t tag,
1609 1610 uint32_t value)
1610 1611 {
1611 1612 struct spd_attribute *attr;
1612 1613
1613 1614 ASSERT(ALIGNED64(offset));
1614 1615
1615 1616 if (base != NULL) {
1616 1617 attr = (struct spd_attribute *)(base + offset);
1617 1618 attr->spd_attr_tag = tag;
1618 1619 attr->spd_attr_value = value;
1619 1620 }
1620 1621 offset += sizeof (struct spd_attribute);
1621 1622
1622 1623 ASSERT(ALIGNED64(offset));
1623 1624
1624 1625 return (offset);
1625 1626 }
1626 1627
1627 1628
1628 1629 #define EMIT(t, v) offset = spdsock_encode_actattr(base, offset, (t), (v))
1629 1630
1630 1631 static uint_t
1631 1632 spdsock_encode_action(uint8_t *base, uint_t offset, const ipsec_action_t *ap)
1632 1633 {
1633 1634 const struct ipsec_act *act = &(ap->ipa_act);
1634 1635 uint_t flags;
1635 1636
1636 1637 EMIT(SPD_ATTR_EMPTY, 0);
1637 1638 switch (act->ipa_type) {
1638 1639 case IPSEC_ACT_DISCARD:
1639 1640 case IPSEC_ACT_REJECT:
1640 1641 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_DROP);
1641 1642 break;
1642 1643 case IPSEC_ACT_BYPASS:
1643 1644 case IPSEC_ACT_CLEAR:
1644 1645 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_PASS);
1645 1646 break;
1646 1647
1647 1648 case IPSEC_ACT_APPLY:
1648 1649 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_IPSEC);
1649 1650 flags = 0;
1650 1651 if (act->ipa_apply.ipp_use_ah)
1651 1652 flags |= SPD_APPLY_AH;
1652 1653 if (act->ipa_apply.ipp_use_esp)
1653 1654 flags |= SPD_APPLY_ESP;
1654 1655 if (act->ipa_apply.ipp_use_espa)
1655 1656 flags |= SPD_APPLY_ESPA;
1656 1657 if (act->ipa_apply.ipp_use_se)
1657 1658 flags |= SPD_APPLY_SE;
1658 1659 if (act->ipa_apply.ipp_use_unique)
1659 1660 flags |= SPD_APPLY_UNIQUE;
1660 1661 EMIT(SPD_ATTR_FLAGS, flags);
1661 1662 if (flags & SPD_APPLY_AH) {
1662 1663 EMIT(SPD_ATTR_AH_AUTH, act->ipa_apply.ipp_auth_alg);
1663 1664 EMIT(SPD_ATTR_AH_MINBITS,
1664 1665 act->ipa_apply.ipp_ah_minbits);
1665 1666 EMIT(SPD_ATTR_AH_MAXBITS,
1666 1667 act->ipa_apply.ipp_ah_maxbits);
1667 1668 }
1668 1669 if (flags & SPD_APPLY_ESP) {
1669 1670 EMIT(SPD_ATTR_ESP_ENCR, act->ipa_apply.ipp_encr_alg);
1670 1671 EMIT(SPD_ATTR_ENCR_MINBITS,
1671 1672 act->ipa_apply.ipp_espe_minbits);
1672 1673 EMIT(SPD_ATTR_ENCR_MAXBITS,
1673 1674 act->ipa_apply.ipp_espe_maxbits);
1674 1675 if (flags & SPD_APPLY_ESPA) {
1675 1676 EMIT(SPD_ATTR_ESP_AUTH,
1676 1677 act->ipa_apply.ipp_esp_auth_alg);
1677 1678 EMIT(SPD_ATTR_ESPA_MINBITS,
1678 1679 act->ipa_apply.ipp_espa_minbits);
1679 1680 EMIT(SPD_ATTR_ESPA_MAXBITS,
1680 1681 act->ipa_apply.ipp_espa_maxbits);
1681 1682 }
1682 1683 }
1683 1684 if (act->ipa_apply.ipp_km_proto != 0)
1684 1685 EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_proto);
1685 1686 if (act->ipa_apply.ipp_km_cookie != 0)
1686 1687 EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_cookie);
1687 1688 if (act->ipa_apply.ipp_replay_depth != 0)
1688 1689 EMIT(SPD_ATTR_REPLAY_DEPTH,
1689 1690 act->ipa_apply.ipp_replay_depth);
1690 1691 /* Add more here */
1691 1692 break;
1692 1693 }
1693 1694
1694 1695 return (offset);
1695 1696 }
1696 1697
1697 1698 static uint_t
1698 1699 spdsock_encode_action_list(uint8_t *base, uint_t offset,
1699 1700 const ipsec_action_t *ap)
1700 1701 {
1701 1702 struct spd_ext_actions *act;
1702 1703 uint_t nact = 0;
1703 1704 uint_t start = offset;
1704 1705
1705 1706 ASSERT(ALIGNED64(offset));
1706 1707
1707 1708 if (base != NULL) {
1708 1709 act = (struct spd_ext_actions *)(base + offset);
1709 1710 act->spd_actions_len = 0;
1710 1711 act->spd_actions_exttype = SPD_EXT_ACTION;
1711 1712 act->spd_actions_count = 0;
1712 1713 act->spd_actions_reserved = 0;
1713 1714 }
1714 1715
1715 1716 offset += sizeof (*act);
1716 1717
1717 1718 ASSERT(ALIGNED64(offset));
1718 1719
1719 1720 while (ap != NULL) {
1720 1721 offset = spdsock_encode_action(base, offset, ap);
1721 1722 ap = ap->ipa_next;
1722 1723 nact++;
1723 1724 if (ap != NULL) {
1724 1725 EMIT(SPD_ATTR_NEXT, 0);
1725 1726 }
1726 1727 }
1727 1728 EMIT(SPD_ATTR_END, 0);
1728 1729
1729 1730 ASSERT(ALIGNED64(offset));
1730 1731
1731 1732 if (base != NULL) {
1732 1733 act->spd_actions_count = nact;
1733 1734 act->spd_actions_len = SPD_8TO64(offset - start);
1734 1735 }
1735 1736
1736 1737 return (offset);
1737 1738 }
1738 1739
1739 1740 #undef EMIT
1740 1741
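The encoders above serialize each action as a flat, 64-bit-aligned run of
(tag, value) spd_attribute pairs: SPD_ATTR_EMPTY opens an action,
SPD_ATTR_NEXT separates chained actions, and SPD_ATTR_END terminates the
run. A minimal consumer-side walker (a hypothetical sketch, not part of
this change) might look like:

	static uint_t
	count_encoded_actions(const struct spd_attribute *attr)
	{
		/* Assumes a non-empty run: at least one action precedes END. */
		uint_t nact = 1;

		for (;;) {
			switch (attr->spd_attr_tag) {
			case SPD_ATTR_END:
				return (nact);
			case SPD_ATTR_NEXT:
				nact++;		/* separator between actions */
				break;
			default:
				break;		/* (tag, value) of current action */
			}
			attr++;
		}
	}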
1741 1742 /* ARGSUSED */
1742 1743 static uint_t
1743 1744 spdsock_rule_flags(uint_t dir, uint_t af)
1744 1745 {
1745 1746 uint_t flags = 0;
1746 1747
1747 1748 if (dir == IPSEC_TYPE_INBOUND)
1748 1749 flags |= SPD_RULE_FLAG_INBOUND;
1749 1750 if (dir == IPSEC_TYPE_OUTBOUND)
1750 1751 flags |= SPD_RULE_FLAG_OUTBOUND;
1751 1752
1752 1753 return (flags);
1753 1754 }
1754 1755
1755 1756
1756 1757 static uint_t
1757 1758 spdsock_encode_rule_head(uint8_t *base, uint_t offset, spd_msg_t *req,
1758 1759 const ipsec_policy_t *rule, uint_t dir, uint_t af, char *name,
1759 1760 boolean_t tunnel)
1760 1761 {
1761 1762 struct spd_msg *spmsg;
1762 1763 struct spd_rule *spr;
1763 1764 spd_if_t *sid;
1764 1765
1765 1766 uint_t start = offset;
1766 1767
1767 1768 ASSERT(ALIGNED64(offset));
1768 1769
1769 1770 if (base != NULL) {
1770 1771 spmsg = (struct spd_msg *)(base + offset);
1771 1772 bzero(spmsg, sizeof (*spmsg));
1772 1773 spmsg->spd_msg_version = PF_POLICY_V1;
1773 1774 spmsg->spd_msg_type = SPD_DUMP;
1774 1775 spmsg->spd_msg_seq = req->spd_msg_seq;
1775 1776 spmsg->spd_msg_pid = req->spd_msg_pid;
1776 1777 }
1777 1778 offset += sizeof (struct spd_msg);
1778 1779
1779 1780 ASSERT(ALIGNED64(offset));
1780 1781
1781 1782 if (base != NULL) {
1782 1783 spr = (struct spd_rule *)(base + offset);
1783 1784 spr->spd_rule_type = SPD_EXT_RULE;
1784 1785 spr->spd_rule_priority = rule->ipsp_prio;
1785 1786 spr->spd_rule_flags = spdsock_rule_flags(dir, af);
1786 1787 if (tunnel)
1787 1788 spr->spd_rule_flags |= SPD_RULE_FLAG_TUNNEL;
1788 1789 spr->spd_rule_unused = 0;
1789 1790 spr->spd_rule_len = SPD_8TO64(sizeof (*spr));
1790 1791 spr->spd_rule_index = rule->ipsp_index;
1791 1792 }
1792 1793 offset += sizeof (struct spd_rule);
1793 1794
1794 1795 /*
1795 1796 * If we have an interface name (i.e. if this policy head came from
1796 1797 * a tunnel), add the SPD_EXT_TUN_NAME extension.
1797 1798 */
1798 1799 if (name != NULL) {
1799 1800
1800 1801 ASSERT(ALIGNED64(offset));
1801 1802
1802 1803 if (base != NULL) {
1803 1804 sid = (spd_if_t *)(base + offset);
1804 1805 sid->spd_if_exttype = SPD_EXT_TUN_NAME;
1805 1806 sid->spd_if_len = SPD_8TO64(sizeof (spd_if_t) +
1806 1807 roundup((strlen(name) - 4), 8));
1807 1808 (void) strlcpy((char *)sid->spd_if_name, name,
1808 1809 LIFNAMSIZ);
1809 1810 }
1810 1811
1811 1812 offset += sizeof (spd_if_t) + roundup((strlen(name) - 4), 8);
1812 1813 }
1813 1814
1814 1815 offset = spdsock_encode_sel(base, offset, rule->ipsp_sel);
1815 1816 offset = spdsock_encode_action_list(base, offset, rule->ipsp_act);
1816 1817
1817 1818 ASSERT(ALIGNED64(offset));
1818 1819
1819 1820 if (base != NULL) {
1820 1821 spmsg->spd_msg_len = SPD_8TO64(offset - start);
1821 1822 }
1822 1823 return (offset);
1823 1824 }
1824 1825
1825 1826 /* ARGSUSED */
1826 1827 static mblk_t *
1827 1828 spdsock_encode_rule(mblk_t *req, const ipsec_policy_t *rule,
1828 1829 uint_t dir, uint_t af, char *name, boolean_t tunnel)
1829 1830 {
1830 1831 mblk_t *m;
1831 1832 uint_t len;
1832 1833 spd_msg_t *mreq = (spd_msg_t *)req->b_rptr;
1833 1834
1834 1835 /*
1835 1836 * Figure out how much space we'll need.
1836 1837 */
1837 1838 len = spdsock_encode_rule_head(NULL, 0, mreq, rule, dir, af, name,
1838 1839 tunnel);
1839 1840
1840 1841 /*
1841 1842 * Allocate mblk.
1842 1843 */
1843 1844 m = allocb(len, BPRI_HI);
1844 1845 if (m == NULL)
1845 1846 return (NULL);
1846 1847
1847 1848 /*
1848 1849 * Fill it in..
1849 1850 */
1850 1851 m->b_wptr = m->b_rptr + len;
1851 1852 bzero(m->b_rptr, len);
1852 1853 (void) spdsock_encode_rule_head(m->b_rptr, 0, mreq, rule, dir, af,
1853 1854 name, tunnel);
1854 1855 return (m);
1855 1856 }
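Note the two-pass idiom here: every encode routine above treats a NULL base
as a request for pure length accounting, which lets spdsock_encode_rule()
size the mblk exactly before filling it. A condensed sketch of the same
pattern (hypothetical helper name, assuming an encoder with the base/offset
signature used above):

	static mblk_t *
	encode_to_mblk(uint_t (*encode)(uint8_t *, uint_t, void *), void *arg)
	{
		uint_t len = encode(NULL, 0, arg);	/* pass 1: compute length */
		mblk_t *m = allocb(len, BPRI_HI);

		if (m == NULL)
			return (NULL);
		m->b_wptr = m->b_rptr + len;
		bzero(m->b_rptr, len);
		(void) encode(m->b_rptr, 0, arg);	/* pass 2: fill the buffer */
		return (m);
	}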
1856 1857
1857 1858 static ipsec_policy_t *
1858 1859 spdsock_dump_next_in_chain(spdsock_t *ss, ipsec_policy_head_t *iph,
1859 1860 ipsec_policy_t *cur)
1860 1861 {
1861 1862 ASSERT(RW_READ_HELD(&iph->iph_lock));
1862 1863
1863 1864 ss->spdsock_dump_count++;
1864 1865 ss->spdsock_dump_cur_rule = cur->ipsp_hash.hash_next;
1865 1866 return (cur);
1866 1867 }
1867 1868
1868 1869 static ipsec_policy_t *
1869 1870 spdsock_dump_next_rule(spdsock_t *ss, ipsec_policy_head_t *iph)
1870 1871 {
1871 1872 ipsec_policy_t *cur;
1872 1873 ipsec_policy_root_t *ipr;
1873 1874 int chain, nchains, type, af;
1874 1875
1875 1876 ASSERT(RW_READ_HELD(&iph->iph_lock));
1876 1877
1877 1878 cur = ss->spdsock_dump_cur_rule;
1878 1879
1879 1880 if (cur != NULL)
1880 1881 return (spdsock_dump_next_in_chain(ss, iph, cur));
1881 1882
1882 1883 type = ss->spdsock_dump_cur_type;
1883 1884
1884 1885 next:
1885 1886 chain = ss->spdsock_dump_cur_chain;
1886 1887 ipr = &iph->iph_root[type];
1887 1888 nchains = ipr->ipr_nchains;
1888 1889
1889 1890 while (chain < nchains) {
1890 1891 cur = ipr->ipr_hash[chain].hash_head;
1891 1892 chain++;
1892 1893 if (cur != NULL) {
1893 1894 ss->spdsock_dump_cur_chain = chain;
1894 1895 return (spdsock_dump_next_in_chain(ss, iph, cur));
1895 1896 }
1896 1897 }
1897 1898 ss->spdsock_dump_cur_chain = nchains;
1898 1899
1899 1900 af = ss->spdsock_dump_cur_af;
1900 1901 while (af < IPSEC_NAF) {
1901 1902 cur = ipr->ipr_nonhash[af];
1902 1903 af++;
1903 1904 if (cur != NULL) {
1904 1905 ss->spdsock_dump_cur_af = af;
1905 1906 return (spdsock_dump_next_in_chain(ss, iph, cur));
1906 1907 }
1907 1908 }
1908 1909
1909 1910 type++;
1910 1911 if (type >= IPSEC_NTYPES)
1911 1912 return (NULL);
1912 1913
1913 1914 ss->spdsock_dump_cur_chain = 0;
1914 1915 ss->spdsock_dump_cur_type = type;
1915 1916 ss->spdsock_dump_cur_af = IPSEC_AF_V4;
1916 1917 goto next;
1917 1918
1918 1919 }
1919 1920
1920 1921 /*
1921 1922 * If we're done with one policy head, but have more to go, we iterate through
1922 1923 * another IPsec tunnel policy head (itp). Return NULL if it is an error
1923 1924 * worthy of returning EAGAIN via PF_POLICY.
1924 1925 */
1925 1926 static ipsec_tun_pol_t *
1926 1927 spdsock_dump_iterate_next_tunnel(spdsock_t *ss, ipsec_stack_t *ipss)
1927 1928 {
1928 1929 ipsec_tun_pol_t *itp;
1929 1930
1930 1931 ASSERT(RW_READ_HELD(&ipss->ipsec_tunnel_policy_lock));
1931 1932 if (ipss->ipsec_tunnel_policy_gen > ss->spdsock_dump_tun_gen) {
1932 1933 /* Oops, state of the tunnel polheads changed. */
1933 1934 itp = NULL;
1934 1935 } else if (ss->spdsock_itp == NULL) {
1935 1936 /* Just finished global, find first node. */
1936 1937 itp = avl_first(&ipss->ipsec_tunnel_policies);
1937 1938 } else {
1938 1939 /* We just finished current polhead, find the next one. */
1939 1940 itp = AVL_NEXT(&ipss->ipsec_tunnel_policies, ss->spdsock_itp);
1940 1941 }
1941 1942 if (itp != NULL) {
1942 1943 ITP_REFHOLD(itp);
1943 1944 }
1944 1945 if (ss->spdsock_itp != NULL) {
1945 1946 ITP_REFRELE(ss->spdsock_itp, ipss->ipsec_netstack);
1946 1947 }
1947 1948 ss->spdsock_itp = itp;
1948 1949 return (itp);
1949 1950 }
1950 1951
1951 1952 static mblk_t *
1952 1953 spdsock_dump_next_record(spdsock_t *ss)
1953 1954 {
1954 1955 ipsec_policy_head_t *iph;
1955 1956 ipsec_policy_t *rule;
1956 1957 mblk_t *m;
1957 1958 ipsec_tun_pol_t *itp;
1958 1959 netstack_t *ns = ss->spdsock_spds->spds_netstack;
1959 1960 ipsec_stack_t *ipss = ns->netstack_ipsec;
1960 1961
1961 1962 iph = ss->spdsock_dump_head;
1962 1963
1963 1964 ASSERT(iph != NULL);
1964 1965
1965 1966 rw_enter(&iph->iph_lock, RW_READER);
1966 1967
1967 1968 if (iph->iph_gen != ss->spdsock_dump_gen) {
1968 1969 rw_exit(&iph->iph_lock);
1969 1970 return (spdsock_dump_finish(ss, EAGAIN));
1970 1971 }
1971 1972
1972 1973 while ((rule = spdsock_dump_next_rule(ss, iph)) == NULL) {
1973 1974 rw_exit(&iph->iph_lock);
1974 1975 if (--(ss->spdsock_dump_remaining_polheads) == 0)
1975 1976 return (spdsock_dump_finish(ss, 0));
1976 1977
1977 1978
1978 1979 /*
1979 1980 * If we reach here, we have more policy heads (tunnel
1980 1981 * entries) to dump. Let's reset to a new policy head
1981 1982 * and get some more rules.
1982 1983 *
1983 1984 * An empty policy head will have spdsock_dump_next_rule()
1984 1985 * return NULL, and we loop (while dropping the number of
1985 1986 * remaining polheads). If we loop to 0, we finish. We
1986 1987 * keep looping until we hit 0 or until we have a rule to
1987 1988 * encode.
1988 1989 *
1989 1990 * NOTE: No need for ITP_REF*() macros here as we're only
1990 1991 * going after and refholding the policy head itself.
1991 1992 */
1992 1993 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
1993 1994 itp = spdsock_dump_iterate_next_tunnel(ss, ipss);
1994 1995 if (itp == NULL) {
1995 1996 rw_exit(&ipss->ipsec_tunnel_policy_lock);
1996 1997 return (spdsock_dump_finish(ss, EAGAIN));
1997 1998 }
1998 1999
1999 2000 /* Reset other spdsock_dump thingies. */
2000 2001 IPPH_REFRELE(ss->spdsock_dump_head, ns);
2001 2002 if (ss->spdsock_dump_active) {
2002 2003 ss->spdsock_dump_tunnel =
2003 2004 itp->itp_flags & ITPF_P_TUNNEL;
2004 2005 iph = itp->itp_policy;
2005 2006 } else {
2006 2007 ss->spdsock_dump_tunnel =
2007 2008 itp->itp_flags & ITPF_I_TUNNEL;
2008 2009 iph = itp->itp_inactive;
2009 2010 }
2010 2011 IPPH_REFHOLD(iph);
2011 2012 rw_exit(&ipss->ipsec_tunnel_policy_lock);
2012 2013
2013 2014 rw_enter(&iph->iph_lock, RW_READER);
2014 2015 RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);
2015 2016 }
2016 2017
2017 2018 m = spdsock_encode_rule(ss->spdsock_dump_req, rule,
2018 2019 ss->spdsock_dump_cur_type, ss->spdsock_dump_cur_af,
2019 2020 (ss->spdsock_itp == NULL) ? NULL : ss->spdsock_itp->itp_name,
2020 2021 ss->spdsock_dump_tunnel);
2021 2022 rw_exit(&iph->iph_lock);
2022 2023
2023 2024 if (m == NULL)
2024 2025 return (spdsock_dump_finish(ss, ENOMEM));
2025 2026 return (m);
2026 2027 }
2027 2028
2028 2029 /*
2029 2030 * Dump records until we run into flow-control back-pressure.
2030 2031 */
2031 2032 static void
2032 2033 spdsock_dump_some(queue_t *q, spdsock_t *ss)
2033 2034 {
2034 2035 mblk_t *m, *dataind;
2035 2036
2036 2037 while ((ss->spdsock_dump_req != NULL) && canputnext(q)) {
2037 2038 m = spdsock_dump_next_record(ss);
2038 2039 if (m == NULL)
2039 2040 return;
2040 2041 dataind = allocb(sizeof (struct T_data_req), BPRI_HI);
2041 2042 if (dataind == NULL) {
2042 2043 freemsg(m);
2043 2044 return;
2044 2045 }
2045 2046 dataind->b_cont = m;
2046 2047 dataind->b_wptr += sizeof (struct T_data_req);
2047 2048 ((struct T_data_ind *)dataind->b_rptr)->PRIM_type = T_DATA_IND;
2048 2049 ((struct T_data_ind *)dataind->b_rptr)->MORE_flag = 0;
2049 2050 dataind->b_datap->db_type = M_PROTO;
2050 2051 putnext(q, dataind);
2051 2052 }
2052 2053 }
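spdsock_dump_some() emits one T_DATA_IND-wrapped record per iteration and
stops as soon as canputnext() reports downstream back-pressure; the
read-side service procedure (described at the end of this file) resumes the
dump once the stream is back-enabled. A sketch of that resume path,
assuming only the shape described in that later comment:

	static void
	spdsock_rsrv_sketch(queue_t *q)
	{
		spdsock_t *ss = (spdsock_t *)q->q_ptr;

		/* Back-enabled: pick up the dump where flow control stopped it. */
		if (ss->spdsock_dump_req != NULL)
			spdsock_dump_some(q, ss);
	}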
2053 2054
2054 2055 /*
2055 2056 * Start dumping.
2056 2057 * Format a start-of-dump record, and set up the stream and kick the rsrv
2057 2058 * procedure to continue the job..
2058 2059 */
2059 2060 /* ARGSUSED */
2060 2061 static void
2061 2062 spdsock_dump(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp)
2062 2063 {
2063 2064 spdsock_t *ss = (spdsock_t *)q->q_ptr;
2064 2065 netstack_t *ns = ss->spdsock_spds->spds_netstack;
2065 2066 ipsec_stack_t *ipss = ns->netstack_ipsec;
2066 2067 mblk_t *mr;
2067 2068
2068 2069 /* spdsock_open() already set spdsock_itp to NULL. */
2069 2070 if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
2070 2071 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
2071 2072 ss->spdsock_dump_remaining_polheads = 1 +
2072 2073 avl_numnodes(&ipss->ipsec_tunnel_policies);
2073 2074 ss->spdsock_dump_tun_gen = ipss->ipsec_tunnel_policy_gen;
2074 2075 rw_exit(&ipss->ipsec_tunnel_policy_lock);
2075 2076 if (iph == ALL_ACTIVE_POLHEADS) {
2076 2077 iph = ipsec_system_policy(ns);
2077 2078 ss->spdsock_dump_active = B_TRUE;
2078 2079 } else {
2079 2080 iph = ipsec_inactive_policy(ns);
2080 2081 ss->spdsock_dump_active = B_FALSE;
2081 2082 }
2082 2083 ASSERT(ss->spdsock_itp == NULL);
2083 2084 } else {
2084 2085 ss->spdsock_dump_remaining_polheads = 1;
2085 2086 }
2086 2087
2087 2088 rw_enter(&iph->iph_lock, RW_READER);
2088 2089
2089 2090 mr = spdsock_dump_ruleset(mp, iph, 0, 0);
2090 2091
2091 2092 if (!mr) {
2092 2093 rw_exit(&iph->iph_lock);
2093 2094 spdsock_error(q, mp, ENOMEM, 0);
2094 2095 return;
2095 2096 }
2096 2097
2097 2098 ss->spdsock_dump_req = mp;
2098 2099 RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);
2099 2100
2100 2101 rw_exit(&iph->iph_lock);
2101 2102
2102 2103 qreply(q, mr);
2103 2104 qenable(OTHERQ(q));
2104 2105 }
2105 2106
2106 2107 /* Do NOT consume a reference to ITP. */
2107 2108 void
2108 2109 spdsock_clone_node(ipsec_tun_pol_t *itp, void *ep, netstack_t *ns)
2109 2110 {
2110 2111 int *errptr = (int *)ep;
2111 2112
2112 2113 if (*errptr != 0)
2113 2114 return; /* We've failed already for some reason. */
2114 2115 mutex_enter(&itp->itp_lock);
2115 2116 ITPF_CLONE(itp->itp_flags);
2116 2117 *errptr = ipsec_copy_polhead(itp->itp_policy, itp->itp_inactive, ns);
2117 2118 mutex_exit(&itp->itp_lock);
2118 2119 }
2119 2120
2120 2121 void
2121 2122 spdsock_clone(queue_t *q, mblk_t *mp, spd_if_t *tunname)
2122 2123 {
2123 2124 int error;
2124 2125 char *tname;
2125 2126 ipsec_tun_pol_t *itp;
2126 2127 spdsock_t *ss = (spdsock_t *)q->q_ptr;
2127 2128 netstack_t *ns = ss->spdsock_spds->spds_netstack;
2128 2129 uint32_t auditing = AU_AUDITING();
2129 2130
2130 2131 if (tunname != NULL) {
2131 2132 tname = (char *)tunname->spd_if_name;
2132 2133 if (*tname == '\0') {
2133 2134 error = ipsec_clone_system_policy(ns);
2134 2135 if (auditing) {
2135 2136 boolean_t active;
2136 2137 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2137 2138 cred_t *cr;
2138 2139 pid_t cpid;
2139 2140
2140 2141 cr = msg_getcred(mp, &cpid);
2141 2142 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2142 2143 audit_pf_policy(SPD_CLONE, cr, ns,
2143 2144 NULL, active, error, cpid);
2144 2145 }
2145 2146 if (error == 0) {
2146 2147 itp_walk(spdsock_clone_node, &error, ns);
2147 2148 if (auditing) {
2148 2149 boolean_t active;
2149 2150 spd_msg_t *spmsg =
2150 2151 (spd_msg_t *)mp->b_rptr;
2151 2152 cred_t *cr;
2152 2153 pid_t cpid;
2153 2154
2154 2155 cr = msg_getcred(mp, &cpid);
2155 2156 active = (spmsg->spd_msg_spdid ==
2156 2157 SPD_ACTIVE);
2157 2158 audit_pf_policy(SPD_CLONE, cr,
2158 2159 ns, "all tunnels", active, 0,
2159 2160 cpid);
2160 2161 }
2161 2162 }
2162 2163 } else {
2163 2164 itp = get_tunnel_policy(tname, ns);
2164 2165 if (itp == NULL) {
2165 2166 spdsock_error(q, mp, ENOENT, 0);
2166 2167 if (auditing) {
2167 2168 boolean_t active;
2168 2169 spd_msg_t *spmsg =
2169 2170 (spd_msg_t *)mp->b_rptr;
2170 2171 cred_t *cr;
2171 2172 pid_t cpid;
2172 2173
2173 2174 cr = msg_getcred(mp, &cpid);
2174 2175 active = (spmsg->spd_msg_spdid ==
2175 2176 SPD_ACTIVE);
2176 2177 audit_pf_policy(SPD_CLONE, cr,
2177 2178 ns, NULL, active, ENOENT, cpid);
2178 2179 }
2179 2180 return;
2180 2181 }
2181 2182 spdsock_clone_node(itp, &error, NULL);
2182 2183 if (auditing) {
2183 2184 boolean_t active;
2184 2185 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2185 2186 cred_t *cr;
2186 2187 pid_t cpid;
2187 2188
2188 2189 cr = msg_getcred(mp, &cpid);
2189 2190 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2190 2191 audit_pf_policy(SPD_CLONE, cr, ns,
2191 2192 ITP_NAME(itp), active, error, cpid);
2192 2193 }
2193 2194 ITP_REFRELE(itp, ns);
2194 2195 }
2195 2196 } else {
2196 2197 error = ipsec_clone_system_policy(ns);
2197 2198 if (auditing) {
2198 2199 boolean_t active;
2199 2200 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2200 2201 cred_t *cr;
2201 2202 pid_t cpid;
2202 2203
2203 2204 cr = msg_getcred(mp, &cpid);
2204 2205 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2205 2206 audit_pf_policy(SPD_CLONE, cr, ns, NULL,
2206 2207 active, error, cpid);
2207 2208 }
2208 2209 }
2209 2210
2210 2211 if (error != 0)
2211 2212 spdsock_error(q, mp, error, 0);
2212 2213 else
2213 2214 spd_echo(q, mp);
2214 2215 }
2215 2216
2216 2217 /*
2217 2218 * Process a SPD_ALGLIST request. The caller expects separate alg entries
2218 2219 * for AH authentication, ESP authentication, and ESP encryption.
2219 2220 * The same distinction is then used when setting the min and max key
2220 2221 * sizes when defining policies.
2221 2222 */
2222 2223
2223 2224 #define SPDSOCK_AH_AUTH 0
2224 2225 #define SPDSOCK_ESP_AUTH 1
2225 2226 #define SPDSOCK_ESP_ENCR 2
2226 2227 #define SPDSOCK_NTYPES 3
2227 2228
2228 2229 static const uint_t algattr[SPDSOCK_NTYPES] = {
2229 2230 SPD_ATTR_AH_AUTH,
2230 2231 SPD_ATTR_ESP_AUTH,
2231 2232 SPD_ATTR_ESP_ENCR
2232 2233 };
2233 2234 static const uint_t minbitsattr[SPDSOCK_NTYPES] = {
2234 2235 SPD_ATTR_AH_MINBITS,
2235 2236 SPD_ATTR_ESPA_MINBITS,
2236 2237 SPD_ATTR_ENCR_MINBITS
2237 2238 };
2238 2239 static const uint_t maxbitsattr[SPDSOCK_NTYPES] = {
2239 2240 SPD_ATTR_AH_MAXBITS,
2240 2241 SPD_ATTR_ESPA_MAXBITS,
2241 2242 SPD_ATTR_ENCR_MAXBITS
2242 2243 };
2243 2244 static const uint_t defbitsattr[SPDSOCK_NTYPES] = {
2244 2245 SPD_ATTR_AH_DEFBITS,
2245 2246 SPD_ATTR_ESPA_DEFBITS,
2246 2247 SPD_ATTR_ENCR_DEFBITS
2247 2248 };
2248 2249 static const uint_t incrbitsattr[SPDSOCK_NTYPES] = {
2249 2250 SPD_ATTR_AH_INCRBITS,
2250 2251 SPD_ATTR_ESPA_INCRBITS,
2251 2252 SPD_ATTR_ENCR_INCRBITS
2252 2253 };
2253 2254
2254 2255 #define ATTRPERALG 6 /* fixed attributes per algs */
2255 2256
2256 2257 void
2257 2258 spdsock_alglist(queue_t *q, mblk_t *mp)
2258 2259 {
2259 2260 uint_t algtype;
2260 2261 uint_t algidx;
2261 2262 uint_t algcount;
2262 2263 uint_t size;
2263 2264 mblk_t *m;
2264 2265 uint8_t *cur;
2265 2266 spd_msg_t *msg;
2266 2267 struct spd_ext_actions *act;
2267 2268 struct spd_attribute *attr;
2268 2269 spdsock_t *ss = (spdsock_t *)q->q_ptr;
2269 2270 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;
2270 2271
2271 - mutex_enter(&ipss->ipsec_alg_lock);
2272 + rw_enter(&ipss->ipsec_alg_lock, RW_READER);
2272 2273 /*
2273 2274 * The SPD client expects to receive separate entries for
2274 2275 * AH authentication and ESP authentication supported algorithms.
2275 2276 *
2276 2277 * Don't return the "any" algorithms, if defined, as no
2277 2278 * kernel policies can be set for these algorithms.
2278 2279 */
2279 2280 algcount = 2 * ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
2280 2281 ipss->ipsec_nalgs[IPSEC_ALG_ENCR];
2281 2282
2282 2283 if (ipss->ipsec_alglists[IPSEC_ALG_AUTH][SADB_AALG_NONE] != NULL)
2283 2284 algcount--;
2284 2285 if (ipss->ipsec_alglists[IPSEC_ALG_ENCR][SADB_EALG_NONE] != NULL)
2285 2286 algcount--;
2286 2287
2287 2288 /*
2288 2289 * For each algorithm, we encode:
2289 2290 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
2290 2291 */
2291 2292
2292 2293 size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions) +
2293 2294 ATTRPERALG * sizeof (struct spd_attribute) * algcount;
2294 2295
2295 2296 ASSERT(ALIGNED64(size));
2296 2297
2297 2298 m = allocb(size, BPRI_HI);
2298 2299 if (m == NULL) {
2299 - mutex_exit(&ipss->ipsec_alg_lock);
2300 + rw_exit(&ipss->ipsec_alg_lock);
2300 2301 spdsock_error(q, mp, ENOMEM, 0);
2301 2302 return;
2302 2303 }
2303 2304
2304 2305 m->b_wptr = m->b_rptr + size;
2305 2306 cur = m->b_rptr;
2306 2307
2307 2308 msg = (spd_msg_t *)cur;
2308 2309 bcopy(mp->b_rptr, cur, sizeof (*msg));
2309 2310
2310 2311 msg->spd_msg_len = SPD_8TO64(size);
2311 2312 msg->spd_msg_errno = 0;
2312 2313 msg->spd_msg_diagnostic = 0;
2313 2314
2314 2315 cur += sizeof (*msg);
2315 2316
2316 2317 act = (struct spd_ext_actions *)cur;
2317 2318 cur += sizeof (*act);
2318 2319
2319 2320 act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
2320 2321 act->spd_actions_exttype = SPD_EXT_ACTION;
2321 2322 act->spd_actions_count = algcount;
2322 2323 act->spd_actions_reserved = 0;
2323 2324
2324 2325 attr = (struct spd_attribute *)cur;
2325 2326
2326 2327 #define EMIT(tag, value) { \
2327 2328 attr->spd_attr_tag = (tag); \
2328 2329 attr->spd_attr_value = (value); \
2329 2330 attr++; \
2330 2331 }
2331 2332
2332 2333 /*
2333 2334 * If you change the number of EMIT's here, change
2334 2335 * ATTRPERALG above to match
2335 2336 */
2336 2337 #define EMITALGATTRS(_type) { \
2337 2338 EMIT(algattr[_type], algid); /* 1 */ \
2338 2339 EMIT(minbitsattr[_type], minbits); /* 2 */ \
2339 2340 EMIT(maxbitsattr[_type], maxbits); /* 3 */ \
2340 2341 EMIT(defbitsattr[_type], defbits); /* 4 */ \
2341 2342 EMIT(incrbitsattr[_type], incr); /* 5 */ \
2342 2343 EMIT(SPD_ATTR_NEXT, 0); /* 6 */ \
2343 2344 }
2344 2345
2345 2346 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
2346 2347 for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
2347 2348 algidx++) {
2348 2349 int algid = ipss->ipsec_sortlist[algtype][algidx];
2349 2350 ipsec_alginfo_t *alg =
2350 2351 ipss->ipsec_alglists[algtype][algid];
2351 2352 uint_t minbits = alg->alg_minbits;
2352 2353 uint_t maxbits = alg->alg_maxbits;
2353 2354 uint_t defbits = alg->alg_default_bits;
2354 2355 uint_t incr = alg->alg_increment;
2355 2356
2356 2357 if (algtype == IPSEC_ALG_AUTH) {
2357 2358 if (algid == SADB_AALG_NONE)
2358 2359 continue;
2359 2360 EMITALGATTRS(SPDSOCK_AH_AUTH);
2360 2361 EMITALGATTRS(SPDSOCK_ESP_AUTH);
2361 2362 } else {
2362 2363 if (algid == SADB_EALG_NONE)
2363 2364 continue;
2364 2365 ASSERT(algtype == IPSEC_ALG_ENCR);
2365 2366 EMITALGATTRS(SPDSOCK_ESP_ENCR);
2366 2367 }
2367 2368 }
2368 2369 }
2369 2370
2370 - mutex_exit(&ipss->ipsec_alg_lock);
2371 + rw_exit(&ipss->ipsec_alg_lock);
2371 2372
2372 2373 #undef EMITALGATTRS
2373 2374 #undef EMIT
2374 2375 #undef ATTRPERALG
2375 2376
2376 2377 attr--;
2377 2378 attr->spd_attr_tag = SPD_ATTR_END;
2378 2379
2379 2380 freemsg(mp);
2380 2381 qreply(q, m);
2381 2382 }
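As a worked example of the sizing above (illustrative numbers only): with
two authentication algorithms and three encryption algorithms registered,
and neither "NONE" entry present, algcount is 2 * 2 + 3 = 7, so the reply
occupies sizeof (spd_msg_t) + sizeof (struct spd_ext_actions) +
6 * 7 * sizeof (struct spd_attribute) bytes. The final SPD_ATTR_NEXT
emitted by EMITALGATTRS is then overwritten with SPD_ATTR_END by the
attr-- fixup just before qreply().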
2382 2383
2383 2384 /*
2384 2385 * Process a SPD_DUMPALGS request.
2385 2386 */
2386 2387
2387 2388 #define ATTRPERALG 9 /* fixed attributes per algs */
2388 2389
2389 2390 void
2390 2391 spdsock_dumpalgs(queue_t *q, mblk_t *mp)
2391 2392 {
2392 2393 uint_t algtype;
2393 2394 uint_t algidx;
2394 2395 uint_t size;
2395 2396 mblk_t *m;
2396 2397 uint8_t *cur;
2397 2398 spd_msg_t *msg;
2398 2399 struct spd_ext_actions *act;
2399 2400 struct spd_attribute *attr;
2400 2401 ipsec_alginfo_t *alg;
2401 2402 uint_t algid;
2402 2403 uint_t i;
2403 2404 uint_t alg_size;
2404 2405 spdsock_t *ss = (spdsock_t *)q->q_ptr;
2405 2406 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;
2406 2407
2407 - mutex_enter(&ipss->ipsec_alg_lock);
2408 + rw_enter(&ipss->ipsec_alg_lock, RW_READER);
2408 2409
2409 2410 /*
2410 2411 * For each algorithm, we encode:
2411 2412 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
2412 2413 *
2413 2414 * ALG_ID / ALG_PROTO / ALG_INCRBITS / ALG_NKEYSIZES / ALG_KEYSIZE*
2414 2415 * ALG_NBLOCKSIZES / ALG_BLOCKSIZE* / ALG_NPARAMS / ALG_PARAMS* /
2415 2416 * ALG_MECHNAME / ALG_FLAGS / {END, NEXT}
2416 2417 */
2417 2418
2418 2419 /*
2419 2420 * Compute the size of the SPD message.
2420 2421 */
2421 2422 size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions);
2422 2423
2423 2424 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
2424 2425 for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
2425 2426 algidx++) {
2426 2427 algid = ipss->ipsec_sortlist[algtype][algidx];
2427 2428 alg = ipss->ipsec_alglists[algtype][algid];
2428 2429 alg_size = sizeof (struct spd_attribute) *
2429 2430 (ATTRPERALG + alg->alg_nkey_sizes +
2430 2431 alg->alg_nblock_sizes + alg->alg_nparams) +
2431 2432 CRYPTO_MAX_MECH_NAME;
2432 2433 size += alg_size;
2433 2434 }
2434 2435 }
2435 2436
2436 2437 ASSERT(ALIGNED64(size));
2437 2438
2438 2439 m = allocb(size, BPRI_HI);
2439 2440 if (m == NULL) {
2440 - mutex_exit(&ipss->ipsec_alg_lock);
2441 + rw_exit(&ipss->ipsec_alg_lock);
2441 2442 spdsock_error(q, mp, ENOMEM, 0);
2442 2443 return;
2443 2444 }
2444 2445
2445 2446 m->b_wptr = m->b_rptr + size;
2446 2447 cur = m->b_rptr;
2447 2448
2448 2449 msg = (spd_msg_t *)cur;
2449 2450 bcopy(mp->b_rptr, cur, sizeof (*msg));
2450 2451
2451 2452 msg->spd_msg_len = SPD_8TO64(size);
2452 2453 msg->spd_msg_errno = 0;
2453 2454 msg->spd_msg_type = SPD_ALGLIST;
2454 2455
2455 2456 msg->spd_msg_diagnostic = 0;
2456 2457
2457 2458 cur += sizeof (*msg);
2458 2459
2459 2460 act = (struct spd_ext_actions *)cur;
2460 2461 cur += sizeof (*act);
2461 2462
2462 2463 act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
2463 2464 act->spd_actions_exttype = SPD_EXT_ACTION;
2464 2465 act->spd_actions_count = ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
2465 2466 ipss->ipsec_nalgs[IPSEC_ALG_ENCR];
2466 2467 act->spd_actions_reserved = 0;
2467 2468
2468 2469 /*
2469 2470 * If there aren't any algorithms registered, return an empty message.
2470 2471 * spdsock_get_ext() knows how to deal with this.
2471 2472 */
2472 2473 if (act->spd_actions_count == 0) {
2473 2474 act->spd_actions_len = 0;
2474 - mutex_exit(&ipss->ipsec_alg_lock);
2475 + rw_exit(&ipss->ipsec_alg_lock);
2475 2476 goto error;
2476 2477 }
2477 2478
2478 2479 attr = (struct spd_attribute *)cur;
2479 2480
2480 2481 #define EMIT(tag, value) { \
2481 2482 attr->spd_attr_tag = (tag); \
2482 2483 attr->spd_attr_value = (value); \
2483 2484 attr++; \
2484 2485 }
2485 2486
2486 2487 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
2487 2488 for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
2488 2489 algidx++) {
2489 2490
2490 2491 algid = ipss->ipsec_sortlist[algtype][algidx];
2491 2492 alg = ipss->ipsec_alglists[algtype][algid];
2492 2493
2493 2494 /*
2494 2495 * If you change the number of EMIT's here, change
2495 2496 * ATTRPERALG above to match
2496 2497 */
2497 2498 EMIT(SPD_ATTR_ALG_ID, algid);
2498 2499 EMIT(SPD_ATTR_ALG_PROTO, algproto[algtype]);
2499 2500 EMIT(SPD_ATTR_ALG_INCRBITS, alg->alg_increment);
2500 2501 EMIT(SPD_ATTR_ALG_NKEYSIZES, alg->alg_nkey_sizes);
2501 2502 for (i = 0; i < alg->alg_nkey_sizes; i++)
2502 2503 EMIT(SPD_ATTR_ALG_KEYSIZE,
2503 2504 alg->alg_key_sizes[i]);
2504 2505
2505 2506 EMIT(SPD_ATTR_ALG_NBLOCKSIZES, alg->alg_nblock_sizes);
2506 2507 for (i = 0; i < alg->alg_nblock_sizes; i++)
2507 2508 EMIT(SPD_ATTR_ALG_BLOCKSIZE,
2508 2509 alg->alg_block_sizes[i]);
2509 2510
2510 2511 EMIT(SPD_ATTR_ALG_NPARAMS, alg->alg_nparams);
2511 2512 for (i = 0; i < alg->alg_nparams; i++)
2512 2513 EMIT(SPD_ATTR_ALG_PARAMS,
2513 2514 alg->alg_params[i]);
2514 2515
2515 2516 EMIT(SPD_ATTR_ALG_FLAGS, alg->alg_flags);
2516 2517
2517 2518 EMIT(SPD_ATTR_ALG_MECHNAME, CRYPTO_MAX_MECH_NAME);
2518 2519 bcopy(alg->alg_mech_name, attr, CRYPTO_MAX_MECH_NAME);
2519 2520 attr = (struct spd_attribute *)((char *)attr +
2520 2521 CRYPTO_MAX_MECH_NAME);
2521 2522
2522 2523 EMIT(SPD_ATTR_NEXT, 0);
2523 2524 }
2524 2525 }
2525 2526
2526 - mutex_exit(&ipss->ipsec_alg_lock);
2527 + rw_exit(&ipss->ipsec_alg_lock);
2527 2528
2528 2529 #undef EMITALGATTRS
2529 2530 #undef EMIT
2530 2531 #undef ATTRPERALG
2531 2532
2532 2533 attr--;
2533 2534 attr->spd_attr_tag = SPD_ATTR_END;
2534 2535
2535 2536 error:
2536 2537 freemsg(mp);
2537 2538 qreply(q, m);
2538 2539 }
2539 2540
2540 2541 /*
2541 2542 * Do the actual work of processing an SPD_UPDATEALGS request. Can
2542 2543 * be invoked either once IPsec is loaded on a cached request, or
2543 2544 * when a request is received while IPsec is loaded.
2544 2545 */
2545 2546 static int
2546 2547 spdsock_do_updatealg(spd_ext_t *extv[], spd_stack_t *spds)
2547 2548 {
2548 2549 struct spd_ext_actions *actp;
2549 2550 struct spd_attribute *attr, *endattr;
2550 2551 uint64_t *start, *end;
2551 2552 ipsec_alginfo_t *alg = NULL;
2552 2553 ipsec_algtype_t alg_type = 0;
2553 2554 boolean_t skip_alg = B_TRUE, doing_proto = B_FALSE;
2554 2555 uint_t i, cur_key, cur_block, algid;
2555 2556 int diag = -1;
2556 2557
2557 2558 ASSERT(MUTEX_HELD(&spds->spds_alg_lock));
2558 2559
2559 2560 /* parse the message, building the list of algorithms */
2560 2561
2561 2562 actp = (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
2562 2563 if (actp == NULL)
2563 2564 return (SPD_DIAGNOSTIC_NO_ACTION_EXT);
2564 2565
2565 2566 start = (uint64_t *)actp;
2566 2567 end = (start + actp->spd_actions_len);
2567 2568 endattr = (struct spd_attribute *)end;
2568 2569 attr = (struct spd_attribute *)&actp[1];
2569 2570
2570 2571 bzero(spds->spds_algs, IPSEC_NALGTYPES * IPSEC_MAX_ALGS *
2571 2572 sizeof (ipsec_alginfo_t *));
2572 2573
2573 2574 alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);
2574 2575
2575 2576 #define ALG_KEY_SIZES(a) (((a)->alg_nkey_sizes + 1) * sizeof (uint16_t))
2576 2577 #define ALG_BLOCK_SIZES(a) (((a)->alg_nblock_sizes + 1) * sizeof (uint16_t))
2577 2578 #define ALG_PARAM_SIZES(a) (((a)->alg_nparams + 1) * sizeof (uint16_t))
2578 2579
2579 2580 while (attr < endattr) {
2580 2581 switch (attr->spd_attr_tag) {
2581 2582 case SPD_ATTR_NOP:
2582 2583 case SPD_ATTR_EMPTY:
2583 2584 break;
2584 2585 case SPD_ATTR_END:
2585 2586 attr = endattr;
2586 2587 /* FALLTHRU */
2587 2588 case SPD_ATTR_NEXT:
2588 2589 if (doing_proto) {
2589 2590 doing_proto = B_FALSE;
2590 2591 break;
2591 2592 }
2592 2593 if (skip_alg) {
2593 2594 ipsec_alg_free(alg);
2594 2595 } else {
2595 2596 ipsec_alg_free(
2596 2597 spds->spds_algs[alg_type][alg->alg_id]);
2597 2598 spds->spds_algs[alg_type][alg->alg_id] =
2598 2599 alg;
2599 2600 }
2600 2601 alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);
2601 2602 break;
2602 2603
2603 2604 case SPD_ATTR_ALG_ID:
2604 2605 if (attr->spd_attr_value >= IPSEC_MAX_ALGS) {
2605 2606 ss1dbg(spds, ("spdsock_do_updatealg: "
2606 2607 "invalid alg id %d\n",
2607 2608 attr->spd_attr_value));
2608 2609 diag = SPD_DIAGNOSTIC_ALG_ID_RANGE;
2609 2610 goto bail;
2610 2611 }
2611 2612 alg->alg_id = attr->spd_attr_value;
2612 2613 break;
2613 2614
2614 2615 case SPD_ATTR_ALG_PROTO:
2615 2616 /* find the alg type */
2616 2617 for (i = 0; i < NALGPROTOS; i++)
2617 2618 if (algproto[i] == attr->spd_attr_value)
2618 2619 break;
2619 2620 skip_alg = (i == NALGPROTOS);
2620 2621 if (!skip_alg)
2621 2622 alg_type = i;
2622 2623 break;
2623 2624
2624 2625 case SPD_ATTR_ALG_INCRBITS:
2625 2626 alg->alg_increment = attr->spd_attr_value;
2626 2627 break;
2627 2628
2628 2629 case SPD_ATTR_ALG_NKEYSIZES:
2629 2630 if (alg->alg_key_sizes != NULL) {
2630 2631 kmem_free(alg->alg_key_sizes,
2631 2632 ALG_KEY_SIZES(alg));
2632 2633 }
2633 2634 alg->alg_nkey_sizes = attr->spd_attr_value;
2634 2635 /*
2635 2636 * Allocate room for the trailing zero key size
2636 2637 * value as well.
2637 2638 */
2638 2639 alg->alg_key_sizes = kmem_zalloc(ALG_KEY_SIZES(alg),
2639 2640 KM_SLEEP);
2640 2641 cur_key = 0;
2641 2642 break;
2642 2643
2643 2644 case SPD_ATTR_ALG_KEYSIZE:
2644 2645 if (alg->alg_key_sizes == NULL ||
2645 2646 cur_key >= alg->alg_nkey_sizes) {
2646 2647 ss1dbg(spds, ("spdsock_do_updatealg: "
2647 2648 "too many key sizes\n"));
2648 2649 diag = SPD_DIAGNOSTIC_ALG_NUM_KEY_SIZES;
2649 2650 goto bail;
2650 2651 }
2651 2652 alg->alg_key_sizes[cur_key++] = attr->spd_attr_value;
2652 2653 break;
2653 2654
2654 2655 case SPD_ATTR_ALG_FLAGS:
2655 2656 /*
2656 2657 * Flags (bit mask). The alg_flags element of
2657 2658 * ipsecalg_flags_t is only 8 bits wide. The
2658 2659 * user can set the VALID bit, but we will ignore it
2659 2660 * and decide for ourselves whether the algorithm is valid.
2660 2661 */
2661 2662 alg->alg_flags |= (uint8_t)attr->spd_attr_value;
2662 2663 break;
2663 2664
2664 2665 case SPD_ATTR_ALG_NBLOCKSIZES:
2665 2666 if (alg->alg_block_sizes != NULL) {
2666 2667 kmem_free(alg->alg_block_sizes,
2667 2668 ALG_BLOCK_SIZES(alg));
2668 2669 }
2669 2670 alg->alg_nblock_sizes = attr->spd_attr_value;
2670 2671 /*
2671 2672 * Allocate room for the trailing zero block size
2672 2673 * value as well.
2673 2674 */
2674 2675 alg->alg_block_sizes = kmem_zalloc(ALG_BLOCK_SIZES(alg),
2675 2676 KM_SLEEP);
2676 2677 cur_block = 0;
2677 2678 break;
2678 2679
2679 2680 case SPD_ATTR_ALG_BLOCKSIZE:
2680 2681 if (alg->alg_block_sizes == NULL ||
2681 2682 cur_block >= alg->alg_nblock_sizes) {
2682 2683 ss1dbg(spds, ("spdsock_do_updatealg: "
2683 2684 "too many block sizes\n"));
2684 2685 diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES;
2685 2686 goto bail;
2686 2687 }
2687 2688 alg->alg_block_sizes[cur_block++] =
2688 2689 attr->spd_attr_value;
2689 2690 break;
2690 2691
2691 2692 case SPD_ATTR_ALG_NPARAMS:
2692 2693 if (alg->alg_params != NULL) {
2693 2694 kmem_free(alg->alg_params,
2694 2695 ALG_PARAM_SIZES(alg));
2695 2696 }
2696 2697 alg->alg_nparams = attr->spd_attr_value;
2697 2698 /*
2698 2699 * Allocate room for the trailing zero parameter
2699 2700 * value as well.
2700 2701 */
2701 2702 alg->alg_params = kmem_zalloc(ALG_PARAM_SIZES(alg),
2702 2703 KM_SLEEP);
2703 2704 cur_block = 0;
2704 2705 break;
2705 2706
2706 2707 case SPD_ATTR_ALG_PARAMS:
2707 2708 if (alg->alg_params == NULL ||
2708 2709 cur_block >= alg->alg_nparams) {
2709 2710 ss1dbg(spds, ("spdsock_do_updatealg: "
2710 2711 "too many params\n"));
2711 2712 diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES;
2712 2713 goto bail;
2713 2714 }
2714 2715 /*
2715 2716 * Array contains: iv_len, icv_len, salt_len
2716 2717 * Any additional parameters are currently ignored.
2717 2718 */
2718 2719 alg->alg_params[cur_block++] =
2719 2720 attr->spd_attr_value;
2720 2721 break;
2721 2722
2722 2723 case SPD_ATTR_ALG_MECHNAME: {
2723 2724 char *mech_name;
2724 2725
2725 2726 if (attr->spd_attr_value > CRYPTO_MAX_MECH_NAME) {
2726 2727 ss1dbg(spds, ("spdsock_do_updatealg: "
2727 2728 "mech name too long\n"));
2728 2729 diag = SPD_DIAGNOSTIC_ALG_MECH_NAME_LEN;
2729 2730 goto bail;
2730 2731 }
2731 2732 mech_name = (char *)(attr + 1);
2732 2733 bcopy(mech_name, alg->alg_mech_name,
2733 2734 attr->spd_attr_value);
2734 2735 alg->alg_mech_name[CRYPTO_MAX_MECH_NAME-1] = '\0';
2735 2736 attr = (struct spd_attribute *)((char *)attr +
2736 2737 attr->spd_attr_value);
2737 2738 break;
2738 2739 }
2739 2740
2740 2741 case SPD_ATTR_PROTO_ID:
2741 2742 doing_proto = B_TRUE;
2742 2743 for (i = 0; i < NALGPROTOS; i++) {
2743 2744 if (algproto[i] == attr->spd_attr_value) {
2744 2745 alg_type = i;
2745 2746 break;
2746 2747 }
2747 2748 }
2748 2749 break;
2749 2750
2750 2751 case SPD_ATTR_PROTO_EXEC_MODE:
2751 2752 if (!doing_proto)
2752 2753 break;
2753 2754 for (i = 0; i < NEXECMODES; i++) {
2754 2755 if (execmodes[i] == attr->spd_attr_value) {
2755 2756 spds->spds_algs_exec_mode[alg_type] = i;
2756 2757 break;
2757 2758 }
2758 2759 }
2759 2760 break;
2760 2761 }
2761 2762 attr++;
2762 2763 }
2763 2764
2764 2765 #undef ALG_KEY_SIZES
2765 2766 #undef ALG_BLOCK_SIZES
2766 2767 #undef ALG_PARAM_SIZES
2767 2768
2768 2769 /* update the algorithm tables */
2769 2770 spdsock_merge_algs(spds);
2770 2771 bail:
2771 2772 /* cleanup */
2772 2773 ipsec_alg_free(alg);
2773 2774 for (alg_type = 0; alg_type < IPSEC_NALGTYPES; alg_type++)
2774 2775 for (algid = 0; algid < IPSEC_MAX_ALGS; algid++)
2775 2776 if (spds->spds_algs[alg_type][algid] != NULL)
2776 2777 ipsec_alg_free(spds->spds_algs[alg_type][algid]);
2777 2778 return (diag);
2778 2779 }
2779 2780
2780 2781 /*
2781 2782 * Process an SPD_UPDATEALGS request. If IPsec is not loaded, queue
2782 2783 * the request until IPsec loads. If IPsec is loaded, act on it
2783 2784 * immediately.
2784 2785 */
2785 2786
2786 2787 static void
2787 2788 spdsock_updatealg(queue_t *q, mblk_t *mp, spd_ext_t *extv[])
2788 2789 {
2789 2790 spdsock_t *ss = (spdsock_t *)q->q_ptr;
2790 2791 spd_stack_t *spds = ss->spdsock_spds;
2791 2792 ipsec_stack_t *ipss = spds->spds_netstack->netstack_ipsec;
2792 2793 uint32_t auditing = AU_AUDITING();
2793 2794
2794 2795 if (!ipsec_loaded(ipss)) {
2795 2796 /*
2796 2797 * IPsec is not loaded, save request and return nicely,
2797 2798 * the message will be processed once IPsec loads.
2798 2799 */
2799 2800 mblk_t *new_mp;
2800 2801
2801 2802 /* last update message wins */
2802 2803 if ((new_mp = copymsg(mp)) == NULL) {
2803 2804 spdsock_error(q, mp, ENOMEM, 0);
2804 2805 return;
2805 2806 }
2806 2807 mutex_enter(&spds->spds_alg_lock);
2807 2808 bcopy(extv, spds->spds_extv_algs,
2808 2809 sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));
2809 2810 if (spds->spds_mp_algs != NULL)
2810 2811 freemsg(spds->spds_mp_algs);
2811 2812 spds->spds_mp_algs = mp;
2812 2813 mutex_exit(&spds->spds_alg_lock);
2813 2814 if (auditing) {
2814 2815 cred_t *cr;
2815 2816 pid_t cpid;
2816 2817
2817 2818 cr = msg_getcred(mp, &cpid);
2818 2819 audit_pf_policy(SPD_UPDATEALGS, cr,
2819 2820 spds->spds_netstack, NULL, B_TRUE, EAGAIN,
2820 2821 cpid);
2821 2822 }
2822 2823 spd_echo(q, new_mp);
2823 2824 } else {
2824 2825 /*
2825 2826 * IPsec is loaded, act on the message immediately.
2826 2827 */
2827 2828 int diag;
2828 2829
2829 2830 mutex_enter(&spds->spds_alg_lock);
2830 2831 diag = spdsock_do_updatealg(extv, spds);
2831 2832 if (diag == -1) {
2832 2833 /* Keep the lock held while we walk the SA tables. */
2833 2834 sadb_alg_update(IPSEC_ALG_ALL, 0, 0,
2834 2835 spds->spds_netstack);
2835 2836 mutex_exit(&spds->spds_alg_lock);
2836 2837 spd_echo(q, mp);
2837 2838 if (auditing) {
2838 2839 cred_t *cr;
2839 2840 pid_t cpid;
2840 2841
2841 2842 cr = msg_getcred(mp, &cpid);
2842 2843 audit_pf_policy(SPD_UPDATEALGS, cr,
2843 2844 spds->spds_netstack, NULL, B_TRUE, 0,
2844 2845 cpid);
2845 2846 }
2846 2847 } else {
2847 2848 mutex_exit(&spds->spds_alg_lock);
2848 2849 spdsock_diag(q, mp, diag);
2849 2850 if (auditing) {
2850 2851 cred_t *cr;
2851 2852 pid_t cpid;
2852 2853
2853 2854 cr = msg_getcred(mp, &cpid);
2854 2855 audit_pf_policy(SPD_UPDATEALGS, cr,
2855 2856 spds->spds_netstack, NULL, B_TRUE, diag,
2856 2857 cpid);
2857 2858 }
2858 2859 }
2859 2860 }
2860 2861 }
2861 2862
2862 2863 /*
2863 2864 * Find a tunnel instance (using the name to link ID mapping), and
2864 2865 * update it after an IPsec change. We need to do this always in case
2865 2866 * we add policy AFTER plumbing a tunnel. We also need to do this
2866 2867 * because, as a side-effect, the tunnel's MTU is updated to reflect
2867 2868 * any IPsec overhead in the itp's policy.
2868 2869 */
2869 2870 static void
2870 2871 update_iptun_policy(ipsec_tun_pol_t *itp)
2871 2872 {
2872 2873 datalink_id_t linkid;
2873 2874
2874 2875 if (dls_mgmt_get_linkid(itp->itp_name, &linkid) == 0)
2875 2876 iptun_set_policy(linkid, itp);
2876 2877 }
2877 2878
2878 2879 /*
2879 2880 * Sort through the mess of polhead options to retrieve an appropriate one.
2880 2881 * Returns NULL if we send an spdsock error. Returns a valid pointer if we
2881 2882 * found a valid polhead. Returns ALL_ACTIVE_POLHEADS (aka. -1) or
2882 2883 * ALL_INACTIVE_POLHEADS (aka. -2) if the operation is to act on ALL
2883 2884 * policy heads.
2884 2885 */
2885 2886 static ipsec_policy_head_t *
2886 2887 get_appropriate_polhead(queue_t *q, mblk_t *mp, spd_if_t *tunname, int spdid,
2887 2888 int msgtype, ipsec_tun_pol_t **itpp)
2888 2889 {
2889 2890 ipsec_tun_pol_t *itp;
2890 2891 ipsec_policy_head_t *iph;
2891 2892 int errno;
2892 2893 char *tname;
2893 2894 boolean_t active;
2894 2895 spdsock_t *ss = (spdsock_t *)q->q_ptr;
2895 2896 netstack_t *ns = ss->spdsock_spds->spds_netstack;
2896 2897 uint64_t gen; /* Placeholder */
2897 2898
2898 2899 active = (spdid == SPD_ACTIVE);
2899 2900 *itpp = NULL;
2900 2901 if (!active && spdid != SPD_STANDBY) {
2901 2902 spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_SPDID);
2902 2903 return (NULL);
2903 2904 }
2904 2905
2905 2906 if (tunname != NULL) {
2906 2907 /* Acting on a tunnel's SPD. */
2907 2908 tname = (char *)tunname->spd_if_name;
2908 2909 if (*tname == '\0') {
2909 2910 /* Handle all-polhead cases here. */
2910 2911 if (msgtype != SPD_FLUSH && msgtype != SPD_DUMP) {
2911 2912 spdsock_diag(q, mp,
2912 2913 SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
2913 2914 return (NULL);
2914 2915 }
2915 2916 return (active ? ALL_ACTIVE_POLHEADS :
2916 2917 ALL_INACTIVE_POLHEADS);
2917 2918 }
2918 2919
2919 2920 itp = get_tunnel_policy(tname, ns);
2920 2921 if (itp == NULL) {
2921 2922 if (msgtype != SPD_ADDRULE) {
2922 2923 /* "Tunnel not found" */
2923 2924 spdsock_error(q, mp, ENOENT, 0);
2924 2925 return (NULL);
2925 2926 }
2926 2927
2927 2928 errno = 0;
2928 2929 itp = create_tunnel_policy(tname, &errno, &gen, ns);
2929 2930 if (itp == NULL) {
2930 2931 /*
2931 2932 * Something very bad happened, most likely
2932 2933 * ENOMEM. Return an indicator.
2933 2934 */
2934 2935 spdsock_error(q, mp, errno, 0);
2935 2936 return (NULL);
2936 2937 }
2937 2938 }
2938 2939
2939 2940 /* Match up the itp to an iptun instance. */
2940 2941 update_iptun_policy(itp);
2941 2942
2942 2943 *itpp = itp;
2943 2944 /* For spdsock dump state, set the polhead's name. */
2944 2945 if (msgtype == SPD_DUMP) {
2945 2946 ITP_REFHOLD(itp);
2946 2947 ss->spdsock_itp = itp;
2947 2948 ss->spdsock_dump_tunnel = itp->itp_flags &
2948 2949 (active ? ITPF_P_TUNNEL : ITPF_I_TUNNEL);
2949 2950 }
2950 2951 } else {
2951 2952 itp = NULL;
2952 2953 /* For spdsock dump state, indicate it's global policy. */
2953 2954 if (msgtype == SPD_DUMP)
2954 2955 ss->spdsock_itp = NULL;
2955 2956 }
2956 2957
2957 2958 if (active)
2958 2959 iph = (itp == NULL) ? ipsec_system_policy(ns) : itp->itp_policy;
2959 2960 else
2960 2961 iph = (itp == NULL) ? ipsec_inactive_policy(ns) :
2961 2962 itp->itp_inactive;
2962 2963
2963 2964 ASSERT(iph != NULL);
2964 2965 if (itp != NULL) {
2965 2966 IPPH_REFHOLD(iph);
2966 2967 }
2967 2968
2968 2969 return (iph);
2969 2970 }
2970 2971
2971 2972 static void
2972 2973 spdsock_parse(queue_t *q, mblk_t *mp)
2973 2974 {
2974 2975 spd_msg_t *spmsg;
2975 2976 spd_ext_t *extv[SPD_EXT_MAX + 1];
2976 2977 uint_t msgsize;
2977 2978 ipsec_policy_head_t *iph;
2978 2979 ipsec_tun_pol_t *itp;
2979 2980 spd_if_t *tunname;
2980 2981 spdsock_t *ss = (spdsock_t *)q->q_ptr;
2981 2982 spd_stack_t *spds = ss->spdsock_spds;
2982 2983 netstack_t *ns = spds->spds_netstack;
2983 2984 ipsec_stack_t *ipss = ns->netstack_ipsec;
2984 2985
2985 2986 /* Make sure nothing's below me. */
2986 2987 ASSERT(WR(q)->q_next == NULL);
2987 2988
2988 2989 spmsg = (spd_msg_t *)mp->b_rptr;
2989 2990
2990 2991 msgsize = SPD_64TO8(spmsg->spd_msg_len);
2991 2992
2992 2993 if (msgdsize(mp) != msgsize) {
2993 2994 /*
2994 2995 * Message len incorrect w.r.t. actual size. Send an error
2995 2996 * (EMSGSIZE). It may be necessary to massage things a
2996 2997 * bit. For example, if the spd_msg_type is hosed,
2997 2998 * I need to set it to SPD_RESERVED to get delivery to
2998 2999 * do the right thing. Then again, maybe just letting
2999 3000 * the error delivery do the right thing.
3000 3001 */
3001 3002 ss2dbg(spds,
3002 3003 ("mblk (%lu) and base (%d) message sizes don't jibe.\n",
3003 3004 msgdsize(mp), msgsize));
3004 3005 spdsock_error(q, mp, EMSGSIZE, SPD_DIAGNOSTIC_NONE);
3005 3006 return;
3006 3007 }
3007 3008
3008 3009 if (msgsize > (uint_t)(mp->b_wptr - mp->b_rptr)) {
3009 3010 /* Get all message into one mblk. */
3010 3011 if (pullupmsg(mp, -1) == 0) {
3011 3012 /*
3012 3013 * Something screwy happened.
3013 3014 */
3014 3015 ss3dbg(spds, ("spdsock_parse: pullupmsg() failed.\n"));
3015 3016 return;
3016 3017 } else {
3017 3018 spmsg = (spd_msg_t *)mp->b_rptr;
3018 3019 }
3019 3020 }
3020 3021
3021 3022 switch (spdsock_get_ext(extv, spmsg, msgsize)) {
3022 3023 case KGE_DUP:
3023 3024 /* Handle duplicate extension. */
3024 3025 ss1dbg(spds, ("Got duplicate extension of type %d.\n",
3025 3026 extv[0]->spd_ext_type));
3026 3027 spdsock_diag(q, mp, dup_ext_diag[extv[0]->spd_ext_type]);
3027 3028 return;
3028 3029 case KGE_UNK:
3029 3030 /* Handle unknown extension. */
3030 3031 ss1dbg(spds, ("Got unknown extension of type %d.\n",
3031 3032 extv[0]->spd_ext_type));
3032 3033 spdsock_diag(q, mp, SPD_DIAGNOSTIC_UNKNOWN_EXT);
3033 3034 return;
3034 3035 case KGE_LEN:
3035 3036 /* Length error. */
3036 3037 ss1dbg(spds, ("Length %d on extension type %d overrun or 0.\n",
3037 3038 extv[0]->spd_ext_len, extv[0]->spd_ext_type));
3038 3039 spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_EXTLEN);
3039 3040 return;
3040 3041 case KGE_CHK:
3041 3042 /* Reality check failed. */
3042 3043 ss1dbg(spds, ("Reality check failed on extension type %d.\n",
3043 3044 extv[0]->spd_ext_type));
3044 3045 spdsock_diag(q, mp, bad_ext_diag[extv[0]->spd_ext_type]);
3045 3046 return;
3046 3047 default:
3047 3048 /* Default case is no errors. */
3048 3049 break;
3049 3050 }
3050 3051
3051 3052 /*
3052 3053 * Special-case SPD_UPDATEALGS so as not to load IPsec.
3053 3054 */
3054 3055 if (!ipsec_loaded(ipss) && spmsg->spd_msg_type != SPD_UPDATEALGS) {
3055 3056 spdsock_t *ss = (spdsock_t *)q->q_ptr;
3056 3057
3057 3058 ASSERT(ss != NULL);
3058 3059 ipsec_loader_loadnow(ipss);
3059 3060 ss->spdsock_timeout_arg = mp;
3060 3061 ss->spdsock_timeout = qtimeout(q, spdsock_loadcheck,
3061 3062 q, LOADCHECK_INTERVAL);
3062 3063 return;
3063 3064 }
3064 3065
3065 3066 /* First check for messages that need no polheads at all. */
3066 3067 switch (spmsg->spd_msg_type) {
3067 3068 case SPD_UPDATEALGS:
3068 3069 spdsock_updatealg(q, mp, extv);
3069 3070 return;
3070 3071 case SPD_ALGLIST:
3071 3072 spdsock_alglist(q, mp);
3072 3073 return;
3073 3074 case SPD_DUMPALGS:
3074 3075 spdsock_dumpalgs(q, mp);
3075 3076 return;
3076 3077 }
3077 3078
3078 3079 /*
3079 3080 * Then check for ones that need both primary/secondary polheads,
3080 3081 * finding the appropriate tunnel policy if need be.
3081 3082 */
3082 3083 tunname = (spd_if_t *)extv[SPD_EXT_TUN_NAME];
3083 3084 switch (spmsg->spd_msg_type) {
3084 3085 case SPD_FLIP:
3085 3086 spdsock_flip(q, mp, tunname);
3086 3087 return;
3087 3088 case SPD_CLONE:
3088 3089 spdsock_clone(q, mp, tunname);
3089 3090 return;
3090 3091 }
3091 3092
3092 3093 /*
3093 3094 * Finally, find ones that operate on exactly one polhead, or
3094 3095 * "all polheads" of a given type (active/inactive).
3095 3096 */
3096 3097 iph = get_appropriate_polhead(q, mp, tunname, spmsg->spd_msg_spdid,
3097 3098 spmsg->spd_msg_type, &itp);
3098 3099 if (iph == NULL)
3099 3100 return;
3100 3101
3101 3102 /* All-polheads-ready operations. */
3102 3103 switch (spmsg->spd_msg_type) {
3103 3104 case SPD_FLUSH:
3104 3105 if (itp != NULL) {
3105 3106 mutex_enter(&itp->itp_lock);
3106 3107 if (spmsg->spd_msg_spdid == SPD_ACTIVE)
3107 3108 itp->itp_flags &= ~ITPF_PFLAGS;
3108 3109 else
3109 3110 itp->itp_flags &= ~ITPF_IFLAGS;
3110 3111 mutex_exit(&itp->itp_lock);
3111 3112 }
3112 3113
3113 3114 spdsock_flush(q, iph, itp, mp);
3114 3115
3115 3116 if (itp != NULL) {
3116 3117 /* SPD_FLUSH is worth a tunnel MTU check. */
3117 3118 update_iptun_policy(itp);
3118 3119 ITP_REFRELE(itp, ns);
3119 3120 }
3120 3121 return;
3121 3122 case SPD_DUMP:
3122 3123 if (itp != NULL)
3123 3124 ITP_REFRELE(itp, ns);
3124 3125 spdsock_dump(q, iph, mp);
3125 3126 return;
3126 3127 }
3127 3128
3128 3129 if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
3129 3130 spdsock_diag(q, mp, SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
3130 3131 return;
3131 3132 }
3132 3133
3133 3134 /* Single-polhead-only operations. */
3134 3135 switch (spmsg->spd_msg_type) {
3135 3136 case SPD_ADDRULE:
3136 3137 spdsock_addrule(q, iph, mp, extv, itp);
3137 3138 break;
3138 3139 case SPD_DELETERULE:
3139 3140 spdsock_deleterule(q, iph, mp, extv, itp);
3140 3141 break;
3141 3142 case SPD_LOOKUP:
3142 3143 spdsock_lookup(q, iph, mp, extv, itp);
3143 3144 break;
3144 3145 default:
3145 3146 spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_MSG_TYPE);
3146 3147 break;
3147 3148 }
3148 3149
3149 3150 IPPH_REFRELE(iph, ns);
3150 3151 if (itp != NULL) {
3151 3152 /* SPD_{ADD,DELETE}RULE are worth a tunnel MTU check. */
3152 3153 if (spmsg->spd_msg_type == SPD_ADDRULE ||
3153 3154 spmsg->spd_msg_type == SPD_DELETERULE)
3154 3155 update_iptun_policy(itp);
3155 3156 ITP_REFRELE(itp, ns);
3156 3157 }
3157 3158 }
3158 3159
3159 3160 /*
3160 3161 * If an algorithm mapping was received before IPsec was loaded, process it.
3161 3162 * Called from the IPsec loader.
3162 3163 */
3163 3164 void
3164 3165 spdsock_update_pending_algs(netstack_t *ns)
3165 3166 {
3166 3167 spd_stack_t *spds = ns->netstack_spdsock;
3167 3168
3168 3169 mutex_enter(&spds->spds_alg_lock);
3169 3170 if (spds->spds_mp_algs != NULL) {
3170 3171 (void) spdsock_do_updatealg(spds->spds_extv_algs, spds);
3171 3172 freemsg(spds->spds_mp_algs);
3172 3173 spds->spds_mp_algs = NULL;
3173 3174 }
3174 3175 mutex_exit(&spds->spds_alg_lock);
3175 3176 }
3176 3177
3177 3178 static void
3178 3179 spdsock_loadcheck(void *arg)
3179 3180 {
3180 3181 queue_t *q = (queue_t *)arg;
3181 3182 spdsock_t *ss = (spdsock_t *)q->q_ptr;
3182 3183 mblk_t *mp;
3183 3184 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;
3184 3185
3185 3186 ASSERT(ss != NULL);
3186 3187
3187 3188 ss->spdsock_timeout = 0;
3188 3189 mp = ss->spdsock_timeout_arg;
3189 3190 ASSERT(mp != NULL);
3190 3191 ss->spdsock_timeout_arg = NULL;
3191 3192 if (ipsec_failed(ipss))
3192 3193 spdsock_error(q, mp, EPROTONOSUPPORT, 0);
3193 3194 else
3194 3195 spdsock_parse(q, mp);
3195 3196 }
3196 3197
3197 3198 /*
3198 3199 * Copy relevant state bits.
3199 3200 */
3200 3201 static void
3201 3202 spdsock_copy_info(struct T_info_ack *tap, spdsock_t *ss)
3202 3203 {
3203 3204 *tap = spdsock_g_t_info_ack;
3204 3205 tap->CURRENT_state = ss->spdsock_state;
3205 3206 tap->OPT_size = spdsock_max_optsize;
3206 3207 }
3207 3208
3208 3209 /*
3209 3210 * This routine responds to T_CAPABILITY_REQ messages. It is called by
3210 3211 * spdsock_wput. Much of the T_CAPABILITY_ACK information is copied from
3211 3212 * spdsock_g_t_info_ack. The current state of the stream is copied from
3212 3213 * spdsock_state.
3213 3214 */
3214 3215 static void
3215 3216 spdsock_capability_req(queue_t *q, mblk_t *mp)
3216 3217 {
3217 3218 spdsock_t *ss = (spdsock_t *)q->q_ptr;
3218 3219 t_uscalar_t cap_bits1;
3219 3220 struct T_capability_ack *tcap;
3220 3221
3221 3222 cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;
3222 3223
3223 3224 mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
3224 3225 mp->b_datap->db_type, T_CAPABILITY_ACK);
3225 3226 if (mp == NULL)
3226 3227 return;
3227 3228
3228 3229 tcap = (struct T_capability_ack *)mp->b_rptr;
3229 3230 tcap->CAP_bits1 = 0;
3230 3231
3231 3232 if (cap_bits1 & TC1_INFO) {
3232 3233 spdsock_copy_info(&tcap->INFO_ack, ss);
3233 3234 tcap->CAP_bits1 |= TC1_INFO;
3234 3235 }
3235 3236
3236 3237 qreply(q, mp);
3237 3238 }
3238 3239
3239 3240 /*
3240 3241 * This routine responds to T_INFO_REQ messages. It is called by
3241 3242 * spdsock_wput_other.
3242 3243 * Most of the T_INFO_ACK information is copied from spdsock_g_t_info_ack.
3243 3244 * The current state of the stream is copied from spdsock_state.
3244 3245 */
3245 3246 static void
3246 3247 spdsock_info_req(q, mp)
3247 3248 queue_t *q;
3248 3249 mblk_t *mp;
3249 3250 {
3250 3251 mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO,
3251 3252 T_INFO_ACK);
3252 3253 if (mp == NULL)
3253 3254 return;
3254 3255 spdsock_copy_info((struct T_info_ack *)mp->b_rptr,
3255 3256 (spdsock_t *)q->q_ptr);
3256 3257 qreply(q, mp);
3257 3258 }
3258 3259
3259 3260 /*
3260 3261 * spdsock_err_ack. This routine creates a
3261 3262 * T_ERROR_ACK message and passes it
3262 3263 * upstream.
3263 3264 */
3264 3265 static void
3265 3266 spdsock_err_ack(q, mp, t_error, sys_error)
3266 3267 queue_t *q;
3267 3268 mblk_t *mp;
3268 3269 int t_error;
3269 3270 int sys_error;
3270 3271 {
3271 3272 if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL)
3272 3273 qreply(q, mp);
3273 3274 }
3274 3275
3275 3276 /*
3276 3277 * This routine retrieves the current status of socket options.
3277 3278 * It returns the size of the option retrieved.
3278 3279 */
3279 3280 /* ARGSUSED */
3280 3281 int
3281 3282 spdsock_opt_get(queue_t *q, int level, int name, uchar_t *ptr)
3282 3283 {
3283 3284 int *i1 = (int *)ptr;
3284 3285
3285 3286 switch (level) {
3286 3287 case SOL_SOCKET:
3287 3288 switch (name) {
3288 3289 case SO_TYPE:
3289 3290 *i1 = SOCK_RAW;
3290 3291 break;
3291 3292 /*
3292 3293 * The following two items can be manipulated,
3293 3294 * but changing them should do nothing.
3294 3295 */
3295 3296 case SO_SNDBUF:
3296 3297 *i1 = (int)q->q_hiwat;
3297 3298 break;
3298 3299 case SO_RCVBUF:
3299 3300 *i1 = (int)(RD(q)->q_hiwat);
3300 3301 break;
3301 3302 }
3302 3303 break;
3303 3304 default:
3304 3305 return (0);
3305 3306 }
3306 3307 return (sizeof (int));
3307 3308 }
3308 3309
3309 3310 /*
3310 3311 * This routine sets socket options.
3311 3312 */
3312 3313 /* ARGSUSED */
3313 3314 int
3314 3315 spdsock_opt_set(queue_t *q, uint_t mgmt_flags, int level, int name,
3315 3316 uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp,
3316 3317 void *thisdg_attrs, cred_t *cr)
3317 3318 {
3318 3319 int *i1 = (int *)invalp;
3319 3320 spdsock_t *ss = (spdsock_t *)q->q_ptr;
3320 3321 spd_stack_t *spds = ss->spdsock_spds;
3321 3322
3322 3323 switch (level) {
3323 3324 case SOL_SOCKET:
3324 3325 switch (name) {
3325 3326 case SO_SNDBUF:
3326 3327 if (*i1 > spds->spds_max_buf)
3327 3328 return (ENOBUFS);
3328 3329 q->q_hiwat = *i1;
3329 3330 break;
3330 3331 case SO_RCVBUF:
3331 3332 if (*i1 > spds->spds_max_buf)
3332 3333 return (ENOBUFS);
3333 3334 RD(q)->q_hiwat = *i1;
3334 3335 (void) proto_set_rx_hiwat(RD(q), NULL, *i1);
3335 3336 break;
3336 3337 }
3337 3338 break;
3338 3339 }
3339 3340 return (0);
3340 3341 }
3341 3342
3342 3343
3343 3344 /*
3344 3345 * Handle STREAMS messages.
3345 3346 */
3346 3347 static void
3347 3348 spdsock_wput_other(queue_t *q, mblk_t *mp)
3348 3349 {
3349 3350 struct iocblk *iocp;
3350 3351 int error;
3351 3352 spdsock_t *ss = (spdsock_t *)q->q_ptr;
3352 3353 spd_stack_t *spds = ss->spdsock_spds;
3353 3354 cred_t *cr;
3354 3355
3355 3356 switch (mp->b_datap->db_type) {
3356 3357 case M_PROTO:
3357 3358 case M_PCPROTO:
3358 3359 if ((mp->b_wptr - mp->b_rptr) < sizeof (long)) {
3359 3360 ss3dbg(spds, (
3360 3361 "spdsock_wput_other: Not big enough M_PROTO\n"));
3361 3362 freemsg(mp);
3362 3363 return;
3363 3364 }
3364 3365 switch (((union T_primitives *)mp->b_rptr)->type) {
3365 3366 case T_CAPABILITY_REQ:
3366 3367 spdsock_capability_req(q, mp);
3367 3368 break;
3368 3369 case T_INFO_REQ:
3369 3370 spdsock_info_req(q, mp);
3370 3371 break;
3371 3372 case T_SVR4_OPTMGMT_REQ:
3372 3373 case T_OPTMGMT_REQ:
3373 3374 /*
3374 3375 * All Solaris components should pass a db_credp
3375 3376 * for this TPI message, hence we ASSERT.
3376 3377 * But in case there is some other M_PROTO that looks
3377 3378 * like a TPI message sent by some other kernel
3378 3379 * component, we check and return an error.
3379 3380 */
3380 3381 cr = msg_getcred(mp, NULL);
3381 3382 ASSERT(cr != NULL);
3382 3383 if (cr == NULL) {
3383 3384 spdsock_err_ack(q, mp, TSYSERR, EINVAL);
3384 3385 return;
3385 3386 }
3386 3387 if (((union T_primitives *)mp->b_rptr)->type ==
3387 3388 T_SVR4_OPTMGMT_REQ) {
3388 3389 svr4_optcom_req(q, mp, cr, &spdsock_opt_obj);
3389 3390 } else {
3390 3391 tpi_optcom_req(q, mp, cr, &spdsock_opt_obj);
3391 3392 }
3392 3393 break;
3393 3394 case T_DATA_REQ:
3394 3395 case T_EXDATA_REQ:
3395 3396 case T_ORDREL_REQ:
3396 3397 /* Illegal for spdsock. */
3397 3398 freemsg(mp);
3398 3399 (void) putnextctl1(RD(q), M_ERROR, EPROTO);
3399 3400 break;
3400 3401 default:
3401 3402 /* Not supported by spdsock. */
3402 3403 spdsock_err_ack(q, mp, TNOTSUPPORT, 0);
3403 3404 break;
3404 3405 }
3405 3406 return;
3406 3407 case M_IOCTL:
3407 3408 iocp = (struct iocblk *)mp->b_rptr;
3408 3409 error = EINVAL;
3409 3410
3410 3411 switch (iocp->ioc_cmd) {
3411 3412 case ND_SET:
3412 3413 case ND_GET:
3413 3414 if (nd_getset(q, spds->spds_g_nd, mp)) {
3414 3415 qreply(q, mp);
3415 3416 return;
3416 3417 } else
3417 3418 error = ENOENT;
3418 3419 /* FALLTHRU */
3419 3420 default:
3420 3421 miocnak(q, mp, 0, error);
3421 3422 return;
3422 3423 }
3423 3424 case M_FLUSH:
3424 3425 if (*mp->b_rptr & FLUSHW) {
3425 3426 flushq(q, FLUSHALL);
3426 3427 *mp->b_rptr &= ~FLUSHW;
3427 3428 }
3428 3429 if (*mp->b_rptr & FLUSHR) {
3429 3430 qreply(q, mp);
3430 3431 return;
3431 3432 }
3432 3433 /* Else FALLTHRU */
3433 3434 }
3434 3435
3435 3436 /* If fell through, just black-hole the message. */
3436 3437 freemsg(mp);
3437 3438 }
3438 3439
3439 3440 static void
3440 3441 spdsock_wput(queue_t *q, mblk_t *mp)
3441 3442 {
3442 3443 uint8_t *rptr = mp->b_rptr;
3443 3444 mblk_t *mp1;
3444 3445 spdsock_t *ss = (spdsock_t *)q->q_ptr;
3445 3446 spd_stack_t *spds = ss->spdsock_spds;
3446 3447
3447 3448 /*
3448 3449 * If we're dumping, defer processing other messages until the
3449 3450 * dump completes.
3450 3451 */
3451 3452 if (ss->spdsock_dump_req != NULL) {
3452 3453 if (!putq(q, mp))
3453 3454 freemsg(mp);
3454 3455 return;
3455 3456 }
3456 3457
3457 3458 switch (mp->b_datap->db_type) {
3458 3459 case M_DATA:
3459 3460 /*
3460 3461 * Silently discard.
3461 3462 */
3462 3463 ss2dbg(spds, ("raw M_DATA in spdsock.\n"));
3463 3464 freemsg(mp);
3464 3465 return;
3465 3466 case M_PROTO:
3466 3467 case M_PCPROTO:
3467 3468 if ((mp->b_wptr - rptr) >= sizeof (struct T_data_req)) {
3468 3469 if (((union T_primitives *)rptr)->type == T_DATA_REQ) {
3469 3470 if ((mp1 = mp->b_cont) == NULL) {
3470 3471 /* No data after T_DATA_REQ. */
3471 3472 ss2dbg(spds,
3472 3473 ("No data after DATA_REQ.\n"));
3473 3474 freemsg(mp);
3474 3475 return;
3475 3476 }
3476 3477 freeb(mp);
3477 3478 mp = mp1;
3478 3479 ss2dbg(spds, ("T_DATA_REQ\n"));
3479 3480 break; /* Out of switch. */
3480 3481 }
3481 3482 }
3482 3483 /* FALLTHRU */
3483 3484 default:
3484 3485 ss3dbg(spds, ("In default wput case (%d %d).\n",
3485 3486 mp->b_datap->db_type, ((union T_primitives *)rptr)->type));
3486 3487 spdsock_wput_other(q, mp);
3487 3488 return;
3488 3489 }
3489 3490
3490 3491 /* I now have a PF_POLICY message in an M_DATA block. */
3491 3492 spdsock_parse(q, mp);
3492 3493 }
3493 3494
3494 3495 /*
3495 3496 * Device open procedure, called when new queue pair created.
3496 3497 * We are passed the read-side queue.
3497 3498 */
3498 3499 /* ARGSUSED */
3499 3500 static int
3500 3501 spdsock_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
3501 3502 {
3502 3503 spdsock_t *ss;
3503 3504 queue_t *oq = OTHERQ(q);
3504 3505 minor_t ssminor;
3505 3506 netstack_t *ns;
3506 3507 spd_stack_t *spds;
3507 3508
3508 3509 if (secpolicy_ip_config(credp, B_FALSE) != 0)
3509 3510 return (EPERM);
3510 3511
3511 3512 if (q->q_ptr != NULL)
3512 3513 return (0); /* Re-open of an already open instance. */
3513 3514
3514 3515 if (sflag & MODOPEN)
3515 3516 return (EINVAL);
3516 3517
3517 3518 ns = netstack_find_by_cred(credp);
3518 3519 ASSERT(ns != NULL);
3519 3520 spds = ns->netstack_spdsock;
3520 3521 ASSERT(spds != NULL);
3521 3522
3522 3523 ss2dbg(spds, ("Made it into PF_POLICY socket open.\n"));
3523 3524
3524 3525 ssminor = (minor_t)(uintptr_t)vmem_alloc(spdsock_vmem, 1, VM_NOSLEEP);
3525 3526 if (ssminor == 0) {
3526 3527 netstack_rele(spds->spds_netstack);
3527 3528 return (ENOMEM);
3528 3529 }
3529 3530 ss = kmem_zalloc(sizeof (spdsock_t), KM_NOSLEEP);
3530 3531 if (ss == NULL) {
3531 3532 vmem_free(spdsock_vmem, (void *)(uintptr_t)ssminor, 1);
3532 3533 netstack_rele(spds->spds_netstack);
3533 3534 return (ENOMEM);
3534 3535 }
3535 3536
3536 3537 ss->spdsock_minor = ssminor;
3537 3538 ss->spdsock_state = TS_UNBND;
3538 3539 ss->spdsock_dump_req = NULL;
3539 3540
3540 3541 ss->spdsock_spds = spds;
3541 3542
3542 3543 q->q_ptr = ss;
3543 3544 oq->q_ptr = ss;
3544 3545
3545 3546 q->q_hiwat = spds->spds_recv_hiwat;
3546 3547
3547 3548 oq->q_hiwat = spds->spds_xmit_hiwat;
3548 3549 oq->q_lowat = spds->spds_xmit_lowat;
3549 3550
3550 3551 qprocson(q);
3551 3552 (void) proto_set_rx_hiwat(q, NULL, spds->spds_recv_hiwat);
3552 3553
3553 3554 *devp = makedevice(getmajor(*devp), ss->spdsock_minor);
3554 3555 return (0);
3555 3556 }
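[Reviewer note, illustration only — not part of the diff.] spdsock_open() hands each new queue pair a minor number drawn from the spdsock_vmem arena and returns it via makedevice(). A minimal sketch of that vmem-as-ID-allocator pattern follows; the arena name, pool size, and function names are assumptions for the example, not the module's actual init code:

	#include <sys/types.h>
	#include <sys/vmem.h>

	static vmem_t *example_minor_arena;

	static void
	example_minor_arena_init(void)
	{
		/* Base address 1, so a return of 0 from vmem_alloc() means failure. */
		example_minor_arena = vmem_create("example_minors", (void *)1,
		    1 << 16, 1, NULL, NULL, NULL, 0, VM_SLEEP);
	}

	static minor_t
	example_minor_alloc(void)
	{
		/* VM_NOSLEEP lets an open routine fail cleanly instead of blocking. */
		return ((minor_t)(uintptr_t)vmem_alloc(example_minor_arena, 1,
		    VM_NOSLEEP));
	}

	static void
	example_minor_free(minor_t m)
	{
		/* Return the ID to the arena on close. */
		vmem_free(example_minor_arena, (void *)(uintptr_t)m, 1);
	}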
3556 3557
3557 3558 /*
3558 3559 * Read-side service procedure, invoked when we get back-enabled
3559 3560 * when buffer space becomes available.
3560 3561 *
3561 3562 * Dump another chunk if we were dumping before; when we finish, kick
3562 3563 * the write-side queue in case it's waiting for read queue space.
3563 3564 */
3564 3565 void
3565 3566 spdsock_rsrv(queue_t *q)
3566 3567 {
3567 3568 spdsock_t *ss = q->q_ptr;
3568 3569
3569 3570 if (ss->spdsock_dump_req != NULL)
3570 3571 spdsock_dump_some(q, ss);
3571 3572
3572 3573 if (ss->spdsock_dump_req == NULL)
3573 3574 qenable(OTHERQ(q));
3574 3575 }
3575 3576
3576 3577 /*
3577 3578 * Write-side service procedure, invoked when we defer processing
3578 3579 * if another message is received while a dump is in progress.
3579 3580 */
3580 3581 void
3581 3582 spdsock_wsrv(queue_t *q)
3582 3583 {
3583 3584 spdsock_t *ss = q->q_ptr;
3584 3585 mblk_t *mp;
3585 3586 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;
3586 3587
3587 3588 if (ss->spdsock_dump_req != NULL) {
3588 3589 qenable(OTHERQ(q));
3589 3590 return;
3590 3591 }
3591 3592
3592 3593 while ((mp = getq(q)) != NULL) {
3593 3594 if (ipsec_loaded(ipss)) {
3594 3595 spdsock_wput(q, mp);
3595 3596 if (ss->spdsock_dump_req != NULL)
3596 3597 return;
3597 3598 } else if (!ipsec_failed(ipss)) {
3598 3599 (void) putq(q, mp);
3599 3600 } else {
3600 3601 spdsock_error(q, mp, EPFNOSUPPORT, 0);
3601 3602 }
3602 3603 }
3603 3604 }
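[Reviewer note, illustration only — not part of the diff.] spdsock_rsrv()/spdsock_wsrv() above are specializations of the standard STREAMS flow-control contract: a service procedure drains its queue, re-queues when it cannot make progress, and is re-run when the stream back-enables it (spdsock additionally gates on an in-progress dump). A generic sketch of that contract, with an assumed function name:

	#include <sys/stream.h>
	#include <sys/ddi.h>

	static void
	example_wsrv(queue_t *q)
	{
		mblk_t *mp;

		while ((mp = getq(q)) != NULL) {
			if (!canputnext(q)) {
				/* Put it back; STREAMS back-enables us later. */
				(void) putbq(q, mp);
				return;
			}
			putnext(q, mp);
		}
	}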
3604 3605
3605 3606 static int
3606 3607 spdsock_close(queue_t *q)
3607 3608 {
3608 3609 spdsock_t *ss = q->q_ptr;
3609 3610 spd_stack_t *spds = ss->spdsock_spds;
3610 3611
3611 3612 qprocsoff(q);
3612 3613
3613 3614 /* Safe assumption. */
3614 3615 ASSERT(ss != NULL);
3615 3616
3616 3617 if (ss->spdsock_timeout != 0)
3617 3618 (void) quntimeout(q, ss->spdsock_timeout);
3618 3619
3619 3620 ss3dbg(spds, ("Driver close, PF_POLICY socket is going away.\n"));
3620 3621
3621 3622 vmem_free(spdsock_vmem, (void *)(uintptr_t)ss->spdsock_minor, 1);
3622 3623 netstack_rele(ss->spdsock_spds->spds_netstack);
3623 3624
3624 3625 kmem_free(ss, sizeof (spdsock_t));
3625 3626 return (0);
3626 3627 }
3627 3628
3628 3629 /*
3629 3630 * Merge the IPsec algorithms tables with the received algorithm information.
3630 3631 */
3631 3632 void
3632 3633 spdsock_merge_algs(spd_stack_t *spds)
3633 3634 {
3634 3635 ipsec_alginfo_t *alg, *oalg;
3635 3636 ipsec_algtype_t algtype;
3636 3637 uint_t algidx, algid, nalgs;
3637 3638 crypto_mech_name_t *mechs;
3638 3639 uint_t mech_count, mech_idx;
3639 3640 netstack_t *ns = spds->spds_netstack;
3640 3641 ipsec_stack_t *ipss = ns->netstack_ipsec;
3641 3642
3642 3643 ASSERT(MUTEX_HELD(&spds->spds_alg_lock));
3643 3644
3644 3645 /*
3645 3646 * Get the list of supported mechanisms from the crypto framework.
3646 3647 * If a mechanism is supported by KCF, resolve its mechanism
3647 3648 * id and mark it as being valid. This operation must be done
3648 3649 * without holding alg_lock, since it can cause a provider
3649 3650 * module to be loaded and the provider notification callback to
3650 3651 * be invoked.
3651 3652 */
3652 3653 mechs = crypto_get_mech_list(&mech_count, KM_SLEEP);
3653 3654 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3654 3655 for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
3655 3656 int algflags = 0;
3656 3657 crypto_mech_type_t mt = CRYPTO_MECHANISM_INVALID;
3657 3658
3658 3659 alg = spds->spds_algs[algtype][algid];
3659 3660 if (alg == NULL)
3660 3661 continue;
3661 3662
3662 3663 /*
3663 3664 * The NULL encryption algorithm is a special
3664 3665 * case because there are no mechanisms, yet
3665 3666 * the algorithm is still valid.
3666 3667 */
3667 3668 if (alg->alg_id == SADB_EALG_NULL) {
3668 3669 alg->alg_mech_type = CRYPTO_MECHANISM_INVALID;
3669 3670 alg->alg_flags |= ALG_FLAG_VALID;
3670 3671 continue;
3671 3672 }
3672 3673
3673 3674 for (mech_idx = 0; mech_idx < mech_count; mech_idx++) {
3674 3675 if (strncmp(alg->alg_mech_name, mechs[mech_idx],
3675 3676 CRYPTO_MAX_MECH_NAME) == 0) {
3676 3677 mt = crypto_mech2id(alg->alg_mech_name);
3677 3678 ASSERT(mt != CRYPTO_MECHANISM_INVALID);
3678 3679 algflags = ALG_FLAG_VALID;
3679 3680 break;
3680 3681 }
3681 3682 }
3682 3683 alg->alg_mech_type = mt;
3683 3684 alg->alg_flags |= algflags;
3684 3685 }
3685 3686 }
3686 3687
3687 - mutex_enter(&ipss->ipsec_alg_lock);
3688 + rw_enter(&ipss->ipsec_alg_lock, RW_WRITER);
3688 3689
3689 3690 /*
3690 3691 * For each algorithm currently defined, check if it is
3691 3692 * present in the new tables created from the SPD_UPDATEALGS
3692 3693 * message received from user-space.
3693 3694 * Delete the algorithm entries that are currently defined
3694 3695 * but not part of the new tables.
3695 3696 */
3696 3697 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3697 3698 nalgs = ipss->ipsec_nalgs[algtype];
3698 3699 for (algidx = 0; algidx < nalgs; algidx++) {
3699 3700 algid = ipss->ipsec_sortlist[algtype][algidx];
3700 3701 if (spds->spds_algs[algtype][algid] == NULL)
3701 3702 ipsec_alg_unreg(algtype, algid, ns);
3702 3703 }
3703 3704 }
3704 3705
3705 3706 /*
3706 3707 * For each algorithm we just received, check if it is
3707 3708 * present in the currently defined tables. If it is, swap
3708 3709 * the entry with the one we just allocated.
3709 3710 * If the new algorithm is not in the current tables,
3710 3711 * add it.
3711 3712 */
3712 3713 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3713 3714 for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
3714 3715 alg = spds->spds_algs[algtype][algid];
3715 3716 if (alg == NULL)
3716 3717 continue;
3717 3718
3718 3719 if ((oalg = ipss->ipsec_alglists[algtype][algid]) ==
3719 3720 NULL) {
3720 3721 /*
3721 3722 * New algorithm, add it to the algorithm
3722 3723 * table.
3723 3724 */
3724 3725 ipsec_alg_reg(algtype, alg, ns);
3725 3726 } else {
3726 3727 /*
3727 3728 * Algorithm is already in the table. Swap
3728 3729 * the existing entry with the new one.
3729 3730 */
3730 3731 ipsec_alg_fix_min_max(alg, algtype, ns);
3731 3732 ipss->ipsec_alglists[algtype][algid] = alg;
3732 3733 ipsec_alg_free(oalg);
3733 3734 }
3734 3735 spds->spds_algs[algtype][algid] = NULL;
3735 3736 }
3736 3737 }
3737 3738
3738 3739 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3739 3740 ipss->ipsec_algs_exec_mode[algtype] =
3740 3741 spds->spds_algs_exec_mode[algtype];
3741 3742 }
3742 3743
3743 - mutex_exit(&ipss->ipsec_alg_lock);
3744 + rw_exit(&ipss->ipsec_alg_lock);
3744 3745
3745 3746 crypto_free_mech_list(mechs, mech_count);
3746 3747
3747 3748 ipsecah_algs_changed(ns);
3748 3749 ipsecesp_algs_changed(ns);
3749 3750 }
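[Reviewer note, illustration only — not part of the diff.] The substantive change in this hunk is converting ipsec_alg_lock from a mutex to a krwlock_t: table lookups can now run concurrently as readers, while SPD_UPDATEALGS-driven merges like the one above take the lock exclusively. A minimal reader/writer sketch of that pattern, with assumed type and function names:

	#include <sys/ksynch.h>

	typedef struct example_algtab {
		krwlock_t	eat_lock;
		void		*eat_algs[64];
	} example_algtab_t;

	static void
	example_algtab_init(example_algtab_t *t)
	{
		rw_init(&t->eat_lock, NULL, RW_DEFAULT, NULL);
	}

	static void *
	example_alg_lookup(example_algtab_t *t, int id)
	{
		void *alg;

		rw_enter(&t->eat_lock, RW_READER);	/* many readers at once */
		alg = t->eat_algs[id];
		rw_exit(&t->eat_lock);
		return (alg);
	}

	static void
	example_alg_replace(example_algtab_t *t, int id, void *newalg)
	{
		rw_enter(&t->eat_lock, RW_WRITER);	/* excludes all readers */
		t->eat_algs[id] = newalg;
		rw_exit(&t->eat_lock);
	}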