Print this page
5513 KM_NORMALPRI should be documented in kmem_alloc(9f) and kmem_cache_create(9f) man pages
14465 Present KM_NOSLEEP_LAZY as documented interface
Change-Id: I002ec28ddf390650f1fcba1ca94f6abfdb241439
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/inet/ip/ip_attr.c
+++ new/usr/src/uts/common/inet/ip/ip_attr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25 /* Copyright (c) 1990 Mentat Inc. */
26 26
27 27 /*
28 28 * Copyright 2019 Joyent, Inc.
29 29 */
30 30
31 31 #include <sys/types.h>
32 32 #include <sys/stream.h>
33 33 #include <sys/strsun.h>
34 34 #include <sys/zone.h>
35 35 #include <sys/ddi.h>
36 36 #include <sys/sunddi.h>
37 37 #include <sys/cmn_err.h>
38 38 #include <sys/debug.h>
39 39 #include <sys/atomic.h>
40 40
41 41 #include <sys/systm.h>
42 42 #include <sys/param.h>
43 43 #include <sys/kmem.h>
44 44 #include <sys/sdt.h>
45 45 #include <sys/socket.h>
46 46 #include <sys/mac.h>
47 47 #include <net/if.h>
48 48 #include <net/if_arp.h>
49 49 #include <net/route.h>
50 50 #include <sys/sockio.h>
51 51 #include <netinet/in.h>
52 52 #include <net/if_dl.h>
53 53
54 54 #include <inet/common.h>
55 55 #include <inet/mi.h>
56 56 #include <inet/mib2.h>
57 57 #include <inet/nd.h>
58 58 #include <inet/arp.h>
59 59 #include <inet/snmpcom.h>
60 60 #include <inet/kstatcom.h>
61 61
62 62 #include <netinet/igmp_var.h>
63 63 #include <netinet/ip6.h>
64 64 #include <netinet/icmp6.h>
65 65 #include <netinet/sctp.h>
66 66
67 67 #include <inet/ip.h>
68 68 #include <inet/ip_impl.h>
69 69 #include <inet/ip6.h>
70 70 #include <inet/ip6_asp.h>
71 71 #include <inet/tcp.h>
72 72 #include <inet/ip_multi.h>
73 73 #include <inet/ip_if.h>
74 74 #include <inet/ip_ire.h>
75 75 #include <inet/ip_ftable.h>
76 76 #include <inet/ip_rts.h>
77 77 #include <inet/optcom.h>
78 78 #include <inet/ip_ndp.h>
79 79 #include <inet/ip_listutils.h>
80 80 #include <netinet/igmp.h>
81 81 #include <netinet/ip_mroute.h>
82 82 #include <inet/ipp_common.h>
83 83
84 84 #include <net/pfkeyv2.h>
85 85 #include <inet/sadb.h>
86 86 #include <inet/ipsec_impl.h>
87 87 #include <inet/ipdrop.h>
88 88 #include <inet/ip_netinfo.h>
89 89 #include <sys/squeue_impl.h>
90 90 #include <sys/squeue.h>
91 91
92 92 #include <inet/ipclassifier.h>
93 93 #include <inet/sctp_ip.h>
94 94 #include <inet/sctp/sctp_impl.h>
95 95 #include <inet/udp_impl.h>
96 96 #include <sys/sunddi.h>
97 97
98 98 #include <sys/tsol/label.h>
99 99 #include <sys/tsol/tnet.h>
100 100
/*
 * Release a reference on ip_xmit_attr.
 * The reference is acquired by conn_get_ixa().
 * When the last reference is dropped, ixa_inactive() tears the ixa down.
 *
 * This macro has a lowercase function-call version for callers outside
 * this file.
 */
#define	IXA_REFRELE(ixa) \
{ \
	if (atomic_dec_32_nv(&(ixa)->ixa_refcnt) == 0) \
		ixa_inactive(ixa); \
}

/*
 * Add a reference to an ixa. The ASSERT enforces that a hold may only
 * be placed on an ixa that is already held (refcnt != 0), i.e. never on
 * one that may concurrently be torn down by ixa_inactive().
 */
#define	IXA_REFHOLD(ixa) \
{ \
	ASSERT3U((ixa)->ixa_refcnt, !=, 0); \
	atomic_inc_32(&(ixa)->ixa_refcnt); \
}
119 119
/*
 * When we need to handle a transmit side asynchronous operation, then we need
 * to save sufficient information so that we can call the fragment and postfrag
 * functions. That information is captured in an mblk containing this structure.
 *
 * The mblk is built by ip_xmit_attr_to_mblk() and consumed by either
 * ip_xmit_attr_from_mblk() or ip_xmit_attr_free_mblk(). Pointer fields
 * noted below carry a refhold that those consumers take over or release.
 *
 * Since this is currently only used for IPsec, we include information for
 * the kernel crypto framework.
 */
typedef struct ixamblk_s {
	boolean_t	ixm_inbound;	/* B_FALSE (distinguishes from iramblk_t) */
	iaflags_t	ixm_flags;	/* ixa_flags */
	netstackid_t	ixm_stackid;	/* Verify it didn't go away */
	uint_t		ixm_ifindex;	/* Used to find the nce */
	in6_addr_t	ixm_nceaddr_v6;	/* Used to find nce */
#define	ixm_nceaddr_v4	V4_PART_OF_V6(ixm_nceaddr_v6)
	uint32_t	ixm_fragsize;
	uint_t		ixm_pktlen;
	uint16_t	ixm_ip_hdr_length; /* Points to ULP header */
	uint8_t		ixm_protocol;	/* Protocol number for ULP cksum */
	pfirepostfrag_t	ixm_postfragfn;

	zoneid_t	ixm_zoneid;	/* Needed for ipobs */
	zoneid_t	ixm_no_loop_zoneid; /* IXAF_NO_LOOP_ZONEID_SET */

	uint_t		ixm_scopeid;	/* For IPv6 link-locals */

	uint32_t	ixm_ident;	/* For IPv6 fragment header */
	uint32_t	ixm_xmit_hint;

	uint64_t	ixm_conn_id;	/* Used by DTrace */
	cred_t		*ixm_cred;	/* For getpeerucred - refhold if set */
	pid_t		ixm_cpid;	/* For getpeerucred */

	ts_label_t	*ixm_tsl;	/* Refhold if set. */

	/*
	 * When the pointers below are set they have a refhold on the struct.
	 */
	ipsec_latch_t		*ixm_ipsec_latch;
	struct ipsa_s		*ixm_ipsec_ah_sa;	/* SA for AH */
	struct ipsa_s		*ixm_ipsec_esp_sa;	/* SA for ESP */
	struct ipsec_policy_s	*ixm_ipsec_policy;	/* why are we here? */
	struct ipsec_action_s	*ixm_ipsec_action; /* For reflected packets */

	ipsa_ref_t	ixm_ipsec_ref[2]; /* Soft reference to SA */

	/* Need these while waiting for SA */
	uint16_t ixm_ipsec_src_port;	/* Source port number of d-gram. */
	uint16_t ixm_ipsec_dst_port;	/* Destination port number of d-gram. */
	uint8_t  ixm_ipsec_icmp_type;	/* ICMP type of d-gram */
	uint8_t  ixm_ipsec_icmp_code;	/* ICMP code of d-gram */

	sa_family_t ixm_ipsec_inaf;	/* Inner address family */
	uint32_t ixm_ipsec_insrc[IXA_MAX_ADDRLEN];	/* Inner src address */
	uint32_t ixm_ipsec_indst[IXA_MAX_ADDRLEN];	/* Inner dest address */
	uint8_t  ixm_ipsec_insrcpfx;	/* Inner source prefix */
	uint8_t  ixm_ipsec_indstpfx;	/* Inner destination prefix */

	uint8_t ixm_ipsec_proto;	/* IP protocol number for d-gram. */
} ixamblk_t;
180 180
181 181
/*
 * When we need to handle a receive side asynchronous operation, then we need
 * to save sufficient information so that we can call ip_fanout.
 * That information is captured in an mblk containing this structure.
 *
 * The mblk is built by ip_recv_attr_to_mblk() and consumed by either
 * ip_recv_attr_from_mblk() or ip_recv_attr_free_mblk(). Pointer fields
 * noted below carry a refhold that those consumers take over or release.
 *
 * Since this is currently only used for IPsec, we include information for
 * the kernel crypto framework.
 */
typedef struct iramblk_s {
	boolean_t	irm_inbound;	/* B_TRUE (distinguishes from ixamblk_t) */
	iaflags_t	irm_flags;	/* ira_flags */
	netstackid_t	irm_stackid;	/* Verify it didn't go away; -1 if none */
	uint_t		irm_ifindex;	/* To find ira_ill */

	uint_t		irm_rifindex;	/* ira_rifindex */
	uint_t		irm_ruifindex;	/* ira_ruifindex */
	uint_t		irm_pktlen;
	uint16_t	irm_ip_hdr_length; /* Points to ULP header */
	uint8_t		irm_protocol;	/* Protocol number for ULP cksum */
	zoneid_t	irm_zoneid;	/* ALL_ZONES unless local delivery */

	squeue_t	*irm_sqp;
	ill_rx_ring_t	*irm_ring;

	ipaddr_t	irm_mroute_tunnel; /* IRAF_MROUTE_TUNNEL_SET */
	zoneid_t	irm_no_loop_zoneid; /* IRAF_NO_LOOP_ZONEID_SET */
	uint32_t	irm_esp_udp_ports; /* IRAF_ESP_UDP_PORTS */

	char		irm_l2src[IRA_L2SRC_SIZE];	/* If IRAF_L2SRC_SET */

	cred_t		*irm_cred;	/* For getpeerucred - refhold if set */
	pid_t		irm_cpid;	/* For getpeerucred */

	ts_label_t	*irm_tsl;	/* Refhold if set. */

	/*
	 * When set these correspond to a refhold on the object.
	 */
	struct ipsa_s		*irm_ipsec_ah_sa;	/* SA for AH */
	struct ipsa_s		*irm_ipsec_esp_sa;	/* SA for ESP */
	struct ipsec_action_s	*irm_ipsec_action; /* For reflected packets */
} iramblk_t;
224 224
225 225
/*
 * Take the information in ip_xmit_attr_t and stick it in an mblk
 * that can later be passed to ip_xmit_attr_from_mblk to recreate the
 * ip_xmit_attr_t.
 *
 * The caller must have a valid ixa_nce; we record its ill ifindex and
 * address so that from_mblk can re-lookup the same nce later. Every
 * pointer copied into the mblk (cred, tsl, IPsec SAs/policy/action/latch)
 * gets its own refhold here; ip_xmit_attr_free_mblk releases them.
 *
 * Returns NULL on memory allocation failure.
 */
mblk_t *
ip_xmit_attr_to_mblk(ip_xmit_attr_t *ixa)
{
	mblk_t		*ixamp;
	ixamblk_t	*ixm;
	nce_t		*nce = ixa->ixa_nce;

	ASSERT(nce != NULL);
	ixamp = allocb(sizeof (*ixm), BPRI_MED);
	if (ixamp == NULL)
		return (NULL);

	/* M_BREAK marks this mblk as carrying attributes, not data */
	ixamp->b_datap->db_type = M_BREAK;
	ixamp->b_wptr += sizeof (*ixm);
	ixm = (ixamblk_t *)ixamp->b_rptr;

	bzero(ixm, sizeof (*ixm));
	ixm->ixm_inbound = B_FALSE;
	ixm->ixm_flags = ixa->ixa_flags;
	ixm->ixm_stackid = ixa->ixa_ipst->ips_netstack->netstack_stackid;
	ixm->ixm_ifindex = nce->nce_ill->ill_phyint->phyint_ifindex;
	ixm->ixm_nceaddr_v6 = nce->nce_addr;
	ixm->ixm_fragsize = ixa->ixa_fragsize;
	ixm->ixm_pktlen = ixa->ixa_pktlen;
	ixm->ixm_ip_hdr_length = ixa->ixa_ip_hdr_length;
	ixm->ixm_protocol = ixa->ixa_protocol;
	ixm->ixm_postfragfn = ixa->ixa_postfragfn;
	ixm->ixm_zoneid = ixa->ixa_zoneid;
	ixm->ixm_no_loop_zoneid = ixa->ixa_no_loop_zoneid;
	ixm->ixm_scopeid = ixa->ixa_scopeid;
	ixm->ixm_ident = ixa->ixa_ident;
	ixm->ixm_xmit_hint = ixa->ixa_xmit_hint;

	if (ixa->ixa_tsl != NULL) {
		ixm->ixm_tsl = ixa->ixa_tsl;
		label_hold(ixm->ixm_tsl);
	}
	if (ixa->ixa_cred != NULL) {
		ixm->ixm_cred = ixa->ixa_cred;
		crhold(ixa->ixa_cred);
	}
	ixm->ixm_cpid = ixa->ixa_cpid;
	ixm->ixm_conn_id = ixa->ixa_conn_id;

	/* IPsec state is only meaningful (and refheld) for secure packets */
	if (ixa->ixa_flags & IXAF_IPSEC_SECURE) {
		if (ixa->ixa_ipsec_ah_sa != NULL) {
			ixm->ixm_ipsec_ah_sa = ixa->ixa_ipsec_ah_sa;
			IPSA_REFHOLD(ixa->ixa_ipsec_ah_sa);
		}
		if (ixa->ixa_ipsec_esp_sa != NULL) {
			ixm->ixm_ipsec_esp_sa = ixa->ixa_ipsec_esp_sa;
			IPSA_REFHOLD(ixa->ixa_ipsec_esp_sa);
		}
		if (ixa->ixa_ipsec_policy != NULL) {
			ixm->ixm_ipsec_policy = ixa->ixa_ipsec_policy;
			IPPOL_REFHOLD(ixa->ixa_ipsec_policy);
		}
		if (ixa->ixa_ipsec_action != NULL) {
			ixm->ixm_ipsec_action = ixa->ixa_ipsec_action;
			IPACT_REFHOLD(ixa->ixa_ipsec_action);
		}
		if (ixa->ixa_ipsec_latch != NULL) {
			ixm->ixm_ipsec_latch = ixa->ixa_ipsec_latch;
			IPLATCH_REFHOLD(ixa->ixa_ipsec_latch);
		}
		ixm->ixm_ipsec_ref[0] = ixa->ixa_ipsec_ref[0];
		ixm->ixm_ipsec_ref[1] = ixa->ixa_ipsec_ref[1];
		ixm->ixm_ipsec_src_port = ixa->ixa_ipsec_src_port;
		ixm->ixm_ipsec_dst_port = ixa->ixa_ipsec_dst_port;
		ixm->ixm_ipsec_icmp_type = ixa->ixa_ipsec_icmp_type;
		ixm->ixm_ipsec_icmp_code = ixa->ixa_ipsec_icmp_code;
		ixm->ixm_ipsec_inaf = ixa->ixa_ipsec_inaf;
		ixm->ixm_ipsec_insrc[0] = ixa->ixa_ipsec_insrc[0];
		ixm->ixm_ipsec_insrc[1] = ixa->ixa_ipsec_insrc[1];
		ixm->ixm_ipsec_insrc[2] = ixa->ixa_ipsec_insrc[2];
		ixm->ixm_ipsec_insrc[3] = ixa->ixa_ipsec_insrc[3];
		ixm->ixm_ipsec_indst[0] = ixa->ixa_ipsec_indst[0];
		ixm->ixm_ipsec_indst[1] = ixa->ixa_ipsec_indst[1];
		ixm->ixm_ipsec_indst[2] = ixa->ixa_ipsec_indst[2];
		ixm->ixm_ipsec_indst[3] = ixa->ixa_ipsec_indst[3];
		ixm->ixm_ipsec_insrcpfx = ixa->ixa_ipsec_insrcpfx;
		ixm->ixm_ipsec_indstpfx = ixa->ixa_ipsec_indstpfx;
		ixm->ixm_ipsec_proto = ixa->ixa_ipsec_proto;
	}
	return (ixamp);
}
319 319
/*
 * Extract the ip_xmit_attr_t from the mblk, checking that the
 * ip_stack_t, ill_t, and nce_t still exist. Returns B_FALSE if that is
 * not the case (the mblk and everything it holds are freed first).
 *
 * Otherwise ixa is updated.
 * Caller needs to release references on the ixa by calling ixa_refrele()
 * which will immediately call ixa_inactive to release the references.
 */
boolean_t
ip_xmit_attr_from_mblk(mblk_t *ixamp, ip_xmit_attr_t *ixa)
{
	ixamblk_t	*ixm;
	netstack_t	*ns;
	ip_stack_t	*ipst;
	ill_t		*ill;
	nce_t		*nce;

	/* We assume the caller hasn't initialized ixa */
	bzero(ixa, sizeof (*ixa));

	ASSERT(DB_TYPE(ixamp) == M_BREAK);
	ASSERT(ixamp->b_cont == NULL);

	ixm = (ixamblk_t *)ixamp->b_rptr;
	ASSERT(!ixm->ixm_inbound);

	/* Verify the netstack is still around */
	ns = netstack_find_by_stackid(ixm->ixm_stackid);
	if (ns == NULL) {
		/* Disappeared on us */
		(void) ip_xmit_attr_free_mblk(ixamp);
		return (B_FALSE);
	}
	ipst = ns->netstack_ip;

	/* Verify the ill is still around */
	ill = ill_lookup_on_ifindex(ixm->ixm_ifindex,
	    !(ixm->ixm_flags & IXAF_IS_IPV4), ipst);

	/* We have the ill, hence the netstack can't go away */
	netstack_rele(ns);
	if (ill == NULL) {
		/* Disappeared on us */
		(void) ip_xmit_attr_free_mblk(ixamp);
		return (B_FALSE);
	}
	/*
	 * Find the nce. We don't load-spread (only lookup nce's on the ill)
	 * because we want to find the same nce as the one we had when
	 * ip_xmit_attr_to_mblk was called.
	 */
	if (ixm->ixm_flags & IXAF_IS_IPV4) {
		nce = nce_lookup_v4(ill, &ixm->ixm_nceaddr_v4);
	} else {
		nce = nce_lookup_v6(ill, &ixm->ixm_nceaddr_v6);
	}

	/* We have the nce, hence the ill can't go away */
	ill_refrele(ill);
	if (nce == NULL) {
		/*
		 * Since this is unusual and we don't know what type of
		 * nce it was, we drop the packet.
		 */
		(void) ip_xmit_attr_free_mblk(ixamp);
		return (B_FALSE);
	}

	ixa->ixa_flags = ixm->ixm_flags;
	ixa->ixa_refcnt = 1;
	ixa->ixa_ipst = ipst;
	ixa->ixa_fragsize = ixm->ixm_fragsize;
	ixa->ixa_pktlen = ixm->ixm_pktlen;
	ixa->ixa_ip_hdr_length = ixm->ixm_ip_hdr_length;
	ixa->ixa_protocol = ixm->ixm_protocol;
	ixa->ixa_nce = nce;
	ixa->ixa_postfragfn = ixm->ixm_postfragfn;
	ixa->ixa_zoneid = ixm->ixm_zoneid;
	ixa->ixa_no_loop_zoneid = ixm->ixm_no_loop_zoneid;
	ixa->ixa_scopeid = ixm->ixm_scopeid;
	ixa->ixa_ident = ixm->ixm_ident;
	ixa->ixa_xmit_hint = ixm->ixm_xmit_hint;

	/*
	 * Ownership of the tsl/cred refholds moves from the mblk to the
	 * ixa; the NULLed mblk fields and IXA_FREE_* flags record that.
	 */
	if (ixm->ixm_tsl != NULL) {
		ixa->ixa_tsl = ixm->ixm_tsl;
		ixa->ixa_free_flags |= IXA_FREE_TSL;
		ixm->ixm_tsl = NULL;
	}
	if (ixm->ixm_cred != NULL) {
		ixa->ixa_cred = ixm->ixm_cred;
		ixa->ixa_free_flags |= IXA_FREE_CRED;
		ixm->ixm_cred = NULL;
	}
	ixa->ixa_cpid = ixm->ixm_cpid;
	ixa->ixa_conn_id = ixm->ixm_conn_id;

	/* The IPsec refholds transfer from the mblk to the ixa as well */
	ixa->ixa_ipsec_ah_sa = ixm->ixm_ipsec_ah_sa;
	ixa->ixa_ipsec_esp_sa = ixm->ixm_ipsec_esp_sa;
	ixa->ixa_ipsec_policy = ixm->ixm_ipsec_policy;
	ixa->ixa_ipsec_action = ixm->ixm_ipsec_action;
	ixa->ixa_ipsec_latch = ixm->ixm_ipsec_latch;

	ixa->ixa_ipsec_ref[0] = ixm->ixm_ipsec_ref[0];
	ixa->ixa_ipsec_ref[1] = ixm->ixm_ipsec_ref[1];
	ixa->ixa_ipsec_src_port = ixm->ixm_ipsec_src_port;
	ixa->ixa_ipsec_dst_port = ixm->ixm_ipsec_dst_port;
	ixa->ixa_ipsec_icmp_type = ixm->ixm_ipsec_icmp_type;
	ixa->ixa_ipsec_icmp_code = ixm->ixm_ipsec_icmp_code;
	ixa->ixa_ipsec_inaf = ixm->ixm_ipsec_inaf;
	ixa->ixa_ipsec_insrc[0] = ixm->ixm_ipsec_insrc[0];
	ixa->ixa_ipsec_insrc[1] = ixm->ixm_ipsec_insrc[1];
	ixa->ixa_ipsec_insrc[2] = ixm->ixm_ipsec_insrc[2];
	ixa->ixa_ipsec_insrc[3] = ixm->ixm_ipsec_insrc[3];
	ixa->ixa_ipsec_indst[0] = ixm->ixm_ipsec_indst[0];
	ixa->ixa_ipsec_indst[1] = ixm->ixm_ipsec_indst[1];
	ixa->ixa_ipsec_indst[2] = ixm->ixm_ipsec_indst[2];
	ixa->ixa_ipsec_indst[3] = ixm->ixm_ipsec_indst[3];
	ixa->ixa_ipsec_insrcpfx = ixm->ixm_ipsec_insrcpfx;
	ixa->ixa_ipsec_indstpfx = ixm->ixm_ipsec_indstpfx;
	ixa->ixa_ipsec_proto = ixm->ixm_ipsec_proto;

	/* All refholds transferred; free just the mblk itself */
	freeb(ixamp);
	return (B_TRUE);
}
445 445
446 446 /*
447 447 * Free the ixm mblk and any references it holds
448 448 * Returns b_cont.
449 449 */
450 450 mblk_t *
451 451 ip_xmit_attr_free_mblk(mblk_t *ixamp)
452 452 {
453 453 ixamblk_t *ixm;
454 454 mblk_t *mp;
455 455
456 456 /* Consume mp */
457 457 ASSERT(DB_TYPE(ixamp) == M_BREAK);
458 458 mp = ixamp->b_cont;
459 459
460 460 ixm = (ixamblk_t *)ixamp->b_rptr;
461 461 ASSERT(!ixm->ixm_inbound);
462 462
463 463 if (ixm->ixm_ipsec_ah_sa != NULL) {
464 464 IPSA_REFRELE(ixm->ixm_ipsec_ah_sa);
465 465 ixm->ixm_ipsec_ah_sa = NULL;
466 466 }
467 467 if (ixm->ixm_ipsec_esp_sa != NULL) {
468 468 IPSA_REFRELE(ixm->ixm_ipsec_esp_sa);
469 469 ixm->ixm_ipsec_esp_sa = NULL;
470 470 }
471 471 if (ixm->ixm_ipsec_policy != NULL) {
472 472 IPPOL_REFRELE(ixm->ixm_ipsec_policy);
473 473 ixm->ixm_ipsec_policy = NULL;
474 474 }
475 475 if (ixm->ixm_ipsec_action != NULL) {
476 476 IPACT_REFRELE(ixm->ixm_ipsec_action);
477 477 ixm->ixm_ipsec_action = NULL;
478 478 }
479 479 if (ixm->ixm_ipsec_latch) {
480 480 IPLATCH_REFRELE(ixm->ixm_ipsec_latch);
481 481 ixm->ixm_ipsec_latch = NULL;
482 482 }
483 483
484 484 if (ixm->ixm_tsl != NULL) {
485 485 label_rele(ixm->ixm_tsl);
486 486 ixm->ixm_tsl = NULL;
487 487 }
488 488 if (ixm->ixm_cred != NULL) {
489 489 crfree(ixm->ixm_cred);
490 490 ixm->ixm_cred = NULL;
491 491 }
492 492 freeb(ixamp);
493 493 return (mp);
494 494 }
495 495
/*
 * Take the information in ip_recv_attr_t and stick it in an mblk
 * that can later be passed to ip_recv_attr_from_mblk to recreate the
 * ip_recv_attr_t.
 *
 * If ira_ill is NULL (caller outside IP proper), irm_stackid is set to
 * -1 so that from_mblk skips the netstack/ill revalidation. Every
 * pointer copied in (cred, tsl, IPsec SAs/action) gets its own refhold;
 * ip_recv_attr_free_mblk releases them.
 *
 * Returns NULL on memory allocation failure.
 */
mblk_t *
ip_recv_attr_to_mblk(ip_recv_attr_t *ira)
{
	mblk_t		*iramp;
	iramblk_t	*irm;
	ill_t		*ill = ira->ira_ill;

	ASSERT(ira->ira_ill != NULL || ira->ira_ruifindex != 0);

	iramp = allocb(sizeof (*irm), BPRI_MED);
	if (iramp == NULL)
		return (NULL);

	/* M_BREAK marks this mblk as carrying attributes, not data */
	iramp->b_datap->db_type = M_BREAK;
	iramp->b_wptr += sizeof (*irm);
	irm = (iramblk_t *)iramp->b_rptr;

	bzero(irm, sizeof (*irm));
	irm->irm_inbound = B_TRUE;
	irm->irm_flags = ira->ira_flags;
	if (ill != NULL) {
		/* Internal to IP - preserve ip_stack_t, ill and rill */
		irm->irm_stackid =
		    ill->ill_ipst->ips_netstack->netstack_stackid;
		irm->irm_ifindex = ira->ira_ill->ill_phyint->phyint_ifindex;
		ASSERT(ira->ira_rill->ill_phyint->phyint_ifindex ==
		    ira->ira_rifindex);
	} else {
		/* Let ip_recv_attr_from_stackid know there isn't one */
		irm->irm_stackid = -1;
	}
	irm->irm_rifindex = ira->ira_rifindex;
	irm->irm_ruifindex = ira->ira_ruifindex;
	irm->irm_pktlen = ira->ira_pktlen;
	irm->irm_ip_hdr_length = ira->ira_ip_hdr_length;
	irm->irm_protocol = ira->ira_protocol;

	irm->irm_sqp = ira->ira_sqp;
	irm->irm_ring = ira->ira_ring;

	irm->irm_zoneid = ira->ira_zoneid;
	irm->irm_mroute_tunnel = ira->ira_mroute_tunnel;
	irm->irm_no_loop_zoneid = ira->ira_no_loop_zoneid;
	irm->irm_esp_udp_ports = ira->ira_esp_udp_ports;

	if (ira->ira_tsl != NULL) {
		irm->irm_tsl = ira->ira_tsl;
		label_hold(irm->irm_tsl);
	}
	if (ira->ira_cred != NULL) {
		irm->irm_cred = ira->ira_cred;
		crhold(ira->ira_cred);
	}
	irm->irm_cpid = ira->ira_cpid;

	if (ira->ira_flags & IRAF_L2SRC_SET)
		bcopy(ira->ira_l2src, irm->irm_l2src, IRA_L2SRC_SIZE);

	/* IPsec state is only meaningful (and refheld) for secure packets */
	if (ira->ira_flags & IRAF_IPSEC_SECURE) {
		if (ira->ira_ipsec_ah_sa != NULL) {
			irm->irm_ipsec_ah_sa = ira->ira_ipsec_ah_sa;
			IPSA_REFHOLD(ira->ira_ipsec_ah_sa);
		}
		if (ira->ira_ipsec_esp_sa != NULL) {
			irm->irm_ipsec_esp_sa = ira->ira_ipsec_esp_sa;
			IPSA_REFHOLD(ira->ira_ipsec_esp_sa);
		}
		if (ira->ira_ipsec_action != NULL) {
			irm->irm_ipsec_action = ira->ira_ipsec_action;
			IPACT_REFHOLD(ira->ira_ipsec_action);
		}
	}
	return (iramp);
}
577 577
/*
 * Extract the ip_recv_attr_t from the mblk. If we are used inside IP
 * then irm_stackid is not -1, in which case we check that the
 * ip_stack_t and ill_t still exist. Returns B_FALSE if that is
 * not the case.
 * If irm_stackid is -1 then we are used by an ULP (e.g., squeue_enter)
 * and we just proceed with ira_ill and ira_rill as NULL.
 *
 * The caller needs to release any references on the pointers inside the ire
 * by calling ira_cleanup.
 */
boolean_t
ip_recv_attr_from_mblk(mblk_t *iramp, ip_recv_attr_t *ira)
{
	iramblk_t	*irm;
	netstack_t	*ns;
	ip_stack_t	*ipst = NULL;
	ill_t		*ill = NULL, *rill = NULL;

	/* We assume the caller hasn't initialized ira */
	bzero(ira, sizeof (*ira));

	ASSERT(DB_TYPE(iramp) == M_BREAK);
	ASSERT(iramp->b_cont == NULL);

	irm = (iramblk_t *)iramp->b_rptr;
	ASSERT(irm->irm_inbound);

	if (irm->irm_stackid != -1) {
		/* Verify the netstack is still around */
		ns = netstack_find_by_stackid(irm->irm_stackid);
		if (ns == NULL) {
			/* Disappeared on us */
			(void) ip_recv_attr_free_mblk(iramp);
			return (B_FALSE);
		}
		ipst = ns->netstack_ip;

		/* Verify the ill is still around */
		ill = ill_lookup_on_ifindex(irm->irm_ifindex,
		    !(irm->irm_flags & IRAF_IS_IPV4), ipst);

		/* rill may be the same ill; avoid a second lookup/hold */
		if (irm->irm_ifindex == irm->irm_rifindex) {
			rill = ill;
		} else {
			rill = ill_lookup_on_ifindex(irm->irm_rifindex,
			    !(irm->irm_flags & IRAF_IS_IPV4), ipst);
		}

		/* We have the ill, hence the netstack can't go away */
		netstack_rele(ns);
		if (ill == NULL || rill == NULL) {
			/* Disappeared on us */
			if (ill != NULL)
				ill_refrele(ill);
			if (rill != NULL && rill != ill)
				ill_refrele(rill);
			(void) ip_recv_attr_free_mblk(iramp);
			return (B_FALSE);
		}
	}

	ira->ira_flags = irm->irm_flags;
	/* Caller must ill_refrele(ira_ill) by using ira_cleanup() */
	ira->ira_ill = ill;
	ira->ira_rill = rill;

	ira->ira_rifindex = irm->irm_rifindex;
	ira->ira_ruifindex = irm->irm_ruifindex;
	ira->ira_pktlen = irm->irm_pktlen;
	ira->ira_ip_hdr_length = irm->irm_ip_hdr_length;
	ira->ira_protocol = irm->irm_protocol;

	ira->ira_sqp = irm->irm_sqp;
	/* The rest of IP assumes that the rings never go away. */
	ira->ira_ring = irm->irm_ring;

	ira->ira_zoneid = irm->irm_zoneid;
	ira->ira_mroute_tunnel = irm->irm_mroute_tunnel;
	ira->ira_no_loop_zoneid = irm->irm_no_loop_zoneid;
	ira->ira_esp_udp_ports = irm->irm_esp_udp_ports;

	/*
	 * Ownership of the tsl/cred refholds moves from the mblk to the
	 * ira; the NULLed mblk fields and IRA_FREE_* flags record that.
	 */
	if (irm->irm_tsl != NULL) {
		ira->ira_tsl = irm->irm_tsl;
		ira->ira_free_flags |= IRA_FREE_TSL;
		irm->irm_tsl = NULL;
	}
	if (irm->irm_cred != NULL) {
		ira->ira_cred = irm->irm_cred;
		ira->ira_free_flags |= IRA_FREE_CRED;
		irm->irm_cred = NULL;
	}
	ira->ira_cpid = irm->irm_cpid;

	if (ira->ira_flags & IRAF_L2SRC_SET)
		bcopy(irm->irm_l2src, ira->ira_l2src, IRA_L2SRC_SIZE);

	/* The IPsec refholds transfer from the mblk to the ira */
	ira->ira_ipsec_ah_sa = irm->irm_ipsec_ah_sa;
	ira->ira_ipsec_esp_sa = irm->irm_ipsec_esp_sa;
	ira->ira_ipsec_action = irm->irm_ipsec_action;

	freeb(iramp);
	return (B_TRUE);
}
682 682
/*
 * Free the irm mblk and release any references it holds (IPsec SAs,
 * action, label, cred). The data continuation, if any, is NOT freed.
 *
 * Returns b_cont.
 */
mblk_t *
ip_recv_attr_free_mblk(mblk_t *iramp)
{
	iramblk_t	*irm;
	mblk_t		*mp;

	/* Consume mp */
	ASSERT(DB_TYPE(iramp) == M_BREAK);
	mp = iramp->b_cont;

	irm = (iramblk_t *)iramp->b_rptr;
	ASSERT(irm->irm_inbound);

	if (irm->irm_ipsec_ah_sa != NULL) {
		IPSA_REFRELE(irm->irm_ipsec_ah_sa);
		irm->irm_ipsec_ah_sa = NULL;
	}
	if (irm->irm_ipsec_esp_sa != NULL) {
		IPSA_REFRELE(irm->irm_ipsec_esp_sa);
		irm->irm_ipsec_esp_sa = NULL;
	}
	if (irm->irm_ipsec_action != NULL) {
		IPACT_REFRELE(irm->irm_ipsec_action);
		irm->irm_ipsec_action = NULL;
	}
	if (irm->irm_tsl != NULL) {
		label_rele(irm->irm_tsl);
		irm->irm_tsl = NULL;
	}
	if (irm->irm_cred != NULL) {
		crfree(irm->irm_cred);
		irm->irm_cred = NULL;
	}

	freeb(iramp);
	return (mp);
}
724 724
/*
 * Returns true if the mblk contains an ip_recv_attr_t
 * For now we just check db_type.
 */
boolean_t
ip_recv_attr_is_mblk(mblk_t *mp)
{
	/*
	 * Need to handle the various forms of tcp_timermp which are tagged
	 * with b_wptr and might have a NULL b_datap.
	 */
	if (mp->b_wptr == NULL || mp->b_wptr == (uchar_t *)-1)
		return (B_FALSE);

#ifdef DEBUG
	/* On DEBUG kernels also sanity-check the payload direction flag */
	iramblk_t	*irm;

	if (DB_TYPE(mp) != M_BREAK)
		return (B_FALSE);

	irm = (iramblk_t *)mp->b_rptr;
	ASSERT(irm->irm_inbound);
	return (B_TRUE);
#else
	return (DB_TYPE(mp) == M_BREAK);
#endif
}
752 752
/*
 * Common implementation for conn_get_ixa() and conn_get_ixa_tryhard().
 *
 * Returns conn_ixa itself if no other thread is using it; otherwise
 * returns a "safe" copy allocated with `kmflag' (see ixa_safe_copy).
 * If `replace' is set, the copy also becomes the new conn_ixa.
 * Returns NULL only on allocation failure.
 *
 * NOTE(review): with KM_SLEEP callers (conn_get_ixa_tryhard) this
 * allocates while holding conn_lock -- confirm that is acceptable for
 * all such callers.
 */
static ip_xmit_attr_t *
conn_get_ixa_impl(conn_t *connp, boolean_t replace, int kmflag)
{
	ip_xmit_attr_t	*oldixa;	/* Already attached to conn_t */
	ip_xmit_attr_t	*ixa;		/* New one, which we return. */

	/*
	 * NOTE: If the marked-below common case isn't, move the
	 * kmem_alloc() up here and put a free in what was marked as the
	 * (not really) common case instead.
	 */

	mutex_enter(&connp->conn_lock);
	oldixa = connp->conn_ixa;

	/* At least one reference for the conn_t */
	ASSERT3U(oldixa->ixa_refcnt, >=, 1);
	if (atomic_inc_32_nv(&oldixa->ixa_refcnt) == 2) {
		/*
		 * Refcnt 2 == conn_t's reference plus the hold we just
		 * added: no other thread using conn_ixa (common case).
		 */
		mutex_exit(&connp->conn_lock);
		return (oldixa);
	}
	/* Do allocation inside-the-conn_lock because it's less common. */
	ixa = kmem_alloc(sizeof (*ixa), kmflag);
	if (ixa == NULL) {
		mutex_exit(&connp->conn_lock);
		IXA_REFRELE(oldixa);
		return (NULL);
	}
	ixa_safe_copy(oldixa, ixa);

	/* Make sure we drop conn_lock before any refrele */
	if (replace) {
		ixa->ixa_refcnt++;	/* No atomic needed - not visible */
		connp->conn_ixa = ixa;
		mutex_exit(&connp->conn_lock);
		IXA_REFRELE(oldixa);	/* Undo refcnt from conn_t */
	} else {
		mutex_exit(&connp->conn_lock);
	}
	IXA_REFRELE(oldixa);	/* Undo above atomic_inc_32_nv */

	return (ixa);
}
797 797
798 798 /*
799 799 * Return an ip_xmit_attr_t to use with a conn_t that ensures that only
800 800 * the caller can access the ip_xmit_attr_t.
801 801 *
802 802 * If nobody else is using conn_ixa we return it.
803 803 * Otherwise we make a "safe" copy of conn_ixa
804 804 * and return it. The "safe" copy has the pointers set to NULL
805 805 * (since the pointers might be changed by another thread using
806 806 * conn_ixa). The caller needs to check for NULL pointers to see
807 807 * if ip_set_destination needs to be called to re-establish the pointers.
808 808 *
809 809 * If 'replace' is set then we replace conn_ixa with the new ip_xmit_attr_t.
810 810 * That is used when we connect() the ULP.
811 811 */
812 812 ip_xmit_attr_t *
813 813 conn_get_ixa(conn_t *connp, boolean_t replace)
814 814 {
815 815 return (conn_get_ixa_impl(connp, replace, KM_NOSLEEP));
816 816 }
817 817
818 818 /*
819 819 * Used only when the option is to have the kernel hang due to not
820 820 * cleaning up ixa references on ills etc.
821 821 */
822 822 ip_xmit_attr_t *
823 823 conn_get_ixa_tryhard(conn_t *connp, boolean_t replace)
824 824 {
825 825 return (conn_get_ixa_impl(connp, replace, KM_SLEEP));
826 826 }
827 827
828 828 /*
829 829 * Replace conn_ixa with the ixa argument.
830 830 *
831 831 * The caller must hold conn_lock.
832 832 *
833 833 * We return the old ixa; the caller must ixa_refrele that after conn_lock
834 834 * has been dropped.
835 835 */
836 836 ip_xmit_attr_t *
837 837 conn_replace_ixa(conn_t *connp, ip_xmit_attr_t *ixa)
838 838 {
839 839 ip_xmit_attr_t *oldixa;
840 840
841 841 ASSERT(MUTEX_HELD(&connp->conn_lock));
842 842
843 843 oldixa = connp->conn_ixa;
844 844 IXA_REFHOLD(ixa);
845 845 ixa->ixa_conn_id = oldixa->ixa_conn_id;
846 846 connp->conn_ixa = ixa;
847 847 return (oldixa);
848 848 }
849 849
850 850 /*
851 851 * Return a ip_xmit_attr_t to use with a conn_t that is based on but
852 852 * separate from conn_ixa.
853 853 *
854 854 * This "safe" copy has the pointers set to NULL
|
↓ open down ↓ |
854 lines elided |
↑ open up ↑ |
855 855 * (since the pointers might be changed by another thread using
856 856 * conn_ixa). The caller needs to check for NULL pointers to see
857 857 * if ip_set_destination needs to be called to re-establish the pointers.
858 858 */
859 859 ip_xmit_attr_t *
860 860 conn_get_ixa_exclusive(conn_t *connp)
861 861 {
862 862 ip_xmit_attr_t *oldixa;
863 863 ip_xmit_attr_t *ixa;
864 864
865 - ixa = kmem_alloc(sizeof (*ixa), KM_NOSLEEP | KM_NORMALPRI);
865 + ixa = kmem_alloc(sizeof (*ixa), KM_NOSLEEP_LAZY);
866 866 if (ixa == NULL)
867 867 return (NULL);
868 868
869 869 mutex_enter(&connp->conn_lock);
870 870
871 871 oldixa = connp->conn_ixa;
872 872 IXA_REFHOLD(oldixa);
873 873
874 874 ixa_safe_copy(oldixa, ixa);
875 875 mutex_exit(&connp->conn_lock);
876 876 IXA_REFRELE(oldixa);
877 877 return (ixa);
878 878 }
879 879
/*
 * Copy `src' into `ixa', sanitizing the result so the copy can be used
 * independently of the original: the refcnt is reset to 1, cached
 * destination pointers are cleared (forcing revalidation), the IPsec
 * state is dropped, and extra holds are taken on any label/cred the
 * copy now shares with the original.
 */
void
ixa_safe_copy(ip_xmit_attr_t *src, ip_xmit_attr_t *ixa)
{
	bcopy(src, ixa, sizeof (*ixa));
	ixa->ixa_refcnt = 1;
	/*
	 * Clear any pointers that have references and might be changed
	 * by ip_set_destination or the ULP
	 */
	ixa->ixa_ire = NULL;
	ixa->ixa_nce = NULL;
	ixa->ixa_dce = NULL;
	/* Force re-verification of the cached destination state */
	ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
	ixa->ixa_dce_generation = DCE_GENERATION_VERIFY;
#ifdef DEBUG
	ixa->ixa_curthread = NULL;
#endif
	/* Clear all the IPsec pointers and the flag as well. */
	ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;

	ixa->ixa_ipsec_latch = NULL;
	ixa->ixa_ipsec_ah_sa = NULL;
	ixa->ixa_ipsec_esp_sa = NULL;
	ixa->ixa_ipsec_policy = NULL;
	ixa->ixa_ipsec_action = NULL;

	/*
	 * We leave ixa_tsl unchanged, but if it has a refhold we need
	 * to get an extra refhold.
	 */
	if (ixa->ixa_free_flags & IXA_FREE_TSL)
		label_hold(ixa->ixa_tsl);

	/*
	 * We leave ixa_cred unchanged, but if it has a refhold we need
	 * to get an extra refhold.
	 */
	if (ixa->ixa_free_flags & IXA_FREE_CRED)
		crhold(ixa->ixa_cred);

	/*
	 * There is no cleanup in progress on this new copy.
	 */
	ixa->ixa_tcpcleanup = IXATC_IDLE;
}
925 925
/*
 * Duplicate an ip_xmit_attr_t.
 * Assumes that the caller controls the ixa, hence we do not need to use
 * a safe copy. We just have to increase the refcnt on any pointers.
 *
 * Returns NULL on allocation failure.  Note this uses plain KM_NOSLEEP
 * (not KM_NOSLEEP_LAZY); presumably intentional — confirm against the
 * callers' memory-pressure expectations.
 */
ip_xmit_attr_t *
ip_xmit_attr_duplicate(ip_xmit_attr_t *src_ixa)
{
	ip_xmit_attr_t *ixa;

	ixa = kmem_alloc(sizeof (*ixa), KM_NOSLEEP);
	if (ixa == NULL)
		return (NULL);
	bcopy(src_ixa, ixa, sizeof (*ixa));
	ixa->ixa_refcnt = 1;

	/* The copy shares the cached entries, so take additional holds. */
	if (ixa->ixa_ire != NULL)
		ire_refhold_notr(ixa->ixa_ire);
	if (ixa->ixa_nce != NULL)
		nce_refhold(ixa->ixa_nce);
	if (ixa->ixa_dce != NULL)
		dce_refhold_notr(ixa->ixa_dce);

#ifdef DEBUG
	ixa->ixa_curthread = NULL;
#endif

	/* Likewise for any IPsec state carried along. */
	if (ixa->ixa_ipsec_latch != NULL)
		IPLATCH_REFHOLD(ixa->ixa_ipsec_latch);
	if (ixa->ixa_ipsec_ah_sa != NULL)
		IPSA_REFHOLD(ixa->ixa_ipsec_ah_sa);
	if (ixa->ixa_ipsec_esp_sa != NULL)
		IPSA_REFHOLD(ixa->ixa_ipsec_esp_sa);
	if (ixa->ixa_ipsec_policy != NULL)
		IPPOL_REFHOLD(ixa->ixa_ipsec_policy);
	if (ixa->ixa_ipsec_action != NULL)
		IPACT_REFHOLD(ixa->ixa_ipsec_action);

	/*
	 * The duplicate owns its own label and cred references, so mark
	 * them for release when it is freed.
	 */
	if (ixa->ixa_tsl != NULL) {
		label_hold(ixa->ixa_tsl);
		ixa->ixa_free_flags |= IXA_FREE_TSL;
	}
	if (ixa->ixa_cred != NULL) {
		crhold(ixa->ixa_cred);
		ixa->ixa_free_flags |= IXA_FREE_CRED;
	}
	return (ixa);
}
974 974
975 975 /*
976 976 * Used to replace the ixa_label field.
977 977 * The caller should have a reference on the label, which we transfer to
978 978 * the attributes so that when the attribute is freed/cleaned up
979 979 * we will release that reference.
980 980 */
981 981 void
982 982 ip_xmit_attr_replace_tsl(ip_xmit_attr_t *ixa, ts_label_t *tsl)
983 983 {
984 984 ASSERT(tsl != NULL);
985 985
986 986 if (ixa->ixa_free_flags & IXA_FREE_TSL) {
987 987 ASSERT(ixa->ixa_tsl != NULL);
988 988 label_rele(ixa->ixa_tsl);
989 989 } else {
990 990 ixa->ixa_free_flags |= IXA_FREE_TSL;
991 991 }
992 992 ixa->ixa_tsl = tsl;
993 993 }
994 994
/*
 * Replace the ip_recv_attr_t's label.
 * Due to kernel RPC's use of db_credp we also need to replace ira_cred;
 * TCP/UDP uses ira_cred to set db_credp for non-socket users.
 * This can fail (and return B_FALSE) due to lack of memory.
 * Note that on failure the label has already been replaced; only the
 * cred update is skipped.
 */
boolean_t
ip_recv_attr_replace_label(ip_recv_attr_t *ira, ts_label_t *tsl)
{
	cred_t *newcr;

	/* Release any reference we hold on the old label. */
	if (ira->ira_free_flags & IRA_FREE_TSL) {
		ASSERT(ira->ira_tsl != NULL);
		label_rele(ira->ira_tsl);
	}
	label_hold(tsl);
	ira->ira_tsl = tsl;
	ira->ira_free_flags |= IRA_FREE_TSL;

	/*
	 * Reset zoneid if we have a shared address. That allows
	 * ip_fanout_tx_v4/v6 to determine the zoneid again.
	 */
	if (ira->ira_flags & IRAF_TX_SHARED_ADDR)
		ira->ira_zoneid = ALL_ZONES;

	/* We update ira_cred for RPC */
	newcr = copycred_from_tslabel(ira->ira_cred, ira->ira_tsl, KM_NOSLEEP);
	if (newcr == NULL)
		return (B_FALSE);
	/* Swap in the new cred, releasing the old one if we owned it. */
	if (ira->ira_free_flags & IRA_FREE_CRED)
		crfree(ira->ira_cred);
	ira->ira_cred = newcr;
	ira->ira_free_flags |= IRA_FREE_CRED;
	return (B_TRUE);
}
1031 1031
/*
 * This needs to be called after ip_set_destination/tsol_check_dest might
 * have changed ixa_tsl to be specific for a destination, and we now want to
 * send to a different destination.
 * We have to restart with crgetlabel() since ip_set_destination/
 * tsol_check_dest will start with ixa_tsl.
 */
void
ip_xmit_attr_restore_tsl(ip_xmit_attr_t *ixa, cred_t *cr)
{
	/* Labels only matter on a labeled (Trusted Extensions) system. */
	if (!is_system_labeled())
		return;

	/* Drop any reference we hold on the destination-specific label. */
	if (ixa->ixa_free_flags & IXA_FREE_TSL) {
		ASSERT(ixa->ixa_tsl != NULL);
		label_rele(ixa->ixa_tsl);
		ixa->ixa_free_flags &= ~IXA_FREE_TSL;
	}
	/* No hold taken here: the cred keeps the label alive. */
	ixa->ixa_tsl = crgetlabel(cr);
}
1052 1052
/*
 * Function wrapper around the IXA_REFRELE macro, for callers that need
 * to take its address or call it from outside this compilation unit.
 */
void
ixa_refrele(ip_xmit_attr_t *ixa)
{
	IXA_REFRELE(ixa);
}
1058 1058
/*
 * Called when the last reference on an ixa is dropped: release everything
 * it points at and free the structure itself.
 */
void
ixa_inactive(ip_xmit_attr_t *ixa)
{
	ASSERT(ixa->ixa_refcnt == 0);

	ixa_cleanup(ixa);
	kmem_free(ixa, sizeof (*ixa));
}
1067 1067
/*
 * Release any references contained in the ixa.
 * Also clear any fields that are not controlled by ixa_flags.
 */
void
ixa_cleanup(ip_xmit_attr_t *ixa)
{
	/* Drop cached routing/neighbor/destination-cache entries. */
	if (ixa->ixa_ire != NULL) {
		ire_refrele_notr(ixa->ixa_ire);
		ixa->ixa_ire = NULL;
	}
	if (ixa->ixa_dce != NULL) {
		dce_refrele_notr(ixa->ixa_dce);
		ixa->ixa_dce = NULL;
	}
	if (ixa->ixa_nce != NULL) {
		nce_refrele(ixa->ixa_nce);
		ixa->ixa_nce = NULL;
	}
	/* Force re-verification if this ixa is ever used to send again. */
	ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
	ixa->ixa_dce_generation = DCE_GENERATION_VERIFY;
	if (ixa->ixa_flags & IXAF_IPSEC_SECURE) {
		ipsec_out_release_refs(ixa);
	}
	/* Release the label and cred only if we own a reference on them. */
	if (ixa->ixa_free_flags & IXA_FREE_TSL) {
		ASSERT(ixa->ixa_tsl != NULL);
		label_rele(ixa->ixa_tsl);
		ixa->ixa_free_flags &= ~IXA_FREE_TSL;
	}
	ixa->ixa_tsl = NULL;
	if (ixa->ixa_free_flags & IXA_FREE_CRED) {
		ASSERT(ixa->ixa_cred != NULL);
		crfree(ixa->ixa_cred);
		ixa->ixa_free_flags &= ~IXA_FREE_CRED;
	}
	ixa->ixa_cred = NULL;
	/* Reset per-send selections not covered by ixa_flags. */
	ixa->ixa_src_preferences = 0;
	ixa->ixa_ifindex = 0;
	ixa->ixa_multicast_ifindex = 0;
	ixa->ixa_multicast_ifaddr = INADDR_ANY;
}
1109 1109
/*
 * Release any references contained in the ira.
 * Callers which use ip_recv_attr_from_mblk() would pass B_TRUE as the second
 * argument.
 */
void
ira_cleanup(ip_recv_attr_t *ira, boolean_t refrele_ill)
{
	if (ira->ira_ill != NULL) {
		if (ira->ira_rill != ira->ira_ill) {
			/* Caused by async processing */
			ill_refrele(ira->ira_rill);
		}
		if (refrele_ill)
			ill_refrele(ira->ira_ill);
	}
	if (ira->ira_flags & IRAF_IPSEC_SECURE) {
		ipsec_in_release_refs(ira);
	}
	/* Release the label and cred only if we own a reference on them. */
	if (ira->ira_free_flags & IRA_FREE_TSL) {
		ASSERT(ira->ira_tsl != NULL);
		label_rele(ira->ira_tsl);
		ira->ira_free_flags &= ~IRA_FREE_TSL;
	}
	ira->ira_tsl = NULL;
	if (ira->ira_free_flags & IRA_FREE_CRED) {
		ASSERT(ira->ira_cred != NULL);
		crfree(ira->ira_cred);
		ira->ira_free_flags &= ~IRA_FREE_CRED;
	}
	ira->ira_cred = NULL;
}
1142 1142
/*
 * Function to help release any IRE, NCE, or DCEs that
 * have been deleted and are marked as condemned.
 * The caller is responsible for any serialization which is different
 * for TCP, SCTP, and others.
 *
 * Condemned IREs/DCEs are replaced with a blackhole/default entry rather
 * than NULLed out, so concurrent senders always see a valid pointer; the
 * generation bump forces ip_set_destination to re-resolve on next use.
 */
static void
ixa_cleanup_stale(ip_xmit_attr_t *ixa)
{
	ire_t *ire;
	nce_t *nce;
	dce_t *dce;

	ire = ixa->ixa_ire;
	nce = ixa->ixa_nce;
	dce = ixa->ixa_dce;

	if (ire != NULL && IRE_IS_CONDEMNED(ire)) {
		ire_refrele_notr(ire);
		ire = ire_blackhole(ixa->ixa_ipst,
		    !(ixa->ixa_flags & IXAF_IS_IPV4));
		ASSERT(ire != NULL);
#ifdef DEBUG
		/* Convert the tracked hold to an untracked one. */
		ire_refhold_notr(ire);
		ire_refrele(ire);
#endif
		ixa->ixa_ire = ire;
		ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
	}
	if (nce != NULL && nce->nce_is_condemned) {
		/* Can make it NULL as long as we set IRE_GENERATION_VERIFY */
		nce_refrele(nce);
		ixa->ixa_nce = NULL;
		ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
	}
	if (dce != NULL && DCE_IS_CONDEMNED(dce)) {
		dce_refrele_notr(dce);
		dce = dce_get_default(ixa->ixa_ipst);
		ASSERT(dce != NULL);
#ifdef DEBUG
		/* Convert the tracked hold to an untracked one. */
		dce_refhold_notr(dce);
		dce_refrele(dce);
#endif
		ixa->ixa_dce = dce;
		ixa->ixa_dce_generation = DCE_GENERATION_VERIFY;
	}
}
1190 1190
/*
 * Acquire the per-stack shared cleanup mblk used to run tcp_ixa_cleanup()
 * on this connection's squeue.  Blocks until (a) no cleanup is pending on
 * this connection and (b) the shared mblk is available, then marks the
 * connection IXATC_INPROGRESS and takes ownership of the mblk.
 * The mblk is handed back via tcps_ixa_cleanup_mp by tcp_ixa_cleanup().
 */
static mblk_t *
tcp_ixa_cleanup_getmblk(conn_t *connp)
{
	tcp_stack_t *tcps = connp->conn_netstack->netstack_tcp;
	int need_retry;
	mblk_t *mp;

	mutex_enter(&tcps->tcps_ixa_cleanup_lock);

	/*
	 * It's possible that someone else came in and started cleaning up
	 * another connection between the time we verified this one is not being
	 * cleaned up and the time we actually get the shared mblk. If that's
	 * the case, we've dropped the lock, and some other thread may have
	 * cleaned up this connection again, and is still waiting for
	 * notification of that cleanup's completion. Therefore we need to
	 * recheck.
	 */
	do {
		need_retry = 0;
		/* Wait until no cleanup is pending on this connection. */
		while (connp->conn_ixa->ixa_tcpcleanup != IXATC_IDLE) {
			cv_wait(&tcps->tcps_ixa_cleanup_done_cv,
			    &tcps->tcps_ixa_cleanup_lock);
		}

		while ((mp = tcps->tcps_ixa_cleanup_mp) == NULL) {
			/*
			 * Multiple concurrent cleanups; need to have the last
			 * one run since it could be an unplumb.
			 */
			need_retry = 1;
			cv_wait(&tcps->tcps_ixa_cleanup_ready_cv,
			    &tcps->tcps_ixa_cleanup_lock);
		}
	} while (need_retry);

	/*
	 * We now have the lock and the mblk; now make sure that no one else can
	 * try to clean up this connection or enqueue it for cleanup, clear the
	 * mblk pointer for this stack, drop the lock, and return the mblk.
	 */
	ASSERT(MUTEX_HELD(&tcps->tcps_ixa_cleanup_lock));
	ASSERT(connp->conn_ixa->ixa_tcpcleanup == IXATC_IDLE);
	ASSERT(tcps->tcps_ixa_cleanup_mp == mp);
	ASSERT(mp != NULL);

	connp->conn_ixa->ixa_tcpcleanup = IXATC_INPROGRESS;
	tcps->tcps_ixa_cleanup_mp = NULL;
	mutex_exit(&tcps->tcps_ixa_cleanup_lock);

	return (mp);
}
1243 1243
/*
 * Used to run ixa_cleanup_stale inside the tcp squeue.
 * When done we hand the mp back by assigning it to tcps_ixa_cleanup_mp
 * and waking up the caller.
 */
/* ARGSUSED2 */
static void
tcp_ixa_cleanup(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *dummy)
{
	conn_t *connp = (conn_t *)arg;
	tcp_stack_t *tcps;

	tcps = connp->conn_netstack->netstack_tcp;

	/* On the squeue, so no one else can be sending on conn_ixa. */
	ixa_cleanup_stale(connp->conn_ixa);

	mutex_enter(&tcps->tcps_ixa_cleanup_lock);
	ASSERT(tcps->tcps_ixa_cleanup_mp == NULL);
	connp->conn_ixa->ixa_tcpcleanup = IXATC_COMPLETE;
	/* Return the shared mblk so the next cleanup can proceed. */
	tcps->tcps_ixa_cleanup_mp = mp;
	cv_signal(&tcps->tcps_ixa_cleanup_ready_cv);
	/*
	 * It is possible for any number of threads to be waiting for cleanup of
	 * different connections. Absent a per-connection (or per-IXA) CV, we
	 * need to wake them all up even though only one can be waiting on this
	 * particular cleanup.
	 */
	cv_broadcast(&tcps->tcps_ixa_cleanup_done_cv);
	mutex_exit(&tcps->tcps_ixa_cleanup_lock);
}
1275 1275
/*
 * Wait for tcp_ixa_cleanup() to finish for this connection, then return
 * the connection to IXATC_IDLE and wake any other threads waiting to
 * clean up (or waiting on) this connection.
 */
static void
tcp_ixa_cleanup_wait_and_finish(conn_t *connp)
{
	tcp_stack_t *tcps = connp->conn_netstack->netstack_tcp;

	mutex_enter(&tcps->tcps_ixa_cleanup_lock);

	ASSERT(connp->conn_ixa->ixa_tcpcleanup != IXATC_IDLE);

	while (connp->conn_ixa->ixa_tcpcleanup == IXATC_INPROGRESS) {
		cv_wait(&tcps->tcps_ixa_cleanup_done_cv,
		    &tcps->tcps_ixa_cleanup_lock);
	}

	ASSERT(connp->conn_ixa->ixa_tcpcleanup == IXATC_COMPLETE);
	connp->conn_ixa->ixa_tcpcleanup = IXATC_IDLE;
	/* Broadcast: waiters for other connections share this CV. */
	cv_broadcast(&tcps->tcps_ixa_cleanup_done_cv);

	mutex_exit(&tcps->tcps_ixa_cleanup_lock);
}
1296 1296
/*
 * ipcl_walk() function to help release any IRE, NCE, or DCEs that
 * have been deleted and are marked as condemned.
 * Note that we can't cleanup the pointers since there can be threads
 * in conn_ip_output() sending while we are called.
 *
 * `arg' encodes a boolean_t: B_TRUE means wait for memory (ill going
 * away), B_FALSE means best-effort (memory reclaim).
 */
void
conn_ixa_cleanup(conn_t *connp, void *arg)
{
	boolean_t tryhard = (boolean_t)arg;

	if (IPCL_IS_TCP(connp)) {
		mblk_t *mp;

		/* TCP serializes via its squeue and a shared cleanup mblk. */
		mp = tcp_ixa_cleanup_getmblk(connp);

		if (connp->conn_sqp->sq_run == curthread) {
			/* Already on squeue */
			tcp_ixa_cleanup(connp, mp, NULL, NULL);
		} else {
			CONN_INC_REF(connp);
			SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_ixa_cleanup,
			    connp, NULL, SQ_PROCESS, SQTAG_TCP_IXA_CLEANUP);
		}
		tcp_ixa_cleanup_wait_and_finish(connp);
	} else if (IPCL_IS_SCTP(connp)) {
		sctp_t *sctp;
		sctp_faddr_t *fp;

		/* SCTP serializes with RUN_SCTP/WAKE_SCTP. */
		sctp = CONN2SCTP(connp);
		RUN_SCTP(sctp);
		ixa_cleanup_stale(connp->conn_ixa);
		/* Each peer address has its own ixa to clean. */
		for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->sf_next)
			ixa_cleanup_stale(fp->sf_ixa);
		WAKE_SCTP(sctp);
	} else {
		ip_xmit_attr_t *ixa;

		/*
		 * If there is a different thread using conn_ixa then we get a
		 * new copy and cut the old one loose from conn_ixa. Otherwise
		 * we use conn_ixa and prevent any other thread from
		 * using/changing it. Anybody using conn_ixa (e.g., a thread in
		 * conn_ip_output) will do an ixa_refrele which will remove any
		 * references on the ire etc.
		 *
		 * Once we are done other threads can use conn_ixa since the
		 * refcnt will be back at one.
		 *
		 * We are called either because an ill is going away, or
		 * due to memory reclaim. In the former case we wait for
		 * memory since we must remove the refcnts on the ill.
		 */
		if (tryhard) {
			ixa = conn_get_ixa_tryhard(connp, B_TRUE);
			ASSERT(ixa != NULL);
		} else {
			ixa = conn_get_ixa(connp, B_TRUE);
			if (ixa == NULL) {
				/*
				 * Somebody else was using it and kmem_alloc
				 * failed! Next memory reclaim will try to
				 * clean up.
				 */
				DTRACE_PROBE1(conn__ixa__cleanup__bail,
				    conn_t *, connp);
				return;
			}
		}
		ixa_cleanup_stale(ixa);
		IXA_REFRELE(ixa);
	}
}
1370 1370
/*
 * Check whether the ill associated with this ixa is flow-controlled and,
 * if so, insert the conn on the appropriate drain list.
 * Returns B_TRUE if the conn was inserted (and, for STREAMS conns, its
 * write queue disabled); B_FALSE otherwise.
 *
 * ixa needs to be an exclusive copy so that no one changes the cookie
 * or the ixa_nce.
 */
boolean_t
ixa_check_drain_insert(conn_t *connp, ip_xmit_attr_t *ixa)
{
	uintptr_t cookie = ixa->ixa_cookie;
	ill_dld_direct_t *idd;
	idl_tx_list_t *idl_txl;
	ill_t *ill = ixa->ixa_nce->nce_ill;
	boolean_t inserted = B_FALSE;

	idd = &(ill)->ill_dld_capab->idc_direct;
	/* The drain list is selected by hashing the flow-control cookie. */
	idl_txl = &ixa->ixa_ipst->ips_idl_tx_list[IDLHASHINDEX(cookie)];
	mutex_enter(&idl_txl->txl_lock);

	/*
	 * If `cookie' is zero, ip_xmit() -> canputnext() failed -- i.e., flow
	 * control is asserted on an ill that does not support direct calls.
	 * Jump to insert.
	 */
	if (cookie == 0)
		goto tryinsert;

	ASSERT(ILL_DIRECT_CAPABLE(ill));

	if (idd->idd_tx_fctl_df(idd->idd_tx_fctl_dh, cookie) == 0) {
		/* Flow control was lifted in the meantime; nothing to do. */
		DTRACE_PROBE1(ill__tx__not__blocked, uintptr_t, cookie);
	} else if (idl_txl->txl_cookie != (uintptr_t)NULL &&
	    idl_txl->txl_cookie != ixa->ixa_cookie) {
		/* Another cookie hashed to the same drain list. */
		DTRACE_PROBE2(ill__tx__cookie__collision, uintptr_t, cookie,
		    uintptr_t, idl_txl->txl_cookie);
		/* TODO: bump kstat for cookie collision */
	} else {
		/*
		 * Check/set conn_blocked under conn_lock. Note that txl_lock
		 * will not suffice since two separate UDP threads may be
		 * racing to send to different destinations that are
		 * associated with different cookies and thus may not be
		 * holding the same txl_lock. Further, since a given conn_t
		 * can only be on a single drain list, the conn_t will be
		 * enqueued on whichever thread wins this race.
		 */
tryinsert:	mutex_enter(&connp->conn_lock);
		if (connp->conn_blocked) {
			/* Already on a drain list; nothing more to do. */
			DTRACE_PROBE1(ill__tx__conn__already__blocked,
			    conn_t *, connp);
			mutex_exit(&connp->conn_lock);
		} else {
			connp->conn_blocked = B_TRUE;
			mutex_exit(&connp->conn_lock);
			idl_txl->txl_cookie = cookie;
			conn_drain_insert(connp, idl_txl);
			/* Stop the STREAMS write service until drained. */
			if (!IPCL_IS_NONSTR(connp))
				noenable(connp->conn_wq);
			inserted = B_TRUE;
		}
	}
	mutex_exit(&idl_txl->txl_lock);
	return (inserted);
}
|
↓ open down ↓ |
557 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX