/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1990 Mentat Inc.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 * Copyright (c) 2014, OmniTI Computer Consulting, Inc. All rights reserved.
 */

/*
 * This file contains the interface control functions for IP.
 */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/dlpi.h>
#include <sys/stropts.h>
#include <sys/strsun.h>
#include <sys/sysmacros.h>
#include <sys/strsubr.h>
#include <sys/strlog.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cmn_err.h>
#include <sys/kstat.h>
#include <sys/debug.h>
#include <sys/zone.h>
#include <sys/sunldi.h>
#include <sys/file.h>
#include <sys/bitmap.h>
#include <sys/cpuvar.h>
#include <sys/time.h>
#include <sys/ctype.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/isa_defs.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <sys/sockio.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet/igmp_var.h>
#include <sys/policy.h>
#include <sys/ethernet.h>
#include <sys/callb.h>
#include <sys/md5.h>

#include <inet/common.h>	/* for various inet/mi.h and inet/nd.h needs */
#include <inet/mi.h>
#include <inet/nd.h>
#include <inet/tunables.h>
#include <inet/arp.h>
#include <inet/ip_arp.h>
#include <inet/mib2.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <inet/ip6_asp.h>
#include <inet/tcp.h>
#include <inet/ip_multi.h>
#include <inet/ip_ire.h>
#include <inet/ip_ftable.h>
#include <inet/ip_rts.h>
#include <inet/ip_ndp.h>
#include <inet/ip_if.h>
#include <inet/ip_impl.h>
#include <inet/sctp_ip.h>
#include <inet/ip_netinfo.h>
#include <inet/ilb_ip.h>

#include <netinet/igmp.h>
#include <inet/ip_listutils.h>
#include <inet/ipclassifier.h>
#include <sys/mac_client.h>
#include <sys/dld.h>
#include <sys/mac_flow.h>

#include <sys/systeminfo.h>
#include <sys/bootconf.h>

#include <sys/tsol/tndb.h>
#include <sys/tsol/tnet.h>

#include <inet/rawip_impl.h>	/* needed for icmp_stack_t */
#include <inet/udp_impl.h>	/* needed for udp_stack_t */

/*
 * The character that marks where the ill_name ends: e.g., in the logical
 * interface name "lo0:1", the ill_name is "lo0" and 1 is the ipif id.
 */
#define	IPIF_SEPARATOR_CHAR	':'

/* IP ioctl function table entry */
typedef struct ipft_s {
	int	ipft_cmd;	/* ioctl command */
	pfi_t	ipft_pfi;	/* function to handle the ioctl */
	int	ipft_min_size;	/* minimum size of the ioctl data */
	int	ipft_flags;	/* IPFT_F_* flags below */
} ipft_t;
#define	IPFT_F_NO_REPLY		0x1	/* IP ioctl does not expect any reply */
#define	IPFT_F_SELF_REPLY	0x2	/* ioctl callee does the ioctl reply */

static int	nd_ill_forward_get(queue_t *, mblk_t *, caddr_t, cred_t *);
static int	nd_ill_forward_set(queue_t *q, mblk_t *mp,
		    char *value, caddr_t cp, cred_t *ioc_cr);

static boolean_t ill_is_quiescent(ill_t *);
static boolean_t ip_addr_ok_v4(ipaddr_t addr, ipaddr_t subnet_mask);
static ip_m_t	*ip_m_lookup(t_uscalar_t mac_type);
static int	ip_sioctl_addr_tail(ipif_t *ipif, sin_t *sin, queue_t *q,
    mblk_t *mp, boolean_t need_up);
static int	ip_sioctl_dstaddr_tail(ipif_t *ipif, sin_t *sin, queue_t *q,
    mblk_t *mp, boolean_t need_up);
static int	ip_sioctl_slifzone_tail(ipif_t *ipif, zoneid_t zoneid,
    queue_t *q, mblk_t *mp, boolean_t need_up);
static int	ip_sioctl_flags_tail(ipif_t *ipif, uint64_t flags, queue_t *q,
    mblk_t *mp);
static int	ip_sioctl_netmask_tail(ipif_t *ipif, sin_t *sin, queue_t *q,
    mblk_t *mp);
static int	ip_sioctl_subnet_tail(ipif_t *ipif, in6_addr_t, in6_addr_t,
    queue_t *q, mblk_t *mp, boolean_t need_up);
static int	ip_sioctl_plink_ipmod(ipsq_t *ipsq, queue_t *q, mblk_t *mp,
    int ioccmd, struct linkblk *li);
static ipaddr_t	ip_subnet_mask(ipaddr_t addr, ipif_t **, ip_stack_t *);
static void	ip_wput_ioctl(queue_t *q, mblk_t *mp);
static void	ipsq_flush(ill_t *ill);

static int	ip_sioctl_token_tail(ipif_t *ipif, sin6_t *sin6, int addrlen,
    queue_t *q, mblk_t *mp, boolean_t need_up);
static void	ipsq_delete(ipsq_t *);

static ipif_t	*ipif_allocate(ill_t *ill, int id, uint_t ire_type,
    boolean_t initialize, boolean_t insert, int *errorp);
static ire_t	**ipif_create_bcast_ires(ipif_t *ipif, ire_t **irep);
static void	ipif_delete_bcast_ires(ipif_t *ipif);
static int	ipif_add_ires_v4(ipif_t *, boolean_t);
static boolean_t ipif_comp_multi(ipif_t *old_ipif, ipif_t *new_ipif,
    boolean_t isv6);
static int	ipif_logical_down(ipif_t *ipif, queue_t *q, mblk_t *mp);
static void	ipif_free(ipif_t *ipif);
static void	ipif_free_tail(ipif_t *ipif);
static void	ipif_set_default(ipif_t *ipif);
static int	ipif_set_values(queue_t *q, mblk_t *mp,
    char *interf_name, uint_t *ppa);
static int	ipif_set_values_tail(ill_t *ill, ipif_t *ipif, mblk_t *mp,
    queue_t *q);
static ipif_t	*ipif_lookup_on_name(char *name, size_t namelen,
    boolean_t do_alloc, boolean_t *exists, boolean_t isv6, zoneid_t zoneid,
    ip_stack_t *);
static ipif_t	*ipif_lookup_on_name_async(char *name, size_t namelen,
    boolean_t isv6, zoneid_t zoneid, queue_t *q, mblk_t *mp, ipsq_func_t func,
    int *error, ip_stack_t *);

static int	ill_alloc_ppa(ill_if_t *, ill_t *);
static void	ill_delete_interface_type(ill_if_t *);
static int	ill_dl_up(ill_t *ill, ipif_t *ipif);
static void	ill_dl_down(ill_t *ill);
static void	ill_down(ill_t *ill);
static void	ill_down_ipifs(ill_t *, boolean_t);
static void	ill_free_mib(ill_t *ill);
static void	ill_glist_delete(ill_t *);
static void	ill_phyint_reinit(ill_t *ill);
static void	ill_set_nce_router_flags(ill_t *, boolean_t);
static void	ill_set_phys_addr_tail(ipsq_t *, queue_t *, mblk_t *, void *);
static void	ill_replumb_tail(ipsq_t *, queue_t *, mblk_t *, void *);

static ip_v6intfid_func_t ip_ether_v6intfid, ip_ib_v6intfid;
static ip_v6intfid_func_t ip_ipv4_v6intfid, ip_ipv6_v6intfid;
static ip_v6intfid_func_t ip_ipmp_v6intfid, ip_nodef_v6intfid;
static ip_v6intfid_func_t ip_ipv4_v6destintfid, ip_ipv6_v6destintfid;
static ip_v4mapinfo_func_t ip_ether_v4_mapping;
static ip_v6mapinfo_func_t ip_ether_v6_mapping;
static ip_v4mapinfo_func_t ip_ib_v4_mapping;
static ip_v6mapinfo_func_t ip_ib_v6_mapping;
static ip_v4mapinfo_func_t ip_mbcast_mapping;
static void	ip_cgtp_bcast_add(ire_t *, ip_stack_t *);
static void	ip_cgtp_bcast_delete(ire_t *, ip_stack_t *);
static void	phyint_free(phyint_t *);

static void ill_capability_dispatch(ill_t *, mblk_t *, dl_capability_sub_t *);
static void ill_capability_id_ack(ill_t *, mblk_t *, dl_capability_sub_t *);
static void ill_capability_vrrp_ack(ill_t *, mblk_t *, dl_capability_sub_t *);
static void ill_capability_hcksum_ack(ill_t *, mblk_t *,
    dl_capability_sub_t *);
static void ill_capability_hcksum_reset_fill(ill_t *, mblk_t *);
static void ill_capability_zerocopy_ack(ill_t *, mblk_t *,
    dl_capability_sub_t *);
static void ill_capability_zerocopy_reset_fill(ill_t *, mblk_t *);
static void ill_capability_dld_reset_fill(ill_t *, mblk_t *);
static void ill_capability_dld_ack(ill_t *, mblk_t *,
    dl_capability_sub_t *);
static void ill_capability_dld_enable(ill_t *);
static void ill_capability_ack_thr(void *);
static void ill_capability_lso_enable(ill_t *);

static ill_t	*ill_prev_usesrc(ill_t *);
static int	ill_relink_usesrc_ills(ill_t *, ill_t *, uint_t);
static void	ill_disband_usesrc_group(ill_t *);
static void	ip_sioctl_garp_reply(mblk_t *, ill_t *, void *, int);

#ifdef DEBUG
static void	ill_trace_cleanup(const ill_t *);
static void	ipif_trace_cleanup(const ipif_t *);
#endif

static void	ill_dlpi_clear_deferred(ill_t *ill);

static void	phyint_flags_init(phyint_t *, t_uscalar_t);

/*
 * If we go over the memory footprint limit more than once in this many
 * milliseconds, we'll start pruning aggressively.
 */
int ip_min_frag_prune_time = 0;

static ipft_t ip_ioctl_ftbl[] = {
	{ IP_IOC_IRE_DELETE, ip_ire_delete, sizeof (ipid_t), 0 },
	{ IP_IOC_IRE_DELETE_NO_REPLY, ip_ire_delete, sizeof (ipid_t),
	    IPFT_F_NO_REPLY },
	{ IP_IOC_RTS_REQUEST, ip_rts_request, 0, IPFT_F_SELF_REPLY },
	{ 0 }
};

/* Simple ICMP IP Header Template */
static ipha_t icmp_ipha = {
	IP_SIMPLE_HDR_VERSION, 0, 0, 0, 0, 0, IPPROTO_ICMP
};
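
/*
 * Only the version and protocol fields are preset in the template above;
 * the remaining header fields are filled in when an ICMP packet is
 * actually built.
 */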

static uchar_t ip_six_byte_all_ones[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

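/*
 * Media-type table: maps each DLPI mac type to its interface type, the
 * v4/v6 saps (or encapsulation protocols) used over it, and the address
 * mapping and IPv6 interface-id derivation functions for that medium.
 * ip_m_lookup() searches this table by mac type; callers typically fall
 * back to the DL_OTHER entry for mac types that are not listed.
 */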
static ip_m_t   ip_m_tbl[] = {
	{ DL_ETHER, IFT_ETHER, ETHERTYPE_IP, ETHERTYPE_IPV6,
	    ip_ether_v4_mapping, ip_ether_v6_mapping, ip_ether_v6intfid,
	    ip_nodef_v6intfid },
	{ DL_CSMACD, IFT_ISO88023, ETHERTYPE_IP, ETHERTYPE_IPV6,
	    ip_ether_v4_mapping, ip_ether_v6_mapping, ip_nodef_v6intfid,
	    ip_nodef_v6intfid },
	{ DL_TPB, IFT_ISO88024, ETHERTYPE_IP, ETHERTYPE_IPV6,
	    ip_ether_v4_mapping, ip_ether_v6_mapping, ip_nodef_v6intfid,
	    ip_nodef_v6intfid },
	{ DL_TPR, IFT_ISO88025, ETHERTYPE_IP, ETHERTYPE_IPV6,
	    ip_ether_v4_mapping, ip_ether_v6_mapping, ip_nodef_v6intfid,
	    ip_nodef_v6intfid },
	{ DL_FDDI, IFT_FDDI, ETHERTYPE_IP, ETHERTYPE_IPV6,
	    ip_ether_v4_mapping, ip_ether_v6_mapping, ip_ether_v6intfid,
	    ip_nodef_v6intfid },
	{ DL_IB, IFT_IB, ETHERTYPE_IP, ETHERTYPE_IPV6,
	    ip_ib_v4_mapping, ip_ib_v6_mapping, ip_ib_v6intfid,
	    ip_nodef_v6intfid },
	{ DL_IPV4, IFT_IPV4, IPPROTO_ENCAP, IPPROTO_IPV6,
	    ip_mbcast_mapping, ip_mbcast_mapping, ip_ipv4_v6intfid,
	    ip_ipv4_v6destintfid },
	{ DL_IPV6, IFT_IPV6, IPPROTO_ENCAP, IPPROTO_IPV6,
	    ip_mbcast_mapping, ip_mbcast_mapping, ip_ipv6_v6intfid,
	    ip_ipv6_v6destintfid },
	{ DL_6TO4, IFT_6TO4, IPPROTO_ENCAP, IPPROTO_IPV6,
	    ip_mbcast_mapping, ip_mbcast_mapping, ip_ipv4_v6intfid,
	    ip_nodef_v6intfid },
	{ SUNW_DL_VNI, IFT_OTHER, ETHERTYPE_IP, ETHERTYPE_IPV6,
	    NULL, NULL, ip_nodef_v6intfid, ip_nodef_v6intfid },
	{ SUNW_DL_IPMP, IFT_OTHER, ETHERTYPE_IP, ETHERTYPE_IPV6,
	    NULL, NULL, ip_ipmp_v6intfid, ip_nodef_v6intfid },
	{ DL_OTHER, IFT_OTHER, ETHERTYPE_IP, ETHERTYPE_IPV6,
	    ip_ether_v4_mapping, ip_ether_v6_mapping, ip_nodef_v6intfid,
	    ip_nodef_v6intfid }
};

char	ipif_loopback_name[] = "lo0";

/* These are used by all IP network modules. */
sin6_t	sin6_null;	/* Zero address for quick clears */
sin_t	sin_null;	/* Zero address for quick clears */

/* When set, search for an unused ipif_seqid */
static ipif_t	ipif_zero;

/*
 * The ppa arena is created after this many
 * interfaces have been plumbed.
 */
uint_t	ill_no_arena = 12;	/* Settable in /etc/system */

/*
 * Allocate per-interface mibs.
 * Returns B_TRUE if ok, B_FALSE otherwise.
 * ipsq may not yet be allocated (loopback case).
 */
static boolean_t
ill_allocate_mibs(ill_t *ill)
{
	/* Already allocated? */
	if (ill->ill_ip_mib != NULL) {
		if (ill->ill_isv6)
			ASSERT(ill->ill_icmp6_mib != NULL);
		return (B_TRUE);
	}

	ill->ill_ip_mib = kmem_zalloc(sizeof (*ill->ill_ip_mib),
	    KM_NOSLEEP);
	if (ill->ill_ip_mib == NULL) {
		return (B_FALSE);
	}

	/* Setup static information */
	SET_MIB(ill->ill_ip_mib->ipIfStatsEntrySize,
	    sizeof (mib2_ipIfStatsEntry_t));
	if (ill->ill_isv6) {
		ill->ill_ip_mib->ipIfStatsIPVersion = MIB2_INETADDRESSTYPE_ipv6;
		SET_MIB(ill->ill_ip_mib->ipIfStatsAddrEntrySize,
		    sizeof (mib2_ipv6AddrEntry_t));
		SET_MIB(ill->ill_ip_mib->ipIfStatsRouteEntrySize,
		    sizeof (mib2_ipv6RouteEntry_t));
		SET_MIB(ill->ill_ip_mib->ipIfStatsNetToMediaEntrySize,
		    sizeof (mib2_ipv6NetToMediaEntry_t));
		SET_MIB(ill->ill_ip_mib->ipIfStatsMemberEntrySize,
		    sizeof (ipv6_member_t));
		SET_MIB(ill->ill_ip_mib->ipIfStatsGroupSourceEntrySize,
		    sizeof (ipv6_grpsrc_t));
	} else {
		ill->ill_ip_mib->ipIfStatsIPVersion = MIB2_INETADDRESSTYPE_ipv4;
		SET_MIB(ill->ill_ip_mib->ipIfStatsAddrEntrySize,
		    sizeof (mib2_ipAddrEntry_t));
		SET_MIB(ill->ill_ip_mib->ipIfStatsRouteEntrySize,
		    sizeof (mib2_ipRouteEntry_t));
		SET_MIB(ill->ill_ip_mib->ipIfStatsNetToMediaEntrySize,
		    sizeof (mib2_ipNetToMediaEntry_t));
		SET_MIB(ill->ill_ip_mib->ipIfStatsMemberEntrySize,
		    sizeof (ip_member_t));
		SET_MIB(ill->ill_ip_mib->ipIfStatsGroupSourceEntrySize,
		    sizeof (ip_grpsrc_t));

		/*
		 * For a v4 ill, we are done at this point, because per ill
		 * icmp mibs are only used for v6.
		 */
		return (B_TRUE);
	}

	ill->ill_icmp6_mib = kmem_zalloc(sizeof (*ill->ill_icmp6_mib),
	    KM_NOSLEEP);
	if (ill->ill_icmp6_mib == NULL) {
		kmem_free(ill->ill_ip_mib, sizeof (*ill->ill_ip_mib));
		ill->ill_ip_mib = NULL;
		return (B_FALSE);
	}
	/* static icmp info */
	ill->ill_icmp6_mib->ipv6IfIcmpEntrySize =
	    sizeof (mib2_ipv6IfIcmpEntry_t);
	/*
	 * The ipIfStatsIfindex and ipv6IfIcmpIndex will be assigned later
	 * after the phyint merge occurs in ipif_set_values -> ill_glist_insert
	 * -> ill_phyint_reinit
	 */
	return (B_TRUE);
}

/*
 * Completely vaporize a lower level tap and all associated interfaces.
 * ill_delete is called only out of ip_close when the device control
 * stream is being closed.
 */
void
ill_delete(ill_t *ill)
{
	ipif_t	*ipif;
	ill_t	*prev_ill;
	ip_stack_t	*ipst = ill->ill_ipst;

	/*
	 * ill_delete may be forcibly entering the ipsq.  The previous
	 * ioctl may not have completed and may need to be aborted.
	 * ipsq_flush takes care of it.  If we don't need to enter the
	 * ipsq forcibly, the 2nd invocation of ipsq_flush in
	 * ill_delete_tail is sufficient.
	 */
	ipsq_flush(ill);

	/*
	 * Nuke all interfaces.  ipif_free will take down the interface,
	 * remove it from the list, and free the data structure.
	 * Walk down the ipif list and remove the logical interfaces
	 * first before removing the main ipif.  We can't unplumb the
	 * zeroth interface first in the case of IPv6, as update_conn_ill
	 * -> ip_ll_multireq dereferences ill_ipif for the POINTOPOINT
	 * check.
	 *
	 * If ill_ipif was not properly initialized (i.e., low on memory),
	 * then there are no interfaces to clean up; in that case just
	 * clean up the ill.
	 */
	for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next)
		ipif_free(ipif);

	/*
	 * clean out all the nce_t entries that depend on this
	 * ill for the ill_phys_addr.
	 */
	nce_flush(ill, B_TRUE);

	/* Clean up msgs on pending upcalls for mrouted */
	reset_mrt_ill(ill);

	update_conn_ill(ill, ipst);

	/*
	 * Remove multicast references added as a result of calls to
	 * ip_join_allmulti().
	 */
	ip_purge_allmulti(ill);

	/*
	 * If the ill being deleted is under IPMP, boot it out of the illgrp.
	 */
	if (IS_UNDER_IPMP(ill))
		ipmp_ill_leave_illgrp(ill);

	/*
	 * ill_down will arrange to blow off any IRE's dependent on this
	 * ILL, and shut down fragmentation reassembly.
	 */
	ill_down(ill);

	/* Let SCTP know, so that it can remove this from its list. */
	sctp_update_ill(ill, SCTP_ILL_REMOVE);

	/*
	 * Walk all CONNs that can have a reference on an ire or nce for this
	 * ill (we actually walk all that now have stale references).
	 */
	ipcl_walk(conn_ixa_cleanup, (void *)B_TRUE, ipst);

	/* With IPv6 we have dce_ifindex. Cleanup for neatness */
	if (ill->ill_isv6)
		dce_cleanup(ill->ill_phyint->phyint_ifindex, ipst);

	/*
	 * If an address on this ILL is being used as a source address then
	 * clear out the pointers in other ILLs that point to this ILL.
	 */
	rw_enter(&ipst->ips_ill_g_usesrc_lock, RW_WRITER);
	if (ill->ill_usesrc_grp_next != NULL) {
		if (ill->ill_usesrc_ifindex == 0) { /* usesrc ILL ? */
			ill_disband_usesrc_group(ill);
		} else {	/* consumer of the usesrc ILL */
			prev_ill = ill_prev_usesrc(ill);
			prev_ill->ill_usesrc_grp_next =
			    ill->ill_usesrc_grp_next;
		}
	}
	rw_exit(&ipst->ips_ill_g_usesrc_lock);
}

static void
ipif_non_duplicate(ipif_t *ipif)
{
	ill_t *ill = ipif->ipif_ill;
	mutex_enter(&ill->ill_lock);
	if (ipif->ipif_flags & IPIF_DUPLICATE) {
		ipif->ipif_flags &= ~IPIF_DUPLICATE;
		ASSERT(ill->ill_ipif_dup_count > 0);
		ill->ill_ipif_dup_count--;
	}
	mutex_exit(&ill->ill_lock);
}

/*
 * ill_delete_tail is called from ip_modclose after all references
 * to the closing ill are gone.  The wait is done in ip_modclose.
 */
void
ill_delete_tail(ill_t *ill)
{
	mblk_t	**mpp;
	ipif_t	*ipif;
	ip_stack_t *ipst = ill->ill_ipst;

	for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
		ipif_non_duplicate(ipif);
		(void) ipif_down_tail(ipif);
	}

	ASSERT(ill->ill_ipif_dup_count == 0);

	/*
	 * If polling capability is enabled (which signifies direct
	 * upcall into IP and driver has ill saved as a handle),
	 * we need to make sure that unbind has completed before we
	 * let the ill disappear and driver no longer has any reference
	 * to this ill.
	 */
	mutex_enter(&ill->ill_lock);
	while (ill->ill_state_flags & ILL_DL_UNBIND_IN_PROGRESS)
		cv_wait(&ill->ill_cv, &ill->ill_lock);
	mutex_exit(&ill->ill_lock);
	ASSERT(!(ill->ill_capabilities &
	    (ILL_CAPAB_DLD | ILL_CAPAB_DLD_POLL | ILL_CAPAB_DLD_DIRECT)));

	if (ill->ill_net_type != IRE_LOOPBACK)
		qprocsoff(ill->ill_rq);

	/*
	 * We do an ipsq_flush once again now.  New messages could have
	 * arrived from below (M_ERROR or M_HANGUP).  Similarly, ioctls
	 * could have arrived if an ioctl thread had looked up the ill
	 * before we set the ILL_CONDEMNED flag, but had not yet enqueued
	 * the ioctl when we did the last ipsq_flush.
	 */
	ipsq_flush(ill);

	/*
	 * Free capabilities.
	 */
	if (ill->ill_hcksum_capab != NULL) {
		kmem_free(ill->ill_hcksum_capab, sizeof (ill_hcksum_capab_t));
		ill->ill_hcksum_capab = NULL;
	}

	if (ill->ill_zerocopy_capab != NULL) {
		kmem_free(ill->ill_zerocopy_capab,
		    sizeof (ill_zerocopy_capab_t));
		ill->ill_zerocopy_capab = NULL;
	}

	if (ill->ill_lso_capab != NULL) {
		kmem_free(ill->ill_lso_capab, sizeof (ill_lso_capab_t));
		ill->ill_lso_capab = NULL;
	}

	if (ill->ill_dld_capab != NULL) {
		kmem_free(ill->ill_dld_capab, sizeof (ill_dld_capab_t));
		ill->ill_dld_capab = NULL;
	}

	/* Clean up ill_allowed_ips* related state */
	if (ill->ill_allowed_ips != NULL) {
		ASSERT(ill->ill_allowed_ips_cnt > 0);
		kmem_free(ill->ill_allowed_ips,
		    ill->ill_allowed_ips_cnt * sizeof (in6_addr_t));
		ill->ill_allowed_ips = NULL;
		ill->ill_allowed_ips_cnt = 0;
	}

	while (ill->ill_ipif != NULL)
		ipif_free_tail(ill->ill_ipif);

	/*
	 * We have removed all references to ilm from conn and the ones joined
	 * within the kernel.
	 *
	 * We don't walk conns, mrts and ires because
	 *
	 * 1) update_conn_ill and reset_mrt_ill cleans up conns and mrts.
	 * 2) ill_down ->ill_downi walks all the ires and cleans up
	 *    ill references.
	 */

	/*
	 * If this ill is an IPMP meta-interface, blow away the illgrp.  This
	 * is safe to do because the illgrp has already been unlinked from the
	 * group by I_PUNLINK, and thus SIOCSLIFGROUPNAME cannot find it.
	 */
	if (IS_IPMP(ill)) {
		ipmp_illgrp_destroy(ill->ill_grp);
		ill->ill_grp = NULL;
	}

	if (ill->ill_mphysaddr_list != NULL) {
		multiphysaddr_t *mpa, *tmpa;

		mpa = ill->ill_mphysaddr_list;
		ill->ill_mphysaddr_list = NULL;
		while (mpa) {
			tmpa = mpa->mpa_next;
			kmem_free(mpa, sizeof (*mpa));
			mpa = tmpa;
		}
	}
	/*
	 * Take us out of the list of ILLs. ill_glist_delete -> phyint_free
	 * could free the phyint. No more reference to the phyint after this
	 * point.
	 */
	(void) ill_glist_delete(ill);

	if (ill->ill_frag_ptr != NULL) {
		uint_t count;

		for (count = 0; count < ILL_FRAG_HASH_TBL_COUNT; count++) {
			mutex_destroy(&ill->ill_frag_hash_tbl[count].ipfb_lock);
		}
		mi_free(ill->ill_frag_ptr);
		ill->ill_frag_ptr = NULL;
		ill->ill_frag_hash_tbl = NULL;
	}

	freemsg(ill->ill_nd_lla_mp);
	/* Free all retained control messages. */
	mpp = &ill->ill_first_mp_to_free;
	do {
		while (mpp[0]) {
			mblk_t  *mp;
			mblk_t  *mp1;

			mp = mpp[0];
			mpp[0] = mp->b_next;
			for (mp1 = mp; mp1 != NULL; mp1 = mp1->b_cont) {
				mp1->b_next = NULL;
				mp1->b_prev = NULL;
			}
			freemsg(mp);
		}
	} while (mpp++ != &ill->ill_last_mp_to_free);

	ill_free_mib(ill);

#ifdef DEBUG
	ill_trace_cleanup(ill);
#endif

	/* The default multicast interface might have changed */
	ire_increment_multicast_generation(ipst, ill->ill_isv6);

	/* Drop refcnt here */
	netstack_rele(ill->ill_ipst->ips_netstack);
	ill->ill_ipst = NULL;
}

static void
ill_free_mib(ill_t *ill)
{
	ip_stack_t *ipst = ill->ill_ipst;

	/*
	 * MIB statistics must not be lost, so when an interface
	 * goes away the counter values will be added to the global
	 * MIBs.
	 */
	if (ill->ill_ip_mib != NULL) {
		if (ill->ill_isv6) {
			ip_mib2_add_ip_stats(&ipst->ips_ip6_mib,
			    ill->ill_ip_mib);
		} else {
			ip_mib2_add_ip_stats(&ipst->ips_ip_mib,
			    ill->ill_ip_mib);
		}

		kmem_free(ill->ill_ip_mib, sizeof (*ill->ill_ip_mib));
		ill->ill_ip_mib = NULL;
	}
	if (ill->ill_icmp6_mib != NULL) {
		ip_mib2_add_icmp6_stats(&ipst->ips_icmp6_mib,
		    ill->ill_icmp6_mib);
		kmem_free(ill->ill_icmp6_mib, sizeof (*ill->ill_icmp6_mib));
		ill->ill_icmp6_mib = NULL;
	}
}

/*
 * Concatenate together a physical address and a sap.
 *
 * Sap_lengths are interpreted as follows:
 *	sap_length == 0	==> no sap
 *	sap_length > 0	==> sap is at the head of the dlpi address
 *	sap_length < 0	==> sap is at the tail of the dlpi address
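 *
 * For example (illustrative values, not tied to any one driver): an
 * Ethernet-style device with phys_length == 6 and sap_length == -2
 * yields an 8-byte dlpi address laid out as <6-byte phys addr><2-byte
 * sap>, whereas sap_length == 2 would place the 2-byte sap ahead of the
 * physical address.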
 */
static void
ill_dlur_copy_address(uchar_t *phys_src, uint_t phys_length,
    t_scalar_t sap_src, t_scalar_t sap_length, uchar_t *dst)
{
	uint16_t sap_addr = (uint16_t)sap_src;

	if (sap_length == 0) {
		if (phys_src == NULL)
			bzero(dst, phys_length);
		else
			bcopy(phys_src, dst, phys_length);
	} else if (sap_length < 0) {
		if (phys_src == NULL)
			bzero(dst, phys_length);
		else
			bcopy(phys_src, dst, phys_length);
		bcopy(&sap_addr, (char *)dst + phys_length, sizeof (sap_addr));
	} else {
		bcopy(&sap_addr, dst, sizeof (sap_addr));
		if (phys_src == NULL)
			bzero((char *)dst + sap_length, phys_length);
		else
			bcopy(phys_src, (char *)dst + sap_length, phys_length);
	}
}

/*
 * Generate a dl_unitdata_req mblk for the device and address given.
 * addr_length is the length of the physical portion of the address.
 * If addr is NULL, include an all-zero address of the specified length.
 * In any case, addr_length is taken to be the entire length of the
 * dlpi address, including the absolute value of sap_length.
 */
mblk_t *
ill_dlur_gen(uchar_t *addr, uint_t addr_length, t_uscalar_t sap,
    t_scalar_t sap_length)
{
	dl_unitdata_req_t *dlur;
	mblk_t	*mp;
	t_scalar_t	abs_sap_length;		/* absolute value */

	abs_sap_length = ABS(sap_length);
	mp = ip_dlpi_alloc(sizeof (*dlur) + addr_length + abs_sap_length,
	    DL_UNITDATA_REQ);
	if (mp == NULL)
		return (NULL);
	dlur = (dl_unitdata_req_t *)mp->b_rptr;
	/* HACK: accommodate incompatible DLPI drivers */
	if (addr_length == 8)
		addr_length = 6;
	dlur->dl_dest_addr_length = addr_length + abs_sap_length;
	dlur->dl_dest_addr_offset = sizeof (*dlur);
	dlur->dl_priority.dl_min = 0;
	dlur->dl_priority.dl_max = 0;
	ill_dlur_copy_address(addr, addr_length, sap, sap_length,
	    (uchar_t *)&dlur[1]);
	return (mp);
}
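
/*
 * For example (illustrative only, not a fixed interface): a unitdata
 * template for a given link-layer address can be generated from the
 * ill's DLPI parameters roughly as
 *	dlur_mp = ill_dlur_gen(lladdr, ill->ill_phys_addr_length,
 *	    ill->ill_sap, ill->ill_sap_length);
 */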

/*
 * Add the pending mp to the list. There can be only 1 pending mp
 * in the list. Any exclusive ioctl that needs to wait for a response
 * from another module or driver needs to use this function to set
 * the ipx_pending_mp to the ioctl mblk and wait for the response from
 * the other module/driver. This is also used while waiting for the
 * ipif/ill/ire refcnts to drop to zero in bringing down an ipif.
 */
boolean_t
ipsq_pending_mp_add(conn_t *connp, ipif_t *ipif, queue_t *q, mblk_t *add_mp,
    int waitfor)
{
	ipxop_t	*ipx = ipif->ipif_ill->ill_phyint->phyint_ipsq->ipsq_xop;

	ASSERT(IAM_WRITER_IPIF(ipif));
	ASSERT(MUTEX_HELD(&ipif->ipif_ill->ill_lock));
	ASSERT((add_mp->b_next == NULL) && (add_mp->b_prev == NULL));
	ASSERT(ipx->ipx_pending_mp == NULL);
	/*
	 * The caller may be using a different ipif than the one passed into
	 * ipsq_current_start() (e.g., suppose an ioctl that came in on the V4
	 * ill needs to wait for the V6 ill to quiesce).  So we can't ASSERT
	 * that `ipx_current_ipif == ipif'.
	 */
	ASSERT(ipx->ipx_current_ipif != NULL);

	/*
	 * M_IOCDATA from ioctls, M_ERROR/M_HANGUP/M_PROTO/M_PCPROTO from the
	 * driver.
	 */
	ASSERT((DB_TYPE(add_mp) == M_IOCDATA) || (DB_TYPE(add_mp) == M_ERROR) ||
	    (DB_TYPE(add_mp) == M_HANGUP) || (DB_TYPE(add_mp) == M_PROTO) ||
	    (DB_TYPE(add_mp) == M_PCPROTO));

	if (connp != NULL) {
		ASSERT(MUTEX_HELD(&connp->conn_lock));
		/*
		 * Return error if the conn has started closing.  The conn
		 * could have finished cleaning up the pending mp list; if
		 * so, we should not add another mp to the list, negating
		 * the cleanup.
		 */
		if (connp->conn_state_flags & CONN_CLOSING)
			return (B_FALSE);
	}
	mutex_enter(&ipx->ipx_lock);
	ipx->ipx_pending_ipif = ipif;
	/*
	 * Note down the queue in b_queue.  This will be returned by
	 * ipsq_pending_mp_get.  The caller will then use these values to
	 * restart the processing.
	 */
	add_mp->b_next = NULL;
	add_mp->b_queue = q;
	ipx->ipx_pending_mp = add_mp;
	ipx->ipx_waitfor = waitfor;
	mutex_exit(&ipx->ipx_lock);

	if (connp != NULL)
		connp->conn_oper_pending_ill = ipif->ipif_ill;

	return (B_TRUE);
}
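
/*
 * A typical use of the above is the quiesce logic in ill_down_start()
 * below: while holding ill_lock, the writer checks ill_is_quiescent()
 * and, if references are still active, parks the ioctl mblk via
 *	(void) ipsq_pending_mp_add(NULL, ill->ill_ipif, ill->ill_rq,
 *	    mp, ILL_DOWN);
 * so that processing restarts once the refcnts drop to zero.
 */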

/*
 * Retrieve the ipx_pending_mp and return it. There can be only 1 mp
 * queued in the list.
 */
mblk_t *
ipsq_pending_mp_get(ipsq_t *ipsq, conn_t **connpp)
{
	mblk_t	*curr = NULL;
	ipxop_t	*ipx = ipsq->ipsq_xop;

	*connpp = NULL;
	mutex_enter(&ipx->ipx_lock);
	if (ipx->ipx_pending_mp == NULL) {
		mutex_exit(&ipx->ipx_lock);
		return (NULL);
	}

	/* There can be only 1 such excl message */
	curr = ipx->ipx_pending_mp;
	ASSERT(curr->b_next == NULL);
	ipx->ipx_pending_ipif = NULL;
	ipx->ipx_pending_mp = NULL;
	ipx->ipx_waitfor = 0;
	mutex_exit(&ipx->ipx_lock);

	if (CONN_Q(curr->b_queue)) {
		/*
		 * This mp did a refhold on the conn, at the start of the ioctl.
		 * So we can safely return a pointer to the conn to the caller.
		 */
		*connpp = Q_TO_CONN(curr->b_queue);
	} else {
		*connpp = NULL;
	}
	curr->b_next = NULL;
	curr->b_prev = NULL;
	return (curr);
}

/*
 * Cleanup the ioctl mp queued in ipx_pending_mp
 * - Called in the ill_delete path
 * - Called in the M_ERROR or M_HANGUP path on the ill.
 * - Called in the conn close path.
 *
 * Returns success on finding the pending mblk associated with the ioctl or
 * exclusive operation in progress, failure otherwise.
 */
boolean_t
ipsq_pending_mp_cleanup(ill_t *ill, conn_t *connp)
{
	mblk_t	*mp;
	ipxop_t	*ipx;
	queue_t	*q;
	ipif_t	*ipif;
	int	cmd;

	ASSERT(IAM_WRITER_ILL(ill));
	ipx = ill->ill_phyint->phyint_ipsq->ipsq_xop;

	mutex_enter(&ipx->ipx_lock);
	mp = ipx->ipx_pending_mp;
	if (connp != NULL) {
		if (mp == NULL || mp->b_queue != CONNP_TO_WQ(connp)) {
			/*
			 * Nothing to clean since the conn that is closing
			 * does not have a matching pending mblk in
			 * ipx_pending_mp.
			 */
			mutex_exit(&ipx->ipx_lock);
			return (B_FALSE);
		}
	} else {
		/*
		 * A non-zero ill_error signifies we are called in the
		 * M_ERROR or M_HANGUP path and we need to unconditionally
		 * abort any current ioctl and do the corresponding cleanup.
		 * A zero ill_error means we are in the ill_delete path and
		 * we do the cleanup only if there is a pending mp.
		 */
		if (mp == NULL && ill->ill_error == 0) {
			mutex_exit(&ipx->ipx_lock);
			return (B_FALSE);
		}
	}

	/* Now remove from the ipx_pending_mp */
	ipx->ipx_pending_mp = NULL;
	ipif = ipx->ipx_pending_ipif;
	ipx->ipx_pending_ipif = NULL;
	ipx->ipx_waitfor = 0;
	ipx->ipx_current_ipif = NULL;
	cmd = ipx->ipx_current_ioctl;
	ipx->ipx_current_ioctl = 0;
	ipx->ipx_current_done = B_TRUE;
	mutex_exit(&ipx->ipx_lock);

	if (mp == NULL)
		return (B_FALSE);

	q = mp->b_queue;
	mp->b_next = NULL;
	mp->b_prev = NULL;
	mp->b_queue = NULL;

	if (DB_TYPE(mp) == M_IOCTL || DB_TYPE(mp) == M_IOCDATA) {
		DTRACE_PROBE4(ipif__ioctl,
		    char *, "ipsq_pending_mp_cleanup",
		    int, cmd, ill_t *, ipif == NULL ? NULL : ipif->ipif_ill,
		    ipif_t *, ipif);
		if (connp == NULL) {
			ip_ioctl_finish(q, mp, ENXIO, NO_COPYOUT, NULL);
		} else {
			ip_ioctl_finish(q, mp, ENXIO, CONN_CLOSE, NULL);
			mutex_enter(&ipif->ipif_ill->ill_lock);
			ipif->ipif_state_flags &= ~IPIF_CHANGING;
			mutex_exit(&ipif->ipif_ill->ill_lock);
		}
	} else {
		inet_freemsg(mp);
	}
	return (B_TRUE);
}

/*
 * Called in the conn close path and ill delete path
 */
static void
ipsq_xopq_mp_cleanup(ill_t *ill, conn_t *connp)
{
	ipsq_t	*ipsq;
	mblk_t	*prev;
	mblk_t	*curr;
	mblk_t	*next;
	queue_t	*wq, *rq = NULL;
	mblk_t	*tmp_list = NULL;

	ASSERT(IAM_WRITER_ILL(ill));
	if (connp != NULL)
		wq = CONNP_TO_WQ(connp);
	else
		wq = ill->ill_wq;

	/*
	 * In the case of lo0 being unplumbed, ill_wq will be NULL.  Guard
	 * against this here.
	 */
	if (wq != NULL)
		rq = RD(wq);

	ipsq = ill->ill_phyint->phyint_ipsq;
	/*
	 * Cleanup the ioctl mp's queued in ipsq_xopq_pending_mp if any.
	 * In the case of ioctl from a conn, there can be only 1 mp
	 * queued on the ipsq.  If an ill is being unplumbed, flush all
	 * the messages.
	 */
	mutex_enter(&ipsq->ipsq_lock);
	for (prev = NULL, curr = ipsq->ipsq_xopq_mphead; curr != NULL;
	    curr = next) {
		next = curr->b_next;
		if (connp == NULL ||
		    (curr->b_queue == wq || curr->b_queue == rq)) {
			/* Unlink the mblk from the pending mp list */
			if (prev != NULL) {
				prev->b_next = curr->b_next;
			} else {
				ASSERT(ipsq->ipsq_xopq_mphead == curr);
				ipsq->ipsq_xopq_mphead = curr->b_next;
			}
			if (ipsq->ipsq_xopq_mptail == curr)
				ipsq->ipsq_xopq_mptail = prev;
			/*
			 * Create a temporary list and release the ipsq lock.
			 * New elements are added to the head of the tmp_list.
			 */
			curr->b_next = tmp_list;
			tmp_list = curr;
		} else {
			prev = curr;
		}
	}
	mutex_exit(&ipsq->ipsq_lock);

	while (tmp_list != NULL) {
		curr = tmp_list;
		tmp_list = curr->b_next;
		curr->b_next = NULL;
		curr->b_prev = NULL;
		wq = curr->b_queue;
		curr->b_queue = NULL;
		if (DB_TYPE(curr) == M_IOCTL || DB_TYPE(curr) == M_IOCDATA) {
			DTRACE_PROBE4(ipif__ioctl,
			    char *, "ipsq_xopq_mp_cleanup",
			    int, 0, ill_t *, NULL, ipif_t *, NULL);
			ip_ioctl_finish(wq, curr, ENXIO, connp != NULL ?
			    CONN_CLOSE : NO_COPYOUT, NULL);
		} else {
			/*
			 * IP-MT XXX In the case of TLI/XTI bind / optmgmt
			 * this can't be just inet_freemsg; we have to
			 * restart it, otherwise the thread will be stuck.
			 */
			inet_freemsg(curr);
		}
	}
}

/*
 * This conn has started closing. Cleanup any pending ioctl from this conn.
 * STREAMS ensures that there can be at most 1 active ioctl on a stream.
 */
void
conn_ioctl_cleanup(conn_t *connp)
{
	ipsq_t	*ipsq;
	ill_t	*ill;
	boolean_t refheld;

	/*
	 * Check for a queued ioctl. If the ioctl has not yet started, the mp
	 * is pending in the list headed by ipsq_xopq_head. If the ioctl has
	 * started the mp could be present in ipx_pending_mp. Note that if
	 * conn_oper_pending_ill is NULL, the ioctl may still be in flight and
	 * not yet queued anywhere. In this case, the conn close code will wait
	 * until the conn_ref is dropped. If the stream was a tcp stream, then
	 * tcp_close will wait first until all ioctls have completed for this
	 * conn.
	 */
	mutex_enter(&connp->conn_lock);
	ill = connp->conn_oper_pending_ill;
	if (ill == NULL) {
		mutex_exit(&connp->conn_lock);
		return;
	}

	/*
	 * We may not be able to refhold the ill if the ill/ipif
	 * is changing.  But we need to make sure that the ill will
	 * not vanish.  So we just bump up the ill_waiter count.
	 */
	refheld = ill_waiter_inc(ill);
	mutex_exit(&connp->conn_lock);
	if (refheld) {
		if (ipsq_enter(ill, B_TRUE, NEW_OP)) {
			ill_waiter_dcr(ill);
			/*
			 * Check whether this ioctl has started and is
			 * pending.  If it is not found there then check
			 * whether this ioctl has not even started and is in
			 * the ipsq_xopq list.
			 */
			if (!ipsq_pending_mp_cleanup(ill, connp))
				ipsq_xopq_mp_cleanup(ill, connp);
			ipsq = ill->ill_phyint->phyint_ipsq;
			ipsq_exit(ipsq);
			return;
		}
	}

	/*
	 * The ill is also closing and we could not bump up the
	 * ill_waiter_count or we could not enter the ipsq.  Leave
	 * the cleanup to ill_delete.
	 */
	mutex_enter(&connp->conn_lock);
	while (connp->conn_oper_pending_ill != NULL)
		cv_wait(&connp->conn_refcv, &connp->conn_lock);
	mutex_exit(&connp->conn_lock);
	if (refheld)
		ill_waiter_dcr(ill);
}

/*
 * ipcl_walk function for cleaning up conn_*_ill fields.
 * Note that we leave ixa_multicast_ifindex, conn_incoming_ifindex, and
 * conn_bound_if in place.  We prefer dropping packets instead of sending
 * them out the wrong interface, or accepting packets from the wrong ifindex.
 */
static void
conn_cleanup_ill(conn_t *connp, caddr_t arg)
{
	ill_t	*ill = (ill_t *)arg;

	mutex_enter(&connp->conn_lock);
	if (connp->conn_dhcpinit_ill == ill) {
		connp->conn_dhcpinit_ill = NULL;
		ASSERT(ill->ill_dhcpinit != 0);
		atomic_dec_32(&ill->ill_dhcpinit);
		ill_set_inputfn(ill);
	}
	mutex_exit(&connp->conn_lock);
}

static int
ill_down_ipifs_tail(ill_t *ill)
{
	ipif_t	*ipif;
	int	err;

	ASSERT(IAM_WRITER_ILL(ill));
	for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
		ipif_non_duplicate(ipif);
		/*
		 * ipif_down_tail will call arp_ll_down on the last ipif
		 * and typically return EINPROGRESS when the DL_UNBIND is sent.
		 */
		if ((err = ipif_down_tail(ipif)) != 0)
			return (err);
	}
	return (0);
}

/* ARGSUSED */
void
ipif_all_down_tail(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy_arg)
{
	ASSERT(IAM_WRITER_IPSQ(ipsq));
	(void) ill_down_ipifs_tail(q->q_ptr);
	freemsg(mp);
	ipsq_current_finish(ipsq);
}

/*
 * ill_down_start is called when we want to down this ill and bring it up
 * again.  It is called when we receive an M_ERROR / M_HANGUP; in this case
 * we shut down all interfaces, but don't tear down any plumbing.
 */
boolean_t
ill_down_start(queue_t *q, mblk_t *mp)
{
	ill_t	*ill = q->q_ptr;
	ipif_t	*ipif;

	ASSERT(IAM_WRITER_ILL(ill));
	/*
	 * It is possible that some ioctl is already in progress while we
	 * received the M_ERROR / M_HANGUP in which case, we need to abort
	 * the ioctl. ill_down_start() is being processed as CUR_OP rather
	 * than as NEW_OP since the cause of the M_ERROR / M_HANGUP may prevent
	 * the in progress ioctl from ever completing.
	 *
	 * The thread that started the ioctl (if any) must have returned,
	 * since we are now executing as writer. After the 2 calls below,
	 * the state of the ipsq and the ill would reflect no trace of any
	 * pending operation. Subsequently if there is any response to the
	 * original ioctl from the driver, it would be discarded as an
	 * unsolicited message from the driver.
	 */
	(void) ipsq_pending_mp_cleanup(ill, NULL);
	ill_dlpi_clear_deferred(ill);

	for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next)
		(void) ipif_down(ipif, NULL, NULL);

	ill_down(ill);

	/*
	 * Walk all CONNs that can have a reference on an ire or nce for this
	 * ill (we actually walk all that now have stale references).
	 */
	ipcl_walk(conn_ixa_cleanup, (void *)B_TRUE, ill->ill_ipst);

	/* With IPv6 we have dce_ifindex. Cleanup for neatness */
	if (ill->ill_isv6)
		dce_cleanup(ill->ill_phyint->phyint_ifindex, ill->ill_ipst);

	ipsq_current_start(ill->ill_phyint->phyint_ipsq, ill->ill_ipif, 0);

	/*
	 * Atomically test and add the pending mp if references are active.
	 */
	mutex_enter(&ill->ill_lock);
	if (!ill_is_quiescent(ill)) {
		/* call cannot fail since `conn_t *' argument is NULL */
		(void) ipsq_pending_mp_add(NULL, ill->ill_ipif, ill->ill_rq,
		    mp, ILL_DOWN);
		mutex_exit(&ill->ill_lock);
		return (B_FALSE);
	}
	mutex_exit(&ill->ill_lock);
	return (B_TRUE);
}

static void
ill_down(ill_t *ill)
{
	mblk_t	*mp;
	ip_stack_t	*ipst = ill->ill_ipst;

	/*
	 * Blow off any IREs dependent on this ILL.
	 * The caller needs to handle conn_ixa_cleanup
	 */
	ill_delete_ires(ill);

	ire_walk_ill(0, 0, ill_downi, ill, ill);

	/* Remove any conn_*_ill depending on this ill */
	ipcl_walk(conn_cleanup_ill, (caddr_t)ill, ipst);

	/*
	 * Free state for additional IREs.
	 */
	mutex_enter(&ill->ill_saved_ire_lock);
	mp = ill->ill_saved_ire_mp;
	ill->ill_saved_ire_mp = NULL;
	ill->ill_saved_ire_cnt = 0;
	mutex_exit(&ill->ill_saved_ire_lock);
	freemsg(mp);
}

/*
 * ire_walk routine used to delete every IRE that depends on
 * 'ill'.  (Always called as writer, and may only be called from ire_walk.)
 *
 * Note: since the routes added by the kernel are deleted separately,
 * this will only be 1) IRE_IF_CLONE and 2) manually added IRE_INTERFACE.
 *
 * We also remove references on ire_nce_cache entries that refer to the ill.
 */
void
ill_downi(ire_t *ire, char *ill_arg)
{
	ill_t	*ill = (ill_t *)ill_arg;
	nce_t	*nce;

	mutex_enter(&ire->ire_lock);
	nce = ire->ire_nce_cache;
	if (nce != NULL && nce->nce_ill == ill)
		ire->ire_nce_cache = NULL;
	else
		nce = NULL;
	mutex_exit(&ire->ire_lock);
	if (nce != NULL)
		nce_refrele(nce);
	if (ire->ire_ill == ill) {
		/*
		 * The existing interface binding for ire must be
		 * deleted before trying to bind the route to another
		 * interface. However, since we are using the contents of the
		 * ire after ire_delete, the caller has to ensure that
		 * CONDEMNED (deleted) ire's are not removed from the list
		 * when ire_delete() returns. Currently ill_downi() is
		 * only called as part of ire_walk*() routines, so that
		 * the irb_refhold() done by ire_walk*() will ensure that
		 * ire_delete() does not lead to ire_inactive().
		 */
		ASSERT(ire->ire_bucket->irb_refcnt > 0);
		ire_delete(ire);
		if (ire->ire_unbound)
			ire_rebind(ire);
	}
}

/* Remove IRE_IF_CLONE on this ill */
void
ill_downi_if_clone(ire_t *ire, char *ill_arg)
{
	ill_t	*ill = (ill_t *)ill_arg;

	ASSERT(ire->ire_type & IRE_IF_CLONE);
	if (ire->ire_ill == ill)
		ire_delete(ire);
}

/* Consume an M_IOCACK of the fastpath probe. */
void
ill_fastpath_ack(ill_t *ill, mblk_t *mp)
{
	mblk_t	*mp1 = mp;

	/*
	 * If this was the first attempt, turn on fastpath probing.
	 */
	mutex_enter(&ill->ill_lock);
	if (ill->ill_dlpi_fastpath_state == IDS_INPROGRESS)
		ill->ill_dlpi_fastpath_state = IDS_OK;
	mutex_exit(&ill->ill_lock);

	/* Free the M_IOCACK mblk, hold on to the data */
	mp = mp->b_cont;
	freeb(mp1);
	if (mp == NULL)
		return;
	if (mp->b_cont != NULL)
		nce_fastpath_update(ill, mp);
	else
		ip0dbg(("ill_fastpath_ack: no b_cont\n"));
	freemsg(mp);
}

/*
 * Throw an M_IOCTL message downstream asking "do you know fastpath?"
 * The data portion of the request is a dl_unitdata_req_t template for
 * what we would send downstream in the absence of a fastpath confirmation.
 * A driver that supports fastpath acks the probe with an M_IOCACK, which
 * is consumed by ill_fastpath_ack() above.
 */
int
ill_fastpath_probe(ill_t *ill, mblk_t *dlur_mp)
{
	struct iocblk	*ioc;
	mblk_t	*mp;

	if (dlur_mp == NULL)
		return (EINVAL);

	mutex_enter(&ill->ill_lock);
	switch (ill->ill_dlpi_fastpath_state) {
	case IDS_FAILED:
		/*
		 * Driver NAKed the first fastpath ioctl - assume it doesn't
		 * support it.
		 */
		mutex_exit(&ill->ill_lock);
		return (ENOTSUP);
	case IDS_UNKNOWN:
		/* This is the first probe */
		ill->ill_dlpi_fastpath_state = IDS_INPROGRESS;
		break;
	default:
		break;
	}
	mutex_exit(&ill->ill_lock);

	if ((mp = mkiocb(DL_IOC_HDR_INFO)) == NULL)
		return (EAGAIN);

	mp->b_cont = copyb(dlur_mp);
	if (mp->b_cont == NULL) {
		freeb(mp);
		return (EAGAIN);
	}

	ioc = (struct iocblk *)mp->b_rptr;
	ioc->ioc_count = msgdsize(mp->b_cont);

	DTRACE_PROBE3(ill__dlpi, char *, "ill_fastpath_probe",
	    char *, "DL_IOC_HDR_INFO", ill_t *, ill);
	putnext(ill->ill_wq, mp);
	return (0);
}

void
ill_capability_probe(ill_t *ill)
{
	mblk_t	*mp;

	ASSERT(IAM_WRITER_ILL(ill));

	if (ill->ill_dlpi_capab_state != IDCS_UNKNOWN &&
	    ill->ill_dlpi_capab_state != IDCS_FAILED)
		return;

	/*
	 * We are starting a new cycle of capability negotiation.
	 * Free up the capab reset messages of any previous incarnation.
	 * We will do a fresh allocation when we get the response to our
	 * probe.
	 */
	if (ill->ill_capab_reset_mp != NULL) {
		freemsg(ill->ill_capab_reset_mp);
		ill->ill_capab_reset_mp = NULL;
	}

	ip1dbg(("ill_capability_probe: starting capability negotiation\n"));

	mp = ip_dlpi_alloc(sizeof (dl_capability_req_t), DL_CAPABILITY_REQ);
	if (mp == NULL)
		return;

	ill_capability_send(ill, mp);
	ill->ill_dlpi_capab_state = IDCS_PROBE_SENT;
}

static boolean_t
ill_capability_wait(ill_t *ill)
{
	/*
	 * I'm in this ill's squeue, aka a writer.  The ILL_CONDEMNED flag can
	 * only be set by someone who is the writer.  Since we
	 * drop-and-reacquire the squeue in this loop, we need to check for
	 * ILL_CONDEMNED, which if set means nothing can signal our capability
	 * condition variable.
	 */
	ASSERT(IAM_WRITER_ILL(ill));

	while (ill->ill_capab_pending_cnt != 0 &&
	    (ill->ill_state_flags & ILL_CONDEMNED) == 0) {
		mutex_enter(&ill->ill_dlpi_capab_lock);
		ipsq_exit(ill->ill_phyint->phyint_ipsq);
		cv_wait(&ill->ill_dlpi_capab_cv, &ill->ill_dlpi_capab_lock);
		mutex_exit(&ill->ill_dlpi_capab_lock);
		/*
		 * If ipsq_enter() fails, someone set ILL_CONDEMNED
		 * while we dropped the squeue.  Indicate such to the caller.
		 */
		if (!ipsq_enter(ill, B_FALSE, CUR_OP))
			return (B_FALSE);
	}

	return ((ill->ill_state_flags & ILL_CONDEMNED) == 0);
}

void
ill_capability_reset(ill_t *ill, boolean_t reneg)
{
	ASSERT(IAM_WRITER_ILL(ill));

	if (ill->ill_dlpi_capab_state != IDCS_OK)
		return;

	ill->ill_dlpi_capab_state = reneg ? IDCS_RENEG : IDCS_RESET_SENT;

	ASSERT(ill->ill_capab_reset_mp != NULL);

	ill_capability_send(ill, ill->ill_capab_reset_mp);
	ill->ill_capab_reset_mp = NULL;
	/*
	 * We turn off all capabilities except the direct function call
	 * capabilities (ILL_CAPAB_DLD*), which are turned off by the
	 * corresponding reset functions.
	 */
	ill->ill_capabilities &= ~(ILL_CAPAB_HCKSUM | ILL_CAPAB_ZEROCOPY);
}

static void
ill_capability_reset_alloc(ill_t *ill)
{
	mblk_t *mp;
	size_t	size = 0;
	int	err;
	dl_capability_req_t	*capb;

	ASSERT(IAM_WRITER_ILL(ill));
	ASSERT(ill->ill_capab_reset_mp == NULL);

	if (ILL_HCKSUM_CAPABLE(ill)) {
		size += sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_hcksum_t);
	}

	if (ill->ill_capabilities & ILL_CAPAB_ZEROCOPY) {
		size += sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_zerocopy_t);
	}

	if (ill->ill_capabilities & ILL_CAPAB_DLD) {
		size += sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_dld_t);
	}

	mp = allocb_wait(size + sizeof (dl_capability_req_t), BPRI_MED,
	    STR_NOSIG, &err);

	mp->b_datap->db_type = M_PROTO;
	bzero(mp->b_rptr, size + sizeof (dl_capability_req_t));

	capb = (dl_capability_req_t *)mp->b_rptr;
	capb->dl_primitive = DL_CAPABILITY_REQ;
	capb->dl_sub_offset = sizeof (dl_capability_req_t);
	capb->dl_sub_length = size;

	mp->b_wptr += sizeof (dl_capability_req_t);

	/*
	 * Each handler fills in the corresponding dl_capability_sub_t
	 * inside the mblk.
	 */
	ill_capability_hcksum_reset_fill(ill, mp);
	ill_capability_zerocopy_reset_fill(ill, mp);
	ill_capability_dld_reset_fill(ill, mp);

	ill->ill_capab_reset_mp = mp;
}

static void
ill_capability_id_ack(ill_t *ill, mblk_t *mp, dl_capability_sub_t *outers)
{
	dl_capab_id_t *id_ic;
	uint_t sub_dl_cap = outers->dl_cap;
	dl_capability_sub_t *inners;
	uint8_t *capend;

	ASSERT(sub_dl_cap == DL_CAPAB_ID_WRAPPER);

	/*
	 * Note: range checks here are not absolutely sufficient to
	 * make us robust against malformed messages sent by drivers;
	 * this is in keeping with the rest of IP's dlpi handling.
	 * (Remember, it's coming from something else in the kernel
	 * address space)
	 */

	capend = (uint8_t *)(outers + 1) + outers->dl_length;
	if (capend > mp->b_wptr) {
		cmn_err(CE_WARN, "ill_capability_id_ack: "
		    "malformed sub-capability too long for mblk");
		return;
	}

	id_ic = (dl_capab_id_t *)(outers + 1);

	if (outers->dl_length < sizeof (*id_ic) ||
	    (inners = &id_ic->id_subcap,
	    inners->dl_length > (outers->dl_length - sizeof (*inners)))) {
		cmn_err(CE_WARN, "ill_capability_id_ack: malformed "
		    "encapsulated capab type %d too long for mblk",
		    inners->dl_cap);
		return;
	}

	if (!dlcapabcheckqid(&id_ic->id_mid, ill->ill_lmod_rq)) {
		ip1dbg(("ill_capability_id_ack: mid token for capab type %d "
		    "isn't as expected; pass-thru module(s) detected, "
		    "discarding capability\n", inners->dl_cap));
		return;
	}

	/* Process the encapsulated sub-capability */
	ill_capability_dispatch(ill, mp, inners);
}

static void
ill_capability_dld_reset_fill(ill_t *ill, mblk_t *mp)
{
	dl_capability_sub_t *dl_subcap;

	if (!(ill->ill_capabilities & ILL_CAPAB_DLD))
		return;

	/*
	 * The dl_capab_dld_t that follows the dl_capability_sub_t is not
	 * initialized below since it is not used by DLD.
	 */
	dl_subcap = (dl_capability_sub_t *)mp->b_wptr;
	dl_subcap->dl_cap = DL_CAPAB_DLD;
	dl_subcap->dl_length = sizeof (dl_capab_dld_t);

	mp->b_wptr += sizeof (dl_capability_sub_t) + sizeof (dl_capab_dld_t);
}

static void
ill_capability_dispatch(ill_t *ill, mblk_t *mp, dl_capability_sub_t *subp)
{
	/*
	 * If no ipif was brought up over this ill, this DL_CAPABILITY_REQ/ACK
	 * is only to get the VRRP capability.
	 *
	 * Note that we cannot check ill_ipif_up_count here since
	 * ill_ipif_up_count is only incremented when the resolver is setup.
	 * That is done asynchronously, and can race with this function.
	 */
	if (!ill->ill_dl_up) {
		if (subp->dl_cap == DL_CAPAB_VRRP)
			ill_capability_vrrp_ack(ill, mp, subp);
		return;
	}

	switch (subp->dl_cap) {
	case DL_CAPAB_HCKSUM:
		ill_capability_hcksum_ack(ill, mp, subp);
		break;
	case DL_CAPAB_ZEROCOPY:
		ill_capability_zerocopy_ack(ill, mp, subp);
		break;
	case DL_CAPAB_DLD:
		ill_capability_dld_ack(ill, mp, subp);
		break;
	case DL_CAPAB_VRRP:
		break;
	default:
		ip1dbg(("ill_capability_dispatch: unknown capab type %d\n",
		    subp->dl_cap));
	}
}

/*
 * Process the vrrp capability received from a DLS Provider. isub must point
 * to the sub-capability (DL_CAPAB_VRRP) of a DL_CAPABILITY_ACK message.
 */
static void
ill_capability_vrrp_ack(ill_t *ill, mblk_t *mp, dl_capability_sub_t *isub)
{
	dl_capab_vrrp_t	*vrrp;
	uint_t		sub_dl_cap = isub->dl_cap;
	uint8_t		*capend;

	ASSERT(IAM_WRITER_ILL(ill));
	ASSERT(sub_dl_cap == DL_CAPAB_VRRP);

	/*
	 * Note: range checks here are not absolutely sufficient to
	 * make us robust against malformed messages sent by drivers;
	 * this is in keeping with the rest of IP's dlpi handling.
	 * (Remember, it's coming from something else in the kernel
	 * address space)
	 */
	capend = (uint8_t *)(isub + 1) + isub->dl_length;
	if (capend > mp->b_wptr) {
		cmn_err(CE_WARN, "ill_capability_vrrp_ack: "
		    "malformed sub-capability too long for mblk");
		return;
	}
	vrrp = (dl_capab_vrrp_t *)(isub + 1);

	/*
	 * Compare the IP address family and set ILLF_VRRP for the right ill.
	 */
	if ((vrrp->vrrp_af == AF_INET6 && ill->ill_isv6) ||
	    (vrrp->vrrp_af == AF_INET && !ill->ill_isv6)) {
		ill->ill_flags |= ILLF_VRRP;
	}
}

/*
 * Process a hardware checksum offload capability negotiation ack received
 * from a DLS Provider.  isub must point to the sub-capability
 * (DL_CAPAB_HCKSUM) of a DL_CAPABILITY_ACK message.
 */
static void
ill_capability_hcksum_ack(ill_t *ill, mblk_t *mp, dl_capability_sub_t *isub)
{
	dl_capability_req_t	*ocap;
	dl_capab_hcksum_t	*ihck, *ohck;
	ill_hcksum_capab_t	**ill_hcksum;
	mblk_t			*nmp = NULL;
	uint_t			sub_dl_cap = isub->dl_cap;
	uint8_t			*capend;

	ASSERT(sub_dl_cap == DL_CAPAB_HCKSUM);

	ill_hcksum = (ill_hcksum_capab_t **)&ill->ill_hcksum_capab;

	/*
	 * Note: range checks here are not absolutely sufficient to
	 * make us robust against malformed messages sent by drivers;
	 * this is in keeping with the rest of IP's dlpi handling.
	 * (Remember, it's coming from something else in the kernel
	 * address space)
	 */
	capend = (uint8_t *)(isub + 1) + isub->dl_length;
	if (capend > mp->b_wptr) {
		cmn_err(CE_WARN, "ill_capability_hcksum_ack: "
		    "malformed sub-capability too long for mblk");
		return;
	}

	/*
	 * There are two types of acks we process here:
	 * 1. acks in reply to a (first form) generic capability req
	 *    (no ENABLE flag set)
	 * 2. acks in reply to an ENABLE capability req.
	 *    (ENABLE flag set)
	 */
	ihck = (dl_capab_hcksum_t *)(isub + 1);

	if (ihck->hcksum_version != HCKSUM_VERSION_1) {
		cmn_err(CE_CONT, "ill_capability_hcksum_ack: "
		    "unsupported hardware checksum "
		    "sub-capability (version %d, expected %d)",
		    ihck->hcksum_version, HCKSUM_VERSION_1);
		return;
	}

	if (!dlcapabcheckqid(&ihck->hcksum_mid, ill->ill_lmod_rq)) {
		ip1dbg(("ill_capability_hcksum_ack: mid token for hardware "
		    "checksum capability isn't as expected; pass-thru "
		    "module(s) detected, discarding capability\n"));
		return;
	}

#define	CURR_HCKSUM_CAPAB				\
	(HCKSUM_INET_PARTIAL | HCKSUM_INET_FULL_V4 |	\
	HCKSUM_INET_FULL_V6 | HCKSUM_IPHDRCKSUM)

	if ((ihck->hcksum_txflags & HCKSUM_ENABLE) &&
	    (ihck->hcksum_txflags & CURR_HCKSUM_CAPAB)) {
		/* do ENABLE processing */
		if (*ill_hcksum == NULL) {
			*ill_hcksum = kmem_zalloc(sizeof (ill_hcksum_capab_t),
			    KM_NOSLEEP);

			if (*ill_hcksum == NULL) {
				cmn_err(CE_WARN, "ill_capability_hcksum_ack: "
				    "could not enable hcksum version %d "
				    "for %s (ENOMEM)\n", HCKSUM_CURRENT_VERSION,
				    ill->ill_name);
				return;
			}
		}

		(*ill_hcksum)->ill_hcksum_version = ihck->hcksum_version;
		(*ill_hcksum)->ill_hcksum_txflags = ihck->hcksum_txflags;
		ill->ill_capabilities |= ILL_CAPAB_HCKSUM;
		ip1dbg(("ill_capability_hcksum_ack: interface %s "
		    "has enabled hardware checksumming\n",
		    ill->ill_name));
	} else if (ihck->hcksum_txflags & CURR_HCKSUM_CAPAB) {
		/*
		 * Enabling hardware checksum offload.  Currently IP
		 * supports {TCP,UDP}/IPv4 partial and full cksum
		 * offload and IPv4 header checksum offload.
		 * Allocate a new mblk which will contain a new
		 * capability request to enable hardware checksum
		 * offload.
		 */
		uint_t	size;
		uchar_t	*rptr;

		size = sizeof (dl_capability_req_t) +
		    sizeof (dl_capability_sub_t) + isub->dl_length;

		if ((nmp = ip_dlpi_alloc(size, DL_CAPABILITY_REQ)) == NULL) {
			cmn_err(CE_WARN, "ill_capability_hcksum_ack: "
			    "could not enable hardware cksum for %s (ENOMEM)\n",
			    ill->ill_name);
			return;
		}

		rptr = nmp->b_rptr;
		/* initialize dl_capability_req_t */
		ocap = (dl_capability_req_t *)nmp->b_rptr;
		ocap->dl_sub_offset =
		    sizeof (dl_capability_req_t);
		ocap->dl_sub_length =
		    sizeof (dl_capability_sub_t) +
		    isub->dl_length;
		nmp->b_rptr += sizeof (dl_capability_req_t);

		/* initialize dl_capability_sub_t */
		bcopy(isub, nmp->b_rptr, sizeof (*isub));
		nmp->b_rptr += sizeof (*isub);

		/* initialize dl_capab_hcksum_t */
		ohck = (dl_capab_hcksum_t *)nmp->b_rptr;
		bcopy(ihck, ohck, sizeof (*ihck));

		nmp->b_rptr = rptr;
		ASSERT(nmp->b_wptr == (nmp->b_rptr + size));

		/* Set ENABLE flag */
		ohck->hcksum_txflags &= CURR_HCKSUM_CAPAB;
		ohck->hcksum_txflags |= HCKSUM_ENABLE;

		/*
		 * nmp points to a DL_CAPABILITY_REQ message to enable
		 * hardware checksum acceleration.
		 */
		ill_capability_send(ill, nmp);
	} else {
		ip1dbg(("ill_capability_hcksum_ack: interface %s has "
		    "advertised %x hardware checksum capability flags\n",
		    ill->ill_name, ihck->hcksum_txflags));
	}
}

static void
ill_capability_hcksum_reset_fill(ill_t *ill, mblk_t *mp)
{
	dl_capab_hcksum_t *hck_subcap;
	dl_capability_sub_t *dl_subcap;

	if (!ILL_HCKSUM_CAPABLE(ill))
		return;

	ASSERT(ill->ill_hcksum_capab != NULL);

	dl_subcap = (dl_capability_sub_t *)mp->b_wptr;
	dl_subcap->dl_cap = DL_CAPAB_HCKSUM;
	dl_subcap->dl_length = sizeof (*hck_subcap);

	hck_subcap = (dl_capab_hcksum_t *)(dl_subcap + 1);
	hck_subcap->hcksum_version = ill->ill_hcksum_capab->ill_hcksum_version;
	hck_subcap->hcksum_txflags = 0;

	mp->b_wptr += sizeof (*dl_subcap) + sizeof (*hck_subcap);
}

static void
ill_capability_zerocopy_ack(ill_t *ill, mblk_t *mp, dl_capability_sub_t *isub)
{
	mblk_t *nmp = NULL;
	dl_capability_req_t *oc;
	dl_capab_zerocopy_t *zc_ic, *zc_oc;
	ill_zerocopy_capab_t **ill_zerocopy_capab;
	uint_t sub_dl_cap = isub->dl_cap;
	uint8_t *capend;

	ASSERT(sub_dl_cap == DL_CAPAB_ZEROCOPY);

	ill_zerocopy_capab = (ill_zerocopy_capab_t **)&ill->ill_zerocopy_capab;

	/*
	 * Note: range checks here are not absolutely sufficient to
	 * make us robust against malformed messages sent by drivers;
	 * this is in keeping with the rest of IP's dlpi handling.
	 * (Remember, it's coming from something else in the kernel
	 * address space)
	 */
	capend = (uint8_t *)(isub + 1) + isub->dl_length;
	if (capend > mp->b_wptr) {
		cmn_err(CE_WARN, "ill_capability_zerocopy_ack: "
		    "malformed sub-capability too long for mblk");
		return;
	}

	zc_ic = (dl_capab_zerocopy_t *)(isub + 1);
	if (zc_ic->zerocopy_version != ZEROCOPY_VERSION_1) {
		cmn_err(CE_CONT, "ill_capability_zerocopy_ack: "
		    "unsupported ZEROCOPY sub-capability (version %d, "
		    "expected %d)", zc_ic->zerocopy_version,
		    ZEROCOPY_VERSION_1);
		return;
	}

	if (!dlcapabcheckqid(&zc_ic->zerocopy_mid, ill->ill_lmod_rq)) {
		ip1dbg(("ill_capability_zerocopy_ack: mid token for zerocopy "
		    "capability isn't as expected; pass-thru module(s) "
		    "detected, discarding capability\n"));
		return;
	}

	if ((zc_ic->zerocopy_flags & DL_CAPAB_VMSAFE_MEM) != 0) {
		if (*ill_zerocopy_capab == NULL) {
			*ill_zerocopy_capab =
			    kmem_zalloc(sizeof (ill_zerocopy_capab_t),
			    KM_NOSLEEP);

			if (*ill_zerocopy_capab == NULL) {
				cmn_err(CE_WARN, "ill_capability_zerocopy_ack: "
				    "could not enable Zero-copy version %d "
				    "for %s (ENOMEM)\n", ZEROCOPY_VERSION_1,
				    ill->ill_name);
				return;
			}
		}

		ip1dbg(("ill_capability_zerocopy_ack: interface %s "
		    "supports Zero-copy version %d\n", ill->ill_name,
		    ZEROCOPY_VERSION_1));

		(*ill_zerocopy_capab)->ill_zerocopy_version =
		    zc_ic->zerocopy_version;
		(*ill_zerocopy_capab)->ill_zerocopy_flags =
		    zc_ic->zerocopy_flags;

		ill->ill_capabilities |= ILL_CAPAB_ZEROCOPY;
	} else {
		uint_t size;
		uchar_t *rptr;

		size = sizeof (dl_capability_req_t) +
		    sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_zerocopy_t);

		if ((nmp = ip_dlpi_alloc(size, DL_CAPABILITY_REQ)) == NULL) {
			cmn_err(CE_WARN, "ill_capability_zerocopy_ack: "
			    "could not enable zerocopy for %s (ENOMEM)\n",
			    ill->ill_name);
			return;
		}

		rptr = nmp->b_rptr;
		/* initialize dl_capability_req_t */
		oc = (dl_capability_req_t *)rptr;
		oc->dl_sub_offset = sizeof (dl_capability_req_t);
		oc->dl_sub_length = sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_zerocopy_t);
		rptr += sizeof (dl_capability_req_t);

		/* initialize dl_capability_sub_t */
		bcopy(isub, rptr, sizeof (*isub));
1882 rptr += sizeof (*isub);
1883
1884 /* initialize dl_capab_zerocopy_t */
1885 zc_oc = (dl_capab_zerocopy_t *)rptr;
1886 *zc_oc = *zc_ic;
1887
1888 ip1dbg(("ill_capability_zerocopy_ack: asking interface %s "
1889 "to enable zero-copy version %d\n", ill->ill_name,
1890 ZEROCOPY_VERSION_1));
1891
1892 /* set VMSAFE_MEM flag */
1893 zc_oc->zerocopy_flags |= DL_CAPAB_VMSAFE_MEM;
1894
1895 /* nmp points to a DL_CAPABILITY_REQ message to enable zcopy */
1896 ill_capability_send(ill, nmp);
1897 }
1898 }
1899
1900 static void
1901 ill_capability_zerocopy_reset_fill(ill_t *ill, mblk_t *mp)
1902 {
1903 dl_capab_zerocopy_t *zerocopy_subcap;
1904 dl_capability_sub_t *dl_subcap;
1905
1906 if (!(ill->ill_capabilities & ILL_CAPAB_ZEROCOPY))
1907 return;
1908
1909 ASSERT(ill->ill_zerocopy_capab != NULL);
1910
1911 dl_subcap = (dl_capability_sub_t *)mp->b_wptr;
1912 dl_subcap->dl_cap = DL_CAPAB_ZEROCOPY;
1913 dl_subcap->dl_length = sizeof (*zerocopy_subcap);
1914
1915 zerocopy_subcap = (dl_capab_zerocopy_t *)(dl_subcap + 1);
1916 zerocopy_subcap->zerocopy_version =
1917 ill->ill_zerocopy_capab->ill_zerocopy_version;
1918 zerocopy_subcap->zerocopy_flags = 0;
1919
1920 mp->b_wptr += sizeof (*dl_subcap) + sizeof (*zerocopy_subcap);
1921 }
1922
1923 /*
1924 * DLD capability
1925 * Refer to dld.h for more information regarding the purpose and usage
1926 * of this capability.
1927 */
1928 static void
1929 ill_capability_dld_ack(ill_t *ill, mblk_t *mp, dl_capability_sub_t *isub)
1930 {
1931 dl_capab_dld_t *dld_ic, dld;
1932 uint_t sub_dl_cap = isub->dl_cap;
1933 uint8_t *capend;
1934 ill_dld_capab_t *idc;
1935
1936 ASSERT(IAM_WRITER_ILL(ill));
1937 ASSERT(sub_dl_cap == DL_CAPAB_DLD);
1938
1939 /*
1940 * Note: range checks here are not absolutely sufficient to
1941 * make us robust against malformed messages sent by drivers;
1942 * this is in keeping with the rest of IP's dlpi handling.
1943 * (Remember, it's coming from something else in the kernel
1944 * address space)
1945 */
1946 capend = (uint8_t *)(isub + 1) + isub->dl_length;
1947 if (capend > mp->b_wptr) {
1948 cmn_err(CE_WARN, "ill_capability_dld_ack: "
1949 "malformed sub-capability too long for mblk");
1950 return;
1951 }
1952 dld_ic = (dl_capab_dld_t *)(isub + 1);
1953 if (dld_ic->dld_version != DLD_CURRENT_VERSION) {
1954 cmn_err(CE_CONT, "ill_capability_dld_ack: "
1955 "unsupported DLD sub-capability (version %d, "
1956 "expected %d)", dld_ic->dld_version,
1957 DLD_CURRENT_VERSION);
1958 return;
1959 }
1960 if (!dlcapabcheckqid(&dld_ic->dld_mid, ill->ill_lmod_rq)) {
1961 ip1dbg(("ill_capability_dld_ack: mid token for dld "
1962 "capability isn't as expected; pass-thru module(s) "
1963 "detected, discarding capability\n"));
1964 return;
1965 }
1966
1967 /*
1968 * Copy locally to ensure alignment.
1969 */
1970 bcopy(dld_ic, &dld, sizeof (dl_capab_dld_t));
1971
1972 if ((idc = ill->ill_dld_capab) == NULL) {
1973 idc = kmem_zalloc(sizeof (ill_dld_capab_t), KM_NOSLEEP);
1974 if (idc == NULL) {
1975 cmn_err(CE_WARN, "ill_capability_dld_ack: "
1976 "could not enable DLD version %d "
1977 "for %s (ENOMEM)\n", DLD_CURRENT_VERSION,
1978 ill->ill_name);
1979 return;
1980 }
1981 ill->ill_dld_capab = idc;
1982 }
1983 idc->idc_capab_df = (ip_capab_func_t)dld.dld_capab;
1984 idc->idc_capab_dh = (void *)dld.dld_capab_handle;
1985 ip1dbg(("ill_capability_dld_ack: interface %s "
1986 "supports DLD version %d\n", ill->ill_name, DLD_CURRENT_VERSION));
1987
1988 ill_capability_dld_enable(ill);
1989 }
1990
1991 /*
1992 * Typically capability negotiation between IP and the driver happens via
1993 * DLPI message exchange. However GLD also offers a direct function call
1994 * mechanism to exchange the DLD_DIRECT_CAPAB and DLD_POLL_CAPAB capabilities,
1995 * But arbitrary function calls into IP or GLD are not permitted, since both
1996 * of them are protected by their own perimeter mechanism. The perimeter can
1997 * be viewed as a coarse lock or serialization mechanism. The hierarchy of
1998 * these perimeters is IP -> MAC. Thus for example to enable the squeue
1999 * polling, IP needs to enter its perimeter, then call ill_mac_perim_enter
2000 * to enter the mac perimeter and then do the direct function calls into
2001 * GLD to enable squeue polling. The ring related callbacks from the mac into
2002 * the stack to add, bind, quiesce, restart or cleanup a ring are all
2003 * protected by the mac perimeter.
2004 */
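/*
 * A minimal sketch of that pattern (illustrative only; the real callers are
 * the DLD_ENABLE/DLD_DISABLE paths below), with the IP perimeter already
 * held as writer:
 *
 *    mac_perim_handle_t mph;
 *
 *    ill_mac_perim_enter(ill, &mph);
 *    (void) idc->idc_capab_df(idc->idc_capab_dh, ...);   (direct call)
 *    ill_mac_perim_exit(ill, mph);
 */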
2005 static void
2006 ill_mac_perim_enter(ill_t *ill, mac_perim_handle_t *mphp)
2007 {
2008 ill_dld_capab_t *idc = ill->ill_dld_capab;
2009 int err;
2010
2011 err = idc->idc_capab_df(idc->idc_capab_dh, DLD_CAPAB_PERIM, mphp,
2012 DLD_ENABLE);
2013 ASSERT(err == 0);
2014 }
2015
2016 static void
2017 ill_mac_perim_exit(ill_t *ill, mac_perim_handle_t mph)
2018 {
2019 ill_dld_capab_t *idc = ill->ill_dld_capab;
2020 int err;
2021
2022 err = idc->idc_capab_df(idc->idc_capab_dh, DLD_CAPAB_PERIM, mph,
2023 DLD_DISABLE);
2024 ASSERT(err == 0);
2025 }
2026
2027 boolean_t
2028 ill_mac_perim_held(ill_t *ill)
2029 {
2030 ill_dld_capab_t *idc = ill->ill_dld_capab;
2031
2032 return (idc->idc_capab_df(idc->idc_capab_dh, DLD_CAPAB_PERIM, NULL,
2033 DLD_QUERY));
2034 }
2035
2036 static void
2037 ill_capability_direct_enable(ill_t *ill)
2038 {
2039 ill_dld_capab_t *idc = ill->ill_dld_capab;
2040 ill_dld_direct_t *idd = &idc->idc_direct;
2041 dld_capab_direct_t direct;
2042 int rc;
2043
2044 ASSERT(!ill->ill_isv6 && IAM_WRITER_ILL(ill));
2045
2046 bzero(&direct, sizeof (direct));
2047 direct.di_rx_cf = (uintptr_t)ip_input;
2048 direct.di_rx_ch = ill;
2049
2050 rc = idc->idc_capab_df(idc->idc_capab_dh, DLD_CAPAB_DIRECT, &direct,
2051 DLD_ENABLE);
2052 if (rc == 0) {
2053 idd->idd_tx_df = (ip_dld_tx_t)direct.di_tx_df;
2054 idd->idd_tx_dh = direct.di_tx_dh;
2055 idd->idd_tx_cb_df = (ip_dld_callb_t)direct.di_tx_cb_df;
2056 idd->idd_tx_cb_dh = direct.di_tx_cb_dh;
2057 idd->idd_tx_fctl_df = (ip_dld_fctl_t)direct.di_tx_fctl_df;
2058 idd->idd_tx_fctl_dh = direct.di_tx_fctl_dh;
2059 ASSERT(idd->idd_tx_cb_df != NULL);
2060 ASSERT(idd->idd_tx_fctl_df != NULL);
2061 ASSERT(idd->idd_tx_df != NULL);
2062 /*
2063 * One time registration of flow enable callback function
2064 */
2065 ill->ill_flownotify_mh = idd->idd_tx_cb_df(idd->idd_tx_cb_dh,
2066 ill_flow_enable, ill);
2067 ill->ill_capabilities |= ILL_CAPAB_DLD_DIRECT;
2068 DTRACE_PROBE1(direct_on, (ill_t *), ill);
2069 } else {
2070 cmn_err(CE_WARN, "warning: could not enable DIRECT "
2071 "capability, rc = %d\n", rc);
2072 DTRACE_PROBE2(direct_off, (ill_t *), ill, (int), rc);
2073 }
2074 }
2075
2076 static void
2077 ill_capability_poll_enable(ill_t *ill)
2078 {
2079 ill_dld_capab_t *idc = ill->ill_dld_capab;
2080 dld_capab_poll_t poll;
2081 int rc;
2082
2083 ASSERT(!ill->ill_isv6 && IAM_WRITER_ILL(ill));
2084
2085 bzero(&poll, sizeof (poll));
2086 poll.poll_ring_add_cf = (uintptr_t)ip_squeue_add_ring;
2087 poll.poll_ring_remove_cf = (uintptr_t)ip_squeue_clean_ring;
2088 poll.poll_ring_quiesce_cf = (uintptr_t)ip_squeue_quiesce_ring;
2089 poll.poll_ring_restart_cf = (uintptr_t)ip_squeue_restart_ring;
2090 poll.poll_ring_bind_cf = (uintptr_t)ip_squeue_bind_ring;
2091 poll.poll_ring_ch = ill;
2092 rc = idc->idc_capab_df(idc->idc_capab_dh, DLD_CAPAB_POLL, &poll,
2093 DLD_ENABLE);
2094 if (rc == 0) {
2095 ill->ill_capabilities |= ILL_CAPAB_DLD_POLL;
2096 DTRACE_PROBE1(poll_on, (ill_t *), ill);
2097 } else {
2098 ip1dbg(("warning: could not enable POLL "
2099 "capability, rc = %d\n", rc));
2100 DTRACE_PROBE2(poll_off, (ill_t *), ill, (int), rc);
2101 }
2102 }
2103
2104 /*
2105 * Enable the LSO capability.
2106 */
2107 static void
2108 ill_capability_lso_enable(ill_t *ill)
2109 {
2110 ill_dld_capab_t *idc = ill->ill_dld_capab;
2111 dld_capab_lso_t lso;
2112 int rc;
2113
2114 ASSERT(!ill->ill_isv6 && IAM_WRITER_ILL(ill));
2115
2116 if (ill->ill_lso_capab == NULL) {
2117 ill->ill_lso_capab = kmem_zalloc(sizeof (ill_lso_capab_t),
2118 KM_NOSLEEP);
2119 if (ill->ill_lso_capab == NULL) {
2120 cmn_err(CE_WARN, "ill_capability_lso_enable: "
2121 "could not enable LSO for %s (ENOMEM)\n",
2122 ill->ill_name);
2123 return;
2124 }
2125 }
2126
2127 bzero(&lso, sizeof (lso));
2128 if ((rc = idc->idc_capab_df(idc->idc_capab_dh, DLD_CAPAB_LSO, &lso,
2129 DLD_ENABLE)) == 0) {
2130 ill->ill_lso_capab->ill_lso_flags = lso.lso_flags;
2131 ill->ill_lso_capab->ill_lso_max = lso.lso_max;
2132 ill->ill_capabilities |= ILL_CAPAB_LSO;
2133 ip1dbg(("ill_capability_lso_enable: interface %s "
2134 "has enabled LSO\n ", ill->ill_name));
2135 } else {
2136 kmem_free(ill->ill_lso_capab, sizeof (ill_lso_capab_t));
2137 ill->ill_lso_capab = NULL;
2138 DTRACE_PROBE2(lso_off, (ill_t *), ill, (int), rc);
2139 }
2140 }
2141
2142 /*
2143 * Check whether or not mac will prevent us from sending with a given IP
* address. This requires the IPCHECK capability, which we should always
* be able to negotiate successfully. If it is somehow missing, we simply
* permit the caller to use the address: mac does the actual enforcement,
* and ip is only performing a courtesy check to help prevent users from
* unwittingly setting and attempting to use blocked addresses.
2150 */
2151 static boolean_t
2152 ill_ipcheck_addr(ill_t *ill, in6_addr_t *v6addr)
2153 {
2154 if ((ill->ill_capabilities & ILL_CAPAB_DLD_IPCHECK) == 0)
2155 return (B_TRUE);
2156
2157 ill_dld_ipcheck_t *idi = &ill->ill_dld_capab->idc_ipcheck;
2158 ip_mac_ipcheck_t ipcheck = idi->idi_allowed_df;
2159 return (ipcheck(idi->idi_allowed_dh, ill->ill_isv6, v6addr));
2160 }
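
/*
 * A hypothetical caller sketch (illustrative only): an address-assignment
 * path can reject a mac-blocked address up front rather than let mac
 * silently enforce the block later:
 *
 *    if (!ill_ipcheck_addr(ill, &v6addr))
 *        return (EPERM);
 */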
2161
2162 static void
2163 ill_capability_ipcheck_enable(ill_t *ill)
2164 {
2165 ill_dld_capab_t *idc = ill->ill_dld_capab;
2166 ill_dld_ipcheck_t *idi = &idc->idc_ipcheck;
2167 dld_capab_ipcheck_t spoof;
2168 int rc;
2169
2170 ASSERT(IAM_WRITER_ILL(ill));
2171
2172 bzero(&spoof, sizeof (spoof));
2173 if ((rc = idc->idc_capab_df(idc->idc_capab_dh, DLD_CAPAB_IPCHECK,
2174 &spoof, DLD_ENABLE)) == 0) {
2175 idi->idi_allowed_df = (ip_mac_ipcheck_t)spoof.ipc_allowed_df;
2176 idi->idi_allowed_dh = spoof.ipc_allowed_dh;
2177 ill->ill_capabilities |= ILL_CAPAB_DLD_IPCHECK;
2178 } else {
2179 cmn_err(CE_WARN, "warning: could not enable IPCHECK "
2180 "capability, rc = %d\n", rc);
2181 DTRACE_PROBE2(ipcheck__off, (ill_t *), ill, (int), rc);
2182 }
2183 }
2184
2185 static void
2186 ill_capability_dld_enable(ill_t *ill)
2187 {
2188 mac_perim_handle_t mph;
2189
2190 ASSERT(IAM_WRITER_ILL(ill));
2191
2192 ill_mac_perim_enter(ill, &mph);
2193 if (!ill->ill_isv6) {
2194 ill_capability_direct_enable(ill);
2195 ill_capability_poll_enable(ill);
2196 ill_capability_lso_enable(ill);
2197 }
2198
2199 ill_capability_ipcheck_enable(ill);
2200
2201 ill->ill_capabilities |= ILL_CAPAB_DLD;
2202 ill_mac_perim_exit(ill, mph);
2203 }
2204
2205 static void
2206 ill_capability_dld_disable(ill_t *ill)
2207 {
2208 ill_dld_capab_t *idc;
2209 ill_dld_direct_t *idd;
2210 mac_perim_handle_t mph;
2211
2212 ASSERT(IAM_WRITER_ILL(ill));
2213
2214 if (!(ill->ill_capabilities & ILL_CAPAB_DLD))
2215 return;
2216
2217 ill_mac_perim_enter(ill, &mph);
2218
2219 idc = ill->ill_dld_capab;
2220 if ((ill->ill_capabilities & ILL_CAPAB_DLD_DIRECT) != 0) {
2221 /*
2222 * For performance we avoid locks in the transmit data path
2223 * and don't maintain a count of the number of threads using
2224 * direct calls. Thus some threads could be using direct
2225 * transmit calls to GLD, even after the capability mechanism
2226 * turns it off. This is still safe since the handles used in
2227 * the direct calls continue to be valid until the unplumb is
2228 * completed. Remove the callback that was added (1-time) at
2229 * capab enable time.
2230 */
2231 mutex_enter(&ill->ill_lock);
2232 ill->ill_capabilities &= ~ILL_CAPAB_DLD_DIRECT;
2233 mutex_exit(&ill->ill_lock);
2234 if (ill->ill_flownotify_mh != NULL) {
2235 idd = &idc->idc_direct;
2236 idd->idd_tx_cb_df(idd->idd_tx_cb_dh, NULL,
2237 ill->ill_flownotify_mh);
2238 ill->ill_flownotify_mh = NULL;
2239 }
2240 (void) idc->idc_capab_df(idc->idc_capab_dh, DLD_CAPAB_DIRECT,
2241 NULL, DLD_DISABLE);
2242 }
2243
2244 if ((ill->ill_capabilities & ILL_CAPAB_DLD_POLL) != 0) {
2245 ill->ill_capabilities &= ~ILL_CAPAB_DLD_POLL;
2246 ip_squeue_clean_all(ill);
2247 (void) idc->idc_capab_df(idc->idc_capab_dh, DLD_CAPAB_POLL,
2248 NULL, DLD_DISABLE);
2249 }
2250
2251 if ((ill->ill_capabilities & ILL_CAPAB_LSO) != 0) {
2252 ASSERT(ill->ill_lso_capab != NULL);
2253 /*
2254 * Clear the capability flag for LSO but retain the
2255 * ill_lso_capab structure since it's possible that another
2256 * thread is still referring to it. The structure only gets
2257 * deallocated when we destroy the ill.
2258 */
2259
2260 ill->ill_capabilities &= ~ILL_CAPAB_LSO;
2261 (void) idc->idc_capab_df(idc->idc_capab_dh, DLD_CAPAB_LSO,
2262 NULL, DLD_DISABLE);
2263 }
2264
2265 if ((ill->ill_capabilities & ILL_CAPAB_DLD_IPCHECK) != 0) {
2266 ASSERT(ill->ill_dld_capab->idc_ipcheck.idi_allowed_df != NULL);
2267 ASSERT(ill->ill_dld_capab->idc_ipcheck.idi_allowed_dh != NULL);
2268
2269 ill->ill_capabilities &= ~ILL_CAPAB_DLD_IPCHECK;
2270 (void) idc->idc_capab_df(idc->idc_capab_dh, DLD_CAPAB_IPCHECK,
2271 NULL, DLD_DISABLE);
2272 }
2273
2274 ill->ill_capabilities &= ~ILL_CAPAB_DLD;
2275 ill_mac_perim_exit(ill, mph);
2276 }
2277
2278 /*
2279 * Capability Negotiation protocol
2280 *
2281 * We don't wait for DLPI capability operations to finish during interface
* bringup or teardown. Doing so would introduce more asynchrony, and the
* interface up/down operations would need multiple returns and restarts.
* Instead, the 'ipsq_current_ipif' of the ipsq is not cleared as long as
2285 * the 'ill_dlpi_deferred' chain is non-empty. This ensures that the next
2286 * exclusive operation won't start until the DLPI operations of the previous
2287 * exclusive operation complete.
2288 *
2289 * The capability state machine is shown below.
2290 *
2291 * state next state event, action
2292 *
2293 * IDCS_UNKNOWN IDCS_PROBE_SENT ill_capability_probe
2294 * IDCS_PROBE_SENT IDCS_OK ill_capability_ack
2295 * IDCS_PROBE_SENT IDCS_FAILED ip_rput_dlpi_writer (nack)
2296 * IDCS_OK IDCS_RENEG Receipt of DL_NOTE_CAPAB_RENEG
2297 * IDCS_OK IDCS_RESET_SENT ill_capability_reset
2298 * IDCS_RESET_SENT IDCS_UNKNOWN ill_capability_ack_thr
2299 * IDCS_RENEG IDCS_PROBE_SENT ill_capability_ack_thr ->
2300 * ill_capability_probe.
2301 */
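
/*
 * For example, a successful probe followed by a renegotiation request walks
 * the table above as follows (a sketch, not an additional state):
 *
 *    IDCS_UNKNOWN -> IDCS_PROBE_SENT       (ill_capability_probe)
 *    IDCS_PROBE_SENT -> IDCS_OK            (DL_CAPABILITY_ACK received)
 *    IDCS_OK -> IDCS_RENEG                 (DL_NOTE_CAPAB_RENEG received)
 *    IDCS_RENEG -> IDCS_PROBE_SENT         (ill_capability_ack_thr ->
 *                                           ill_capability_probe)
 */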
2302
2303 /*
2304 * Dedicated thread started from ip_stack_init that handles capability
2305 * disable. This thread ensures the taskq dispatch does not fail by waiting
2306 * for resources using TQ_SLEEP. The taskq mechanism is used to ensure
2307 * that direct calls to DLD are done in a cv_waitable context.
2308 */
2309 void
2310 ill_taskq_dispatch(ip_stack_t *ipst)
2311 {
2312 callb_cpr_t cprinfo;
2313 char name[64];
2314 mblk_t *mp;
2315
2316 (void) snprintf(name, sizeof (name), "ill_taskq_dispatch_%d",
2317 ipst->ips_netstack->netstack_stackid);
2318 CALLB_CPR_INIT(&cprinfo, &ipst->ips_capab_taskq_lock, callb_generic_cpr,
2319 name);
2320 mutex_enter(&ipst->ips_capab_taskq_lock);
2321
2322 for (;;) {
2323 mp = ipst->ips_capab_taskq_head;
2324 while (mp != NULL) {
2325 ipst->ips_capab_taskq_head = mp->b_next;
2326 if (ipst->ips_capab_taskq_head == NULL)
2327 ipst->ips_capab_taskq_tail = NULL;
2328 mutex_exit(&ipst->ips_capab_taskq_lock);
2329 mp->b_next = NULL;
2330
2331 VERIFY(taskq_dispatch(system_taskq,
2332 ill_capability_ack_thr, mp, TQ_SLEEP) !=
2333 TASKQID_INVALID);
2334 mutex_enter(&ipst->ips_capab_taskq_lock);
2335 mp = ipst->ips_capab_taskq_head;
2336 }
2337
2338 if (ipst->ips_capab_taskq_quit)
2339 break;
2340 CALLB_CPR_SAFE_BEGIN(&cprinfo);
2341 cv_wait(&ipst->ips_capab_taskq_cv, &ipst->ips_capab_taskq_lock);
2342 CALLB_CPR_SAFE_END(&cprinfo, &ipst->ips_capab_taskq_lock);
2343 }
2344 VERIFY(ipst->ips_capab_taskq_head == NULL);
2345 VERIFY(ipst->ips_capab_taskq_tail == NULL);
2346 CALLB_CPR_EXIT(&cprinfo);
2347 thread_exit();
2348 }
2349
2350 /*
2351 * Consume a new-style hardware capabilities negotiation ack.
2352 * Called via taskq on receipt of DL_CAPABILITY_ACK.
2353 */
2354 static void
2355 ill_capability_ack_thr(void *arg)
2356 {
2357 mblk_t *mp = arg;
2358 dl_capability_ack_t *capp;
2359 dl_capability_sub_t *subp, *endp;
2360 ill_t *ill;
2361 boolean_t reneg;
2362
2363 ill = (ill_t *)mp->b_prev;
2364 mp->b_prev = NULL;
2365
2366 VERIFY(ipsq_enter(ill, B_FALSE, CUR_OP) == B_TRUE);
2367
2368 if (ill->ill_dlpi_capab_state == IDCS_RESET_SENT ||
2369 ill->ill_dlpi_capab_state == IDCS_RENEG) {
2370 /*
2371 * We have received the ack for our DL_CAPAB reset request.
* There isn't anything in the message that needs processing.
2373 * All message based capabilities have been disabled, now
2374 * do the function call based capability disable.
2375 */
2376 reneg = ill->ill_dlpi_capab_state == IDCS_RENEG;
2377 ill_capability_dld_disable(ill);
2378 ill->ill_dlpi_capab_state = IDCS_UNKNOWN;
2379 if (reneg)
2380 ill_capability_probe(ill);
2381 goto done;
2382 }
2383
2384 if (ill->ill_dlpi_capab_state == IDCS_PROBE_SENT)
2385 ill->ill_dlpi_capab_state = IDCS_OK;
2386
2387 capp = (dl_capability_ack_t *)mp->b_rptr;
2388
2389 if (capp->dl_sub_length == 0) {
2390 /* no new-style capabilities */
2391 goto done;
2392 }
2393
2394 /* make sure the driver supplied correct dl_sub_length */
2395 if ((sizeof (*capp) + capp->dl_sub_length) > MBLKL(mp)) {
2396 ip0dbg(("ill_capability_ack: bad DL_CAPABILITY_ACK, "
2397 "invalid dl_sub_length (%d)\n", capp->dl_sub_length));
2398 goto done;
2399 }
2400
2401 #define SC(base, offset) (dl_capability_sub_t *)(((uchar_t *)(base))+(offset))
2402 /*
2403 * There are sub-capabilities. Process the ones we know about.
* Loop while there is room for another sub-capability header.
2405 */
2406 for (subp = SC(capp, capp->dl_sub_offset),
2407 endp = SC(subp, capp->dl_sub_length - sizeof (*subp));
2408 subp <= endp;
2409 subp = SC(subp, sizeof (dl_capability_sub_t) + subp->dl_length)) {
2410
2411 switch (subp->dl_cap) {
2412 case DL_CAPAB_ID_WRAPPER:
2413 ill_capability_id_ack(ill, mp, subp);
2414 break;
2415 default:
2416 ill_capability_dispatch(ill, mp, subp);
2417 break;
2418 }
2419 }
2420 #undef SC
2421 done:
2422 inet_freemsg(mp);
2423 ill_capability_done(ill);
2424 ipsq_exit(ill->ill_phyint->phyint_ipsq);
2425 }
2426
2427 /*
2428 * This needs to be started in a taskq thread to provide a cv_waitable
2429 * context.
2430 */
2431 void
2432 ill_capability_ack(ill_t *ill, mblk_t *mp)
2433 {
2434 ip_stack_t *ipst = ill->ill_ipst;
2435
2436 mp->b_prev = (mblk_t *)ill;
2437 ASSERT(mp->b_next == NULL);
2438
2439 if (taskq_dispatch(system_taskq, ill_capability_ack_thr, mp,
2440 TQ_NOSLEEP) != TASKQID_INVALID)
2441 return;
2442
2443 /*
2444 * The taskq dispatch failed. Signal the ill_taskq_dispatch thread
2445 * which will do the dispatch using TQ_SLEEP to guarantee success.
2446 */
2447 mutex_enter(&ipst->ips_capab_taskq_lock);
2448 if (ipst->ips_capab_taskq_head == NULL) {
2449 ASSERT(ipst->ips_capab_taskq_tail == NULL);
2450 ipst->ips_capab_taskq_head = mp;
2451 } else {
2452 ipst->ips_capab_taskq_tail->b_next = mp;
2453 }
2454 ipst->ips_capab_taskq_tail = mp;
2455
2456 cv_signal(&ipst->ips_capab_taskq_cv);
2457 mutex_exit(&ipst->ips_capab_taskq_lock);
2458 }
2459
2460 /*
2461 * This routine is called to scan the fragmentation reassembly table for
2462 * the specified ILL for any packets that are starting to smell.
2463 * dead_interval is the maximum time in seconds that will be tolerated. It
2464 * will either be the value specified in ip_g_frag_timeout, or zero if the
2465 * ILL is shutting down and it is time to blow everything off.
2466 *
2467 * It returns the number of seconds (as a time_t) that the next frag timer
2468 * should be scheduled for, 0 meaning that the timer doesn't need to be
2469 * re-started. Note that the method of calculating next_timeout isn't
2470 * entirely accurate since time will flow between the time we grab
2471 * current_time and the time we schedule the next timeout. This isn't a
* big problem since this is the timer for sending ICMP reassembly time
* exceeded messages, and it doesn't have to be exactly accurate.
2474 *
* This function is sometimes called as writer, although this is not required.
2477 */
2478 time_t
2479 ill_frag_timeout(ill_t *ill, time_t dead_interval)
2480 {
2481 ipfb_t *ipfb;
2482 ipfb_t *endp;
2483 ipf_t *ipf;
2484 ipf_t *ipfnext;
2485 mblk_t *mp;
2486 time_t current_time = gethrestime_sec();
2487 time_t next_timeout = 0;
2488 uint32_t hdr_length;
2489 mblk_t *send_icmp_head;
2490 mblk_t *send_icmp_head_v6;
2491 ip_stack_t *ipst = ill->ill_ipst;
2492 ip_recv_attr_t iras;
2493
2494 bzero(&iras, sizeof (iras));
2495 iras.ira_flags = 0;
2496 iras.ira_ill = iras.ira_rill = ill;
2497 iras.ira_ruifindex = ill->ill_phyint->phyint_ifindex;
2498 iras.ira_rifindex = iras.ira_ruifindex;
2499
2500 ipfb = ill->ill_frag_hash_tbl;
2501 if (ipfb == NULL)
return (0);
2503 endp = &ipfb[ILL_FRAG_HASH_TBL_COUNT];
2504 /* Walk the frag hash table. */
2505 for (; ipfb < endp; ipfb++) {
2506 send_icmp_head = NULL;
2507 send_icmp_head_v6 = NULL;
2508 mutex_enter(&ipfb->ipfb_lock);
while ((ipf = ipfb->ipfb_ipf) != NULL) {
2510 time_t frag_time = current_time - ipf->ipf_timestamp;
2511 time_t frag_timeout;
2512
2513 if (frag_time < dead_interval) {
2514 /*
2515 * There are some outstanding fragments
2516 * that will timeout later. Make note of
2517 * the time so that we can reschedule the
2518 * next timeout appropriately.
2519 */
2520 frag_timeout = dead_interval - frag_time;
2521 if (next_timeout == 0 ||
2522 frag_timeout < next_timeout) {
2523 next_timeout = frag_timeout;
2524 }
2525 break;
2526 }
2527 /* Time's up. Get it out of here. */
2528 hdr_length = ipf->ipf_nf_hdr_len;
2529 ipfnext = ipf->ipf_hash_next;
2530 if (ipfnext)
2531 ipfnext->ipf_ptphn = ipf->ipf_ptphn;
2532 *ipf->ipf_ptphn = ipfnext;
2533 mp = ipf->ipf_mp->b_cont;
2534 for (; mp; mp = mp->b_cont) {
2535 /* Extra points for neatness. */
2536 IP_REASS_SET_START(mp, 0);
2537 IP_REASS_SET_END(mp, 0);
2538 }
2539 mp = ipf->ipf_mp->b_cont;
2540 atomic_add_32(&ill->ill_frag_count, -ipf->ipf_count);
2541 ASSERT(ipfb->ipfb_count >= ipf->ipf_count);
2542 ipfb->ipfb_count -= ipf->ipf_count;
2543 ASSERT(ipfb->ipfb_frag_pkts > 0);
2544 ipfb->ipfb_frag_pkts--;
2545 /*
2546 * We do not send any icmp message from here because
2547 * we currently are holding the ipfb_lock for this
2548 * hash chain. If we try and send any icmp messages
2549 * from here we may end up via a put back into ip
2550 * trying to get the same lock, causing a recursive
2551 * mutex panic. Instead we build a list and send all
2552 * the icmp messages after we have dropped the lock.
2553 */
2554 if (ill->ill_isv6) {
2555 if (hdr_length != 0) {
2556 mp->b_next = send_icmp_head_v6;
2557 send_icmp_head_v6 = mp;
2558 } else {
2559 freemsg(mp);
2560 }
2561 } else {
2562 if (hdr_length != 0) {
2563 mp->b_next = send_icmp_head;
2564 send_icmp_head = mp;
2565 } else {
2566 freemsg(mp);
2567 }
2568 }
2569 BUMP_MIB(ill->ill_ip_mib, ipIfStatsReasmFails);
2570 ip_drop_input("ipIfStatsReasmFails", ipf->ipf_mp, ill);
2571 freeb(ipf->ipf_mp);
2572 }
2573 mutex_exit(&ipfb->ipfb_lock);
2574 /*
2575 * Now need to send any icmp messages that we delayed from
2576 * above.
2577 */
2578 while (send_icmp_head_v6 != NULL) {
2579 ip6_t *ip6h;
2580
2581 mp = send_icmp_head_v6;
2582 send_icmp_head_v6 = send_icmp_head_v6->b_next;
2583 mp->b_next = NULL;
2584 ip6h = (ip6_t *)mp->b_rptr;
2585 iras.ira_flags = 0;
2586 /*
2587 * This will result in an incorrect ALL_ZONES zoneid
2588 * for multicast packets, but we
2589 * don't send ICMP errors for those in any case.
2590 */
2591 iras.ira_zoneid =
2592 ipif_lookup_addr_zoneid_v6(&ip6h->ip6_dst,
2593 ill, ipst);
2594 ip_drop_input("ICMP_TIME_EXCEEDED reass", mp, ill);
2595 icmp_time_exceeded_v6(mp,
2596 ICMP_REASSEMBLY_TIME_EXCEEDED, B_FALSE,
2597 &iras);
2598 ASSERT(!(iras.ira_flags & IRAF_IPSEC_SECURE));
2599 }
2600 while (send_icmp_head != NULL) {
2601 ipaddr_t dst;
2602
2603 mp = send_icmp_head;
2604 send_icmp_head = send_icmp_head->b_next;
2605 mp->b_next = NULL;
2606
2607 dst = ((ipha_t *)mp->b_rptr)->ipha_dst;
2608
2609 iras.ira_flags = IRAF_IS_IPV4;
2610 /*
2611 * This will result in an incorrect ALL_ZONES zoneid
2612 * for broadcast and multicast packets, but we
2613 * don't send ICMP errors for those in any case.
2614 */
2615 iras.ira_zoneid = ipif_lookup_addr_zoneid(dst,
2616 ill, ipst);
2617 ip_drop_input("ICMP_TIME_EXCEEDED reass", mp, ill);
2618 icmp_time_exceeded(mp,
2619 ICMP_REASSEMBLY_TIME_EXCEEDED, &iras);
2620 ASSERT(!(iras.ira_flags & IRAF_IPSEC_SECURE));
2621 }
2622 }
2623 /*
2624 * A non-dying ILL will use the return value to decide whether to
2625 * restart the frag timer, and for how long.
2626 */
2627 return (next_timeout);
2628 }
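
/*
 * A sketch of typical use (hypothetical caller; the timer plumbing shown is
 * illustrative): the reassembly timer re-arms itself using the return value:
 *
 *    next = ill_frag_timeout(ill, frag_timeout_secs);
 *    if (next != 0)
 *        (restart the frag timer so that it fires in 'next' seconds)
 */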
2629
2630 /*
2631 * This routine is called when the approximate count of mblk memory used
2632 * for the specified ILL has exceeded max_count.
2633 */
2634 void
2635 ill_frag_prune(ill_t *ill, uint_t max_count)
2636 {
2637 ipfb_t *ipfb;
2638 ipf_t *ipf;
2639 size_t count;
2640 clock_t now;
2641
2642 /*
* If we are called again within ip_min_frag_prune_time msecs of the
* last prune, increment ill_frag_free_num_pkts so that progressively
* more of the oldest packets are freed from each bucket below;
* otherwise reset the count to zero.
2646 */
2647 mutex_enter(&ill->ill_lock);
2648 now = ddi_get_lbolt();
2649 if (TICK_TO_MSEC(now - ill->ill_last_frag_clean_time) <=
2650 (ip_min_frag_prune_time != 0 ?
2651 ip_min_frag_prune_time : msec_per_tick)) {
2652
2653 ill->ill_frag_free_num_pkts++;
2654
2655 } else {
2656 ill->ill_frag_free_num_pkts = 0;
2657 }
2658 ill->ill_last_frag_clean_time = now;
2659 mutex_exit(&ill->ill_lock);
2660
2661 /*
* Free the ill_frag_free_num_pkts oldest packets from each bucket.
2663 */
2664 if (ill->ill_frag_free_num_pkts != 0) {
2665 int ix;
2666
2667 for (ix = 0; ix < ILL_FRAG_HASH_TBL_COUNT; ix++) {
2668 ipfb = &ill->ill_frag_hash_tbl[ix];
2669 mutex_enter(&ipfb->ipfb_lock);
2670 if (ipfb->ipfb_ipf != NULL) {
2671 ill_frag_free_pkts(ill, ipfb, ipfb->ipfb_ipf,
2672 ill->ill_frag_free_num_pkts);
2673 }
2674 mutex_exit(&ipfb->ipfb_lock);
2675 }
2676 }
2677 /*
2678 * While the reassembly list for this ILL is too big, prune a fragment
2679 * queue by age, oldest first.
2680 */
2681 while (ill->ill_frag_count > max_count) {
2682 int ix;
2683 ipfb_t *oipfb = NULL;
2684 uint_t oldest = UINT_MAX;
2685
2686 count = 0;
2687 for (ix = 0; ix < ILL_FRAG_HASH_TBL_COUNT; ix++) {
2688 ipfb = &ill->ill_frag_hash_tbl[ix];
2689 mutex_enter(&ipfb->ipfb_lock);
2690 ipf = ipfb->ipfb_ipf;
2691 if (ipf != NULL && ipf->ipf_gen < oldest) {
2692 oldest = ipf->ipf_gen;
2693 oipfb = ipfb;
2694 }
2695 count += ipfb->ipfb_count;
2696 mutex_exit(&ipfb->ipfb_lock);
2697 }
2698 if (oipfb == NULL)
2699 break;
2700
2701 if (count <= max_count)
2702 return; /* Somebody beat us to it, nothing to do */
2703 mutex_enter(&oipfb->ipfb_lock);
2704 ipf = oipfb->ipfb_ipf;
2705 if (ipf != NULL) {
2706 ill_frag_free_pkts(ill, oipfb, ipf, 1);
2707 }
2708 mutex_exit(&oipfb->ipfb_lock);
2709 }
2710 }
2711
2712 /*
2713 * free 'free_cnt' fragmented packets starting at ipf.
2714 */
2715 void
2716 ill_frag_free_pkts(ill_t *ill, ipfb_t *ipfb, ipf_t *ipf, int free_cnt)
2717 {
2718 size_t count;
2719 mblk_t *mp;
2720 mblk_t *tmp;
2721 ipf_t **ipfp = ipf->ipf_ptphn;
2722
2723 ASSERT(MUTEX_HELD(&ipfb->ipfb_lock));
2724 ASSERT(ipfp != NULL);
2725 ASSERT(ipf != NULL);
2726
2727 while (ipf != NULL && free_cnt-- > 0) {
2728 count = ipf->ipf_count;
2729 mp = ipf->ipf_mp;
2730 ipf = ipf->ipf_hash_next;
2731 for (tmp = mp; tmp; tmp = tmp->b_cont) {
2732 IP_REASS_SET_START(tmp, 0);
2733 IP_REASS_SET_END(tmp, 0);
2734 }
2735 atomic_add_32(&ill->ill_frag_count, -count);
2736 ASSERT(ipfb->ipfb_count >= count);
2737 ipfb->ipfb_count -= count;
2738 ASSERT(ipfb->ipfb_frag_pkts > 0);
2739 ipfb->ipfb_frag_pkts--;
2740 BUMP_MIB(ill->ill_ip_mib, ipIfStatsReasmFails);
2741 ip_drop_input("ipIfStatsReasmFails", mp, ill);
2742 freemsg(mp);
2743 }
2744
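/* Patch the remainder of the hash chain (if any) back into place. */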
2745 if (ipf)
2746 ipf->ipf_ptphn = ipfp;
2747 ipfp[0] = ipf;
2748 }
2749
2750 /*
2751 * Helper function for ill_forward_set().
2752 */
2753 static void
2754 ill_forward_set_on_ill(ill_t *ill, boolean_t enable)
2755 {
2756 ip_stack_t *ipst = ill->ill_ipst;
2757
2758 ASSERT(IAM_WRITER_ILL(ill) || RW_READ_HELD(&ipst->ips_ill_g_lock));
2759
2760 ip1dbg(("ill_forward_set: %s %s forwarding on %s",
2761 (enable ? "Enabling" : "Disabling"),
2762 (ill->ill_isv6 ? "IPv6" : "IPv4"), ill->ill_name));
2763 mutex_enter(&ill->ill_lock);
2764 if (enable)
2765 ill->ill_flags |= ILLF_ROUTER;
2766 else
2767 ill->ill_flags &= ~ILLF_ROUTER;
2768 mutex_exit(&ill->ill_lock);
2769 if (ill->ill_isv6)
2770 ill_set_nce_router_flags(ill, enable);
2771 /* Notify routing socket listeners of this change. */
2772 if (ill->ill_ipif != NULL)
2773 ip_rts_ifmsg(ill->ill_ipif, RTSQ_DEFAULT);
2774 }
2775
2776 /*
2777 * Set an ill's ILLF_ROUTER flag appropriately. Send up RTS_IFINFO routing
2778 * socket messages for each interface whose flags we change.
2779 */
2780 int
2781 ill_forward_set(ill_t *ill, boolean_t enable)
2782 {
2783 ipmp_illgrp_t *illg;
2784 ip_stack_t *ipst = ill->ill_ipst;
2785
2786 ASSERT(IAM_WRITER_ILL(ill) || RW_READ_HELD(&ipst->ips_ill_g_lock));
2787
2788 if ((enable && (ill->ill_flags & ILLF_ROUTER)) ||
2789 (!enable && !(ill->ill_flags & ILLF_ROUTER)))
2790 return (0);
2791
2792 if (IS_LOOPBACK(ill))
2793 return (EINVAL);
2794
2795 if (enable && ill->ill_allowed_ips_cnt > 0)
2796 return (EPERM);
2797
2798 if (IS_IPMP(ill) || IS_UNDER_IPMP(ill)) {
2799 /*
2800 * Update all of the interfaces in the group.
2801 */
2802 illg = ill->ill_grp;
2803 ill = list_head(&illg->ig_if);
2804 for (; ill != NULL; ill = list_next(&illg->ig_if, ill))
2805 ill_forward_set_on_ill(ill, enable);
2806
2807 /*
2808 * Update the IPMP meta-interface.
2809 */
2810 ill_forward_set_on_ill(ipmp_illgrp_ipmp_ill(illg), enable);
2811 return (0);
2812 }
2813
2814 ill_forward_set_on_ill(ill, enable);
2815 return (0);
2816 }
2817
2818 /*
2819 * Based on the ILLF_ROUTER flag of an ill, make sure all local nce's for
2820 * addresses assigned to the ill have the NCE_F_ISROUTER flag appropriately
2821 * set or clear.
2822 */
2823 static void
2824 ill_set_nce_router_flags(ill_t *ill, boolean_t enable)
2825 {
2826 ipif_t *ipif;
2827 ncec_t *ncec;
2828 nce_t *nce;
2829
2830 for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
2831 /*
2832 * NOTE: we match across the illgrp because nce's for
2833 * addresses on IPMP interfaces have an nce_ill that points to
2834 * the bound underlying ill.
2835 */
2836 nce = nce_lookup_v6(ill, &ipif->ipif_v6lcl_addr);
2837 if (nce != NULL) {
2838 ncec = nce->nce_common;
2839 mutex_enter(&ncec->ncec_lock);
2840 if (enable)
2841 ncec->ncec_flags |= NCE_F_ISROUTER;
2842 else
2843 ncec->ncec_flags &= ~NCE_F_ISROUTER;
2844 mutex_exit(&ncec->ncec_lock);
2845 nce_refrele(nce);
2846 }
2847 }
2848 }
2849
2850 /*
* Initializes the context structure and returns the first ill in the list.
* Currently start_list and end_list can have the following values:
2853 * MAX_G_HEADS Traverse both IPV4 and IPV6 lists.
2854 * IP_V4_G_HEAD Traverse IPV4 list only.
2855 * IP_V6_G_HEAD Traverse IPV6 list only.
2856 */
2857
2858 /*
2859 * We don't check for CONDEMNED ills here. Caller must do that if
2860 * necessary under the ill lock.
2861 */
2862 ill_t *
2863 ill_first(int start_list, int end_list, ill_walk_context_t *ctx,
2864 ip_stack_t *ipst)
2865 {
2866 ill_if_t *ifp;
2867 ill_t *ill;
2868 avl_tree_t *avl_tree;
2869
2870 ASSERT(RW_LOCK_HELD(&ipst->ips_ill_g_lock));
2871 ASSERT(end_list <= MAX_G_HEADS && start_list >= 0);
2872
2873 /*
2874 * setup the lists to search
2875 */
2876 if (end_list != MAX_G_HEADS) {
2877 ctx->ctx_current_list = start_list;
2878 ctx->ctx_last_list = end_list;
2879 } else {
2880 ctx->ctx_last_list = MAX_G_HEADS - 1;
2881 ctx->ctx_current_list = 0;
2882 }
2883
2884 while (ctx->ctx_current_list <= ctx->ctx_last_list) {
2885 ifp = IP_VX_ILL_G_LIST(ctx->ctx_current_list, ipst);
2886 if (ifp != (ill_if_t *)
2887 &IP_VX_ILL_G_LIST(ctx->ctx_current_list, ipst)) {
2888 avl_tree = &ifp->illif_avl_by_ppa;
2889 ill = avl_first(avl_tree);
2890 /*
* ill is guaranteed to be non-NULL; otherwise ifp
* would not have existed.
2893 */
2894 ASSERT(ill != NULL);
2895 return (ill);
2896 }
2897 ctx->ctx_current_list++;
2898 }
2899
2900 return (NULL);
2901 }
2902
2903 /*
2904 * returns the next ill in the list. ill_first() must have been called
2905 * before calling ill_next() or bad things will happen.
2906 */
2907
2908 /*
2909 * We don't check for CONDEMNED ills here. Caller must do that if
2910 * necessary under the ill lock.
2911 */
2912 ill_t *
2913 ill_next(ill_walk_context_t *ctx, ill_t *lastill)
2914 {
2915 ill_if_t *ifp;
2916 ill_t *ill;
2917 ip_stack_t *ipst = lastill->ill_ipst;
2918
2919 ASSERT(lastill->ill_ifptr != (ill_if_t *)
2920 &IP_VX_ILL_G_LIST(ctx->ctx_current_list, ipst));
2921 if ((ill = avl_walk(&lastill->ill_ifptr->illif_avl_by_ppa, lastill,
2922 AVL_AFTER)) != NULL) {
2923 return (ill);
2924 }
2925
/* go to the next ill_if_t in the list. */
2927 ifp = lastill->ill_ifptr->illif_next;
2928
2929 /* make sure not at end of circular list */
2930 while (ifp ==
2931 (ill_if_t *)&IP_VX_ILL_G_LIST(ctx->ctx_current_list, ipst)) {
2932 if (++ctx->ctx_current_list > ctx->ctx_last_list)
2933 return (NULL);
2934 ifp = IP_VX_ILL_G_LIST(ctx->ctx_current_list, ipst);
2935 }
2936
2937 return (avl_first(&ifp->illif_avl_by_ppa));
2938 }
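
/*
 * A minimal sketch of the intended walk (assuming the caller holds
 * ips_ill_g_lock, as asserted by ill_first()):
 *
 *    ill_walk_context_t ctx;
 *    ill_t *ill;
 *
 *    rw_enter(&ipst->ips_ill_g_lock, RW_READER);
 *    for (ill = ill_first(IP_V4_G_HEAD, IP_V4_G_HEAD, &ctx, ipst);
 *        ill != NULL; ill = ill_next(&ctx, ill)) {
 *        (examine ill; check for CONDEMNED under ill_lock if needed)
 *    }
 *    rw_exit(&ipst->ips_ill_g_lock);
 */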
2939
2940 /*
2941 * Check interface name for correct format: [a-zA-Z]+[a-zA-Z0-9._]*[0-9]+
2942 * The final number (PPA) must not have any leading zeros. Upon success, a
2943 * pointer to the start of the PPA is returned; otherwise NULL is returned.
2944 */
2945 static char *
2946 ill_get_ppa_ptr(char *name)
2947 {
2948 int namelen = strlen(name);
2949 int end_ndx = namelen - 1;
2950 int ppa_ndx, i;
2951
2952 /*
2953 * Check that the first character is [a-zA-Z], and that the last
2954 * character is [0-9].
2955 */
2956 if (namelen == 0 || !isalpha(name[0]) || !isdigit(name[end_ndx]))
2957 return (NULL);
2958
2959 /*
2960 * Set `ppa_ndx' to the PPA start, and check for leading zeroes.
2961 */
2962 for (ppa_ndx = end_ndx; ppa_ndx > 0; ppa_ndx--)
2963 if (!isdigit(name[ppa_ndx - 1]))
2964 break;
2965
2966 if (name[ppa_ndx] == '0' && ppa_ndx < end_ndx)
2967 return (NULL);
2968
2969 /*
* Check that the intermediate characters are [a-zA-Z0-9._]
2971 */
2972 for (i = 1; i < ppa_ndx; i++) {
2973 if (!isalpha(name[i]) && !isdigit(name[i]) &&
2974 name[i] != '.' && name[i] != '_') {
2975 return (NULL);
2976 }
2977 }
2978
2979 return (name + ppa_ndx);
2980 }
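
/*
 * Examples of the rules above: "eri0" and "hme10" are accepted, with the
 * returned pointer addressing the "0" and "10" respectively; "eri01" is
 * rejected for a leading zero in the PPA; "eri" and "0eri0" are rejected
 * by the first/last character checks.
 */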
2981
2982 /*
2983 * use avl tree to locate the ill.
2984 */
2985 static ill_t *
2986 ill_find_by_name(char *name, boolean_t isv6, ip_stack_t *ipst)
2987 {
2988 char *ppa_ptr = NULL;
2989 int len;
2990 uint_t ppa;
2991 ill_t *ill = NULL;
2992 ill_if_t *ifp;
2993 int list;
2994
2995 /*
2996 * get ppa ptr
2997 */
2998 if (isv6)
2999 list = IP_V6_G_HEAD;
3000 else
3001 list = IP_V4_G_HEAD;
3002
3003 if ((ppa_ptr = ill_get_ppa_ptr(name)) == NULL) {
3004 return (NULL);
3005 }
3006
3007 len = ppa_ptr - name + 1;
3008
3009 ppa = stoi(&ppa_ptr);
3010
3011 ifp = IP_VX_ILL_G_LIST(list, ipst);
3012
3013 while (ifp != (ill_if_t *)&IP_VX_ILL_G_LIST(list, ipst)) {
3014 /*
* The match is done on len - 1 because the name is not
* null-terminated; it contains the ppa in addition to the
* interface name.
3018 */
3019 if ((ifp->illif_name_len == len) &&
3020 bcmp(ifp->illif_name, name, len - 1) == 0) {
3021 break;
3022 } else {
3023 ifp = ifp->illif_next;
3024 }
3025 }
3026
3027 if (ifp == (ill_if_t *)&IP_VX_ILL_G_LIST(list, ipst)) {
3028 /*
* The interface type itself does not exist.
3030 */
3031 return (NULL);
3032 }
3033
3034 ill = avl_find(&ifp->illif_avl_by_ppa, (void *) &ppa, NULL);
3035 if (ill != NULL) {
3036 mutex_enter(&ill->ill_lock);
3037 if (ILL_CAN_LOOKUP(ill)) {
3038 ill_refhold_locked(ill);
3039 mutex_exit(&ill->ill_lock);
3040 return (ill);
3041 }
3042 mutex_exit(&ill->ill_lock);
3043 }
3044 return (NULL);
3045 }
3046
3047 /*
3048 * comparison function for use with avl.
3049 */
3050 static int
3051 ill_compare_ppa(const void *ppa_ptr, const void *ill_ptr)
3052 {
3053 uint_t ppa;
3054 uint_t ill_ppa;
3055
3056 ASSERT(ppa_ptr != NULL && ill_ptr != NULL);
3057
3058 ppa = *((uint_t *)ppa_ptr);
3059 ill_ppa = ((const ill_t *)ill_ptr)->ill_ppa;
3060 /*
3061 * We want the ill with the lowest ppa to be on the
3062 * top.
3063 */
3064 if (ill_ppa < ppa)
3065 return (1);
3066 if (ill_ppa > ppa)
3067 return (-1);
3068 return (0);
3069 }
3070
3071 /*
3072 * remove an interface type from the global list.
3073 */
3074 static void
3075 ill_delete_interface_type(ill_if_t *interface)
3076 {
3077 ASSERT(interface != NULL);
3078 ASSERT(avl_numnodes(&interface->illif_avl_by_ppa) == 0);
3079
3080 avl_destroy(&interface->illif_avl_by_ppa);
3081 if (interface->illif_ppa_arena != NULL)
3082 vmem_destroy(interface->illif_ppa_arena);
3083
3084 remque(interface);
3085
3086 mi_free(interface);
3087 }
3088
3089 /*
3090 * remove ill from the global list.
3091 */
3092 static void
3093 ill_glist_delete(ill_t *ill)
3094 {
3095 ip_stack_t *ipst;
3096 phyint_t *phyi;
3097
3098 if (ill == NULL)
3099 return;
3100 ipst = ill->ill_ipst;
3101 rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
3102
3103 /*
* If the ill was never inserted into the AVL tree,
* there is nothing to remove and we skip the block below.
3106 */
3107 if (ill->ill_ifptr != NULL) {
3108 /*
3109 * remove from AVL tree and free ppa number
3110 */
3111 avl_remove(&ill->ill_ifptr->illif_avl_by_ppa, ill);
3112
3113 if (ill->ill_ifptr->illif_ppa_arena != NULL) {
3114 vmem_free(ill->ill_ifptr->illif_ppa_arena,
3115 (void *)(uintptr_t)(ill->ill_ppa+1), 1);
3116 }
3117 if (avl_numnodes(&ill->ill_ifptr->illif_avl_by_ppa) == 0) {
3118 ill_delete_interface_type(ill->ill_ifptr);
3119 }
3120
3121 /*
3122 * Indicate ill is no longer in the list.
3123 */
3124 ill->ill_ifptr = NULL;
3125 ill->ill_name_length = 0;
3126 ill->ill_name[0] = '\0';
3127 ill->ill_ppa = UINT_MAX;
3128 }
3129
3130 /* Generate one last event for this ill. */
3131 ill_nic_event_dispatch(ill, 0, NE_UNPLUMB, ill->ill_name,
3132 ill->ill_name_length);
3133
3134 ASSERT(ill->ill_phyint != NULL);
3135 phyi = ill->ill_phyint;
3136 ill->ill_phyint = NULL;
3137
3138 /*
* ill_init always allocates a phyint to store the copy of flags
* relevant to the phyint. At that point in time, we could not assign
* the name, and hence phyint_illv4/v6 could not be initialized. Later,
* in ipif_set_values, we assign the name to the ill, at which point we
* assign phyint_illv4/v6. Thus we can't rely on phyint_illv6 always
* being initialized.
3145 */
3146 if (ill->ill_flags & ILLF_IPV6)
3147 phyi->phyint_illv6 = NULL;
3148 else
3149 phyi->phyint_illv4 = NULL;
3150
3151 if (phyi->phyint_illv4 != NULL || phyi->phyint_illv6 != NULL) {
3152 rw_exit(&ipst->ips_ill_g_lock);
3153 return;
3154 }
3155
3156 /*
3157 * There are no ills left on this phyint; pull it out of the phyint
3158 * avl trees, and free it.
3159 */
3160 if (phyi->phyint_ifindex > 0) {
3161 avl_remove(&ipst->ips_phyint_g_list->phyint_list_avl_by_index,
3162 phyi);
3163 avl_remove(&ipst->ips_phyint_g_list->phyint_list_avl_by_name,
3164 phyi);
3165 }
3166 rw_exit(&ipst->ips_ill_g_lock);
3167
3168 phyint_free(phyi);
3169 }
3170
3171 /*
* Allocate a ppa. If the number of plumbed interfaces of this type is
* less than ill_no_arena, do a linear search to find an unused ppa.
* When the number goes beyond ill_no_arena, switch to using an arena.
* Note: a ppa value of zero cannot be allocated from the vmem arena, as
* zero is the return value for an error condition; so arena allocation
* starts at one and the result is decremented by one.
3178 */
3179 static int
3180 ill_alloc_ppa(ill_if_t *ifp, ill_t *ill)
3181 {
3182 ill_t *tmp_ill;
3183 uint_t start, end;
3184 int ppa;
3185
3186 if (ifp->illif_ppa_arena == NULL &&
3187 (avl_numnodes(&ifp->illif_avl_by_ppa) + 1 > ill_no_arena)) {
3188 /*
3189 * Create an arena.
3190 */
3191 ifp->illif_ppa_arena = vmem_create(ifp->illif_name,
3192 (void *)1, UINT_MAX - 1, 1, NULL, NULL,
3193 NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
3194 /* allocate what has already been assigned */
3195 for (tmp_ill = avl_first(&ifp->illif_avl_by_ppa);
3196 tmp_ill != NULL; tmp_ill = avl_walk(&ifp->illif_avl_by_ppa,
3197 tmp_ill, AVL_AFTER)) {
3198 ppa = (int)(uintptr_t)vmem_xalloc(ifp->illif_ppa_arena,
3199 1, /* size */
3200 1, /* align/quantum */
3201 0, /* phase */
3202 0, /* nocross */
3203 /* minaddr */
3204 (void *)((uintptr_t)tmp_ill->ill_ppa + 1),
3205 /* maxaddr */
3206 (void *)((uintptr_t)tmp_ill->ill_ppa + 2),
3207 VM_NOSLEEP|VM_FIRSTFIT);
3208 if (ppa == 0) {
3209 ip1dbg(("ill_alloc_ppa: ppa allocation"
3210 " failed while switching"));
3211 vmem_destroy(ifp->illif_ppa_arena);
3212 ifp->illif_ppa_arena = NULL;
3213 break;
3214 }
3215 }
3216 }
3217
3218 if (ifp->illif_ppa_arena != NULL) {
3219 if (ill->ill_ppa == UINT_MAX) {
3220 ppa = (int)(uintptr_t)vmem_alloc(ifp->illif_ppa_arena,
3221 1, VM_NOSLEEP|VM_FIRSTFIT);
3222 if (ppa == 0)
3223 return (EAGAIN);
3224 ill->ill_ppa = --ppa;
3225 } else {
3226 ppa = (int)(uintptr_t)vmem_xalloc(ifp->illif_ppa_arena,
3227 1, /* size */
3228 1, /* align/quantum */
3229 0, /* phase */
3230 0, /* nocross */
3231 (void *)(uintptr_t)(ill->ill_ppa + 1), /* minaddr */
3232 (void *)(uintptr_t)(ill->ill_ppa + 2), /* maxaddr */
3233 VM_NOSLEEP|VM_FIRSTFIT);
3234 /*
3235 * Most likely the allocation failed because
3236 * the requested ppa was in use.
3237 */
3238 if (ppa == 0)
3239 return (EEXIST);
3240 }
3241 return (0);
3242 }
3243
3244 /*
3245 * No arena is in use and not enough (>ill_no_arena) interfaces have
* been plumbed to create one. Do a linear search to get an unused ppa.
3247 */
3248 if (ill->ill_ppa == UINT_MAX) {
3249 end = UINT_MAX - 1;
3250 start = 0;
3251 } else {
3252 end = start = ill->ill_ppa;
3253 }
3254
3255 tmp_ill = avl_find(&ifp->illif_avl_by_ppa, (void *)&start, NULL);
3256 while (tmp_ill != NULL && tmp_ill->ill_ppa == start) {
3257 if (start++ >= end) {
3258 if (ill->ill_ppa == UINT_MAX)
3259 return (EAGAIN);
3260 else
3261 return (EEXIST);
3262 }
3263 tmp_ill = avl_walk(&ifp->illif_avl_by_ppa, tmp_ill, AVL_AFTER);
3264 }
3265 ill->ill_ppa = start;
3266 return (0);
3267 }
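
/*
 * A worked example of the off-by-one encoding above: ppa 3 is stored at
 * arena address 4, so a vmem_alloc() that returns 4 yields ill_ppa = 3,
 * and a request for the specific ppa 3 uses vmem_xalloc() bounded by
 * [4, 5). The same ppa + 1 is what ill_glist_delete() hands back to
 * vmem_free().
 */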
3268
3269 /*
3270 * Insert ill into the list of configured ill's. Once this function completes,
3271 * the ill is globally visible and is available through lookups. More precisely
3272 * this happens after the caller drops the ill_g_lock.
3273 */
3274 static int
3275 ill_glist_insert(ill_t *ill, char *name, boolean_t isv6)
3276 {
3277 ill_if_t *ill_interface;
3278 avl_index_t where = 0;
3279 int error;
3280 int name_length;
3281 int index;
3282 boolean_t check_length = B_FALSE;
3283 ip_stack_t *ipst = ill->ill_ipst;
3284
3285 ASSERT(RW_WRITE_HELD(&ipst->ips_ill_g_lock));
3286
3287 name_length = mi_strlen(name) + 1;
3288
3289 if (isv6)
3290 index = IP_V6_G_HEAD;
3291 else
3292 index = IP_V4_G_HEAD;
3293
3294 ill_interface = IP_VX_ILL_G_LIST(index, ipst);
3295 /*
3296 * Search for interface type based on name
3297 */
3298 while (ill_interface != (ill_if_t *)&IP_VX_ILL_G_LIST(index, ipst)) {
3299 if ((ill_interface->illif_name_len == name_length) &&
3300 (strcmp(ill_interface->illif_name, name) == 0)) {
3301 break;
3302 }
3303 ill_interface = ill_interface->illif_next;
3304 }
3305
3306 /*
3307 * Interface type not found, create one.
3308 */
3309 if (ill_interface == (ill_if_t *)&IP_VX_ILL_G_LIST(index, ipst)) {
3310 ill_g_head_t ghead;
3311
3312 /*
3313 * allocate ill_if_t structure
3314 */
3315 ill_interface = (ill_if_t *)mi_zalloc(sizeof (ill_if_t));
3316 if (ill_interface == NULL) {
3317 return (ENOMEM);
3318 }
3319
3320 (void) strcpy(ill_interface->illif_name, name);
3321 ill_interface->illif_name_len = name_length;
3322
3323 avl_create(&ill_interface->illif_avl_by_ppa,
3324 ill_compare_ppa, sizeof (ill_t),
3325 offsetof(struct ill_s, ill_avl_byppa));
3326
3327 /*
* link the structure in at the tail to maintain the
* order of configuration for ifconfig output.
3330 */
3331 ghead = ipst->ips_ill_g_heads[index];
3332 insque(ill_interface, ghead.ill_g_list_tail);
3333 }
3334
3335 if (ill->ill_ppa == UINT_MAX)
3336 check_length = B_TRUE;
3337
3338 error = ill_alloc_ppa(ill_interface, ill);
3339 if (error != 0) {
3340 if (avl_numnodes(&ill_interface->illif_avl_by_ppa) == 0)
3341 ill_delete_interface_type(ill->ill_ifptr);
3342 return (error);
3343 }
3344
3345 /*
* When the ppa is chosen by the system, check that there is
* enough space to insert the ppa. If a specific ppa was passed in,
* this check is not required, as the interface name passed in will
* have the right ppa in it.
3350 */
3351 if (check_length) {
3352 /*
3353 * UINT_MAX - 1 should fit in 10 chars, alloc 12 chars.
3354 */
3355 char buf[sizeof (uint_t) * 3];
3356
3357 /*
3358 * convert ppa to string to calculate the amount of space
3359 * required for it in the name.
3360 */
3361 numtos(ill->ill_ppa, buf);
3362
3363 /* Do we have enough space to insert ppa ? */
3364
3365 if ((mi_strlen(name) + mi_strlen(buf) + 1) > LIFNAMSIZ) {
3366 /* Free ppa and interface type struct */
3367 if (ill_interface->illif_ppa_arena != NULL) {
3368 vmem_free(ill_interface->illif_ppa_arena,
3369 (void *)(uintptr_t)(ill->ill_ppa+1), 1);
3370 }
3371 if (avl_numnodes(&ill_interface->illif_avl_by_ppa) == 0)
3372 ill_delete_interface_type(ill->ill_ifptr);
3373
3374 return (EINVAL);
3375 }
3376 }
3377
3378 (void) sprintf(ill->ill_name, "%s%u", name, ill->ill_ppa);
3379 ill->ill_name_length = mi_strlen(ill->ill_name) + 1;
3380
3381 (void) avl_find(&ill_interface->illif_avl_by_ppa, &ill->ill_ppa,
3382 &where);
3383 ill->ill_ifptr = ill_interface;
3384 avl_insert(&ill_interface->illif_avl_by_ppa, ill, where);
3385
3386 ill_phyint_reinit(ill);
3387 return (0);
3388 }
3389
3390 /* Initialize the per phyint ipsq used for serialization */
3391 static boolean_t
3392 ipsq_init(ill_t *ill, boolean_t enter)
3393 {
3394 ipsq_t *ipsq;
3395 ipxop_t *ipx;
3396
3397 if ((ipsq = kmem_zalloc(sizeof (ipsq_t), KM_NOSLEEP)) == NULL)
3398 return (B_FALSE);
3399
3400 ill->ill_phyint->phyint_ipsq = ipsq;
3401 ipx = ipsq->ipsq_xop = &ipsq->ipsq_ownxop;
3402 ipx->ipx_ipsq = ipsq;
3403 ipsq->ipsq_next = ipsq;
3404 ipsq->ipsq_phyint = ill->ill_phyint;
3405 mutex_init(&ipsq->ipsq_lock, NULL, MUTEX_DEFAULT, 0);
3406 mutex_init(&ipx->ipx_lock, NULL, MUTEX_DEFAULT, 0);
3407 ipsq->ipsq_ipst = ill->ill_ipst; /* No netstack_hold */
3408 if (enter) {
3409 ipx->ipx_writer = curthread;
3410 ipx->ipx_forced = B_FALSE;
3411 ipx->ipx_reentry_cnt = 1;
3412 #ifdef DEBUG
3413 ipx->ipx_depth = getpcstack(ipx->ipx_stack, IPX_STACK_DEPTH);
3414 #endif
3415 }
3416 return (B_TRUE);
3417 }
3418
3419 /*
3420 * Here we perform initialisation of the ill_t common to both regular
3421 * interface ILLs and the special loopback ILL created by ill_lookup_on_name.
3422 */
3423 static int
3424 ill_init_common(ill_t *ill, queue_t *q, boolean_t isv6, boolean_t is_loopback,
3425 boolean_t ipsq_enter)
3426 {
3427 int count;
3428 uchar_t *frag_ptr;
3429
3430 mutex_init(&ill->ill_lock, NULL, MUTEX_DEFAULT, 0);
3431 mutex_init(&ill->ill_saved_ire_lock, NULL, MUTEX_DEFAULT, NULL);
3432 ill->ill_saved_ire_cnt = 0;
3433
3434 if (is_loopback) {
3435 ill->ill_max_frag = isv6 ? ip_loopback_mtu_v6plus :
3436 ip_loopback_mtuplus;
3437 /*
3438 * No resolver here.
3439 */
3440 ill->ill_net_type = IRE_LOOPBACK;
3441 } else {
3442 ill->ill_rq = q;
3443 ill->ill_wq = WR(q);
3444 ill->ill_ppa = UINT_MAX;
3445 }
3446
3447 ill->ill_isv6 = isv6;
3448
3449 /*
3450 * Allocate sufficient space to contain our fragment hash table and
3451 * the device name.
3452 */
3453 frag_ptr = (uchar_t *)mi_zalloc(ILL_FRAG_HASH_TBL_SIZE + 2 * LIFNAMSIZ);
3454 if (frag_ptr == NULL)
3455 return (ENOMEM);
3456 ill->ill_frag_ptr = frag_ptr;
3457 ill->ill_frag_free_num_pkts = 0;
3458 ill->ill_last_frag_clean_time = 0;
3459 ill->ill_frag_hash_tbl = (ipfb_t *)frag_ptr;
3460 ill->ill_name = (char *)(frag_ptr + ILL_FRAG_HASH_TBL_SIZE);
3461 for (count = 0; count < ILL_FRAG_HASH_TBL_COUNT; count++) {
3462 mutex_init(&ill->ill_frag_hash_tbl[count].ipfb_lock,
3463 NULL, MUTEX_DEFAULT, NULL);
3464 }
3465
3466 ill->ill_phyint = (phyint_t *)mi_zalloc(sizeof (phyint_t));
3467 if (ill->ill_phyint == NULL) {
3468 mi_free(frag_ptr);
3469 return (ENOMEM);
3470 }
3471
3472 mutex_init(&ill->ill_phyint->phyint_lock, NULL, MUTEX_DEFAULT, 0);
3473 if (isv6) {
3474 ill->ill_phyint->phyint_illv6 = ill;
3475 } else {
3476 ill->ill_phyint->phyint_illv4 = ill;
3477 }
3478 if (is_loopback) {
3479 phyint_flags_init(ill->ill_phyint, DL_LOOP);
3480 }
3481
3482 list_create(&ill->ill_nce, sizeof (nce_t), offsetof(nce_t, nce_node));
3483
3484 ill_set_inputfn(ill);
3485
3486 if (!ipsq_init(ill, ipsq_enter)) {
3487 mi_free(frag_ptr);
3488 mi_free(ill->ill_phyint);
3489 return (ENOMEM);
3490 }
3491
3492 /* Frag queue limit stuff */
3493 ill->ill_frag_count = 0;
3494 ill->ill_ipf_gen = 0;
3495
3496 rw_init(&ill->ill_mcast_lock, NULL, RW_DEFAULT, NULL);
3497 mutex_init(&ill->ill_mcast_serializer, NULL, MUTEX_DEFAULT, NULL);
3498 ill->ill_global_timer = INFINITY;
3499 ill->ill_mcast_v1_time = ill->ill_mcast_v2_time = 0;
3500 ill->ill_mcast_v1_tset = ill->ill_mcast_v2_tset = 0;
3501 ill->ill_mcast_rv = MCAST_DEF_ROBUSTNESS;
3502 ill->ill_mcast_qi = MCAST_DEF_QUERY_INTERVAL;
3503
3504 /*
3505 * Initialize IPv6 configuration variables. The IP module is always
* opened as an IPv4 module. Instead of tracking down the cases where
* it switches to do IPv6, we'll just initialize the IPv6 configuration
* here for convenience; this has no effect until the ill is set to do
* IPv6.
3510 */
3511 ill->ill_reachable_time = ND_REACHABLE_TIME;
3512 ill->ill_xmit_count = ND_MAX_MULTICAST_SOLICIT;
3513 ill->ill_max_buf = ND_MAX_Q;
3514 ill->ill_refcnt = 0;
3515
3516 cv_init(&ill->ill_dlpi_capab_cv, NULL, CV_DEFAULT, NULL);
3517 mutex_init(&ill->ill_dlpi_capab_lock, NULL, MUTEX_DEFAULT, NULL);
3518
3519 return (0);
3520 }
3521
3522 /*
3523 * ill_init is called by ip_open when a device control stream is opened.
3524 * It does a few initializations, and shoots a DL_INFO_REQ message down
3525 * to the driver. The response is later picked up in ip_rput_dlpi and
3526 * used to set up default mechanisms for talking to the driver. (Always
3527 * called as writer.)
3528 *
3529 * If this function returns error, ip_open will call ip_close which in
3530 * turn will call ill_delete to clean up any memory allocated here that
3531 * is not yet freed.
3532 *
3533 * Note: ill_ipst and ill_zoneid must be set before calling ill_init.
3534 */
3535 int
3536 ill_init(queue_t *q, ill_t *ill)
3537 {
3538 int ret;
3539 dl_info_req_t *dlir;
3540 mblk_t *info_mp;
3541
3542 info_mp = allocb(MAX(sizeof (dl_info_req_t), sizeof (dl_info_ack_t)),
3543 BPRI_HI);
3544 if (info_mp == NULL)
3545 return (ENOMEM);
3546
3547 /*
* For now pretend this is a v4 ill. We need to set phyint_ill* at this
* point for the following reason: if we can't enter the ipsq at some
* point and must cv_wait, the writer that wakes us up locates us using
* the list of all phyints in an ipsq and the ills reached from the
* phyint through phyint_ill*. If we don't set it now, we risk a missed
* wakeup.
3554 */
3555 if ((ret = ill_init_common(ill, q, B_FALSE, B_FALSE, B_TRUE)) != 0) {
3556 freemsg(info_mp);
3557 return (ret);
3558 }
3559
3560 ill->ill_state_flags |= ILL_LL_SUBNET_PENDING;
3561
3562 /* Send down the Info Request to the driver. */
3563 info_mp->b_datap->db_type = M_PCPROTO;
3564 dlir = (dl_info_req_t *)info_mp->b_rptr;
3565 info_mp->b_wptr = (uchar_t *)&dlir[1];
3566 dlir->dl_primitive = DL_INFO_REQ;
3567
3568 ill->ill_dlpi_pending = DL_PRIM_INVAL;
3569
3570 qprocson(q);
3571 ill_dlpi_send(ill, info_mp);
3572
3573 return (0);
3574 }
3575
3576 /*
3577 * ill_dls_info
3578 * creates datalink socket info from the device.
3579 */
3580 int
3581 ill_dls_info(struct sockaddr_dl *sdl, const ill_t *ill)
3582 {
3583 size_t len;
3584
3585 sdl->sdl_family = AF_LINK;
3586 sdl->sdl_index = ill_get_upper_ifindex(ill);
3587 sdl->sdl_type = ill->ill_type;
3588 ill_get_name(ill, sdl->sdl_data, sizeof (sdl->sdl_data));
3589 len = strlen(sdl->sdl_data);
3590 ASSERT(len < 256);
3591 sdl->sdl_nlen = (uchar_t)len;
3592 sdl->sdl_alen = ill->ill_phys_addr_length;
3593 sdl->sdl_slen = 0;
3594 if (ill->ill_phys_addr_length != 0 && ill->ill_phys_addr != NULL)
3595 bcopy(ill->ill_phys_addr, &sdl->sdl_data[len], sdl->sdl_alen);
3596
3597 return (sizeof (struct sockaddr_dl));
3598 }
3599
3600 /*
3601 * ill_xarp_info
3602 * creates xarp info from the device.
3603 */
3604 static int
3605 ill_xarp_info(struct sockaddr_dl *sdl, ill_t *ill)
3606 {
3607 sdl->sdl_family = AF_LINK;
3608 sdl->sdl_index = ill->ill_phyint->phyint_ifindex;
3609 sdl->sdl_type = ill->ill_type;
3610 ill_get_name(ill, sdl->sdl_data, sizeof (sdl->sdl_data));
3611 sdl->sdl_nlen = (uchar_t)mi_strlen(sdl->sdl_data);
3612 sdl->sdl_alen = ill->ill_phys_addr_length;
3613 sdl->sdl_slen = 0;
3614 return (sdl->sdl_nlen);
3615 }
3616
3617 static int
3618 loopback_kstat_update(kstat_t *ksp, int rw)
3619 {
3620 kstat_named_t *kn;
3621 netstackid_t stackid;
3622 netstack_t *ns;
3623 ip_stack_t *ipst;
3624
3625 if (ksp == NULL || ksp->ks_data == NULL)
3626 return (EIO);
3627
3628 if (rw == KSTAT_WRITE)
3629 return (EACCES);
3630
3631 kn = KSTAT_NAMED_PTR(ksp);
3632 stackid = (zoneid_t)(uintptr_t)ksp->ks_private;
3633
3634 ns = netstack_find_by_stackid(stackid);
3635 if (ns == NULL)
3636 return (-1);
3637
3638 ipst = ns->netstack_ip;
3639 if (ipst == NULL) {
3640 netstack_rele(ns);
3641 return (-1);
3642 }
3643 kn[0].value.ui32 = ipst->ips_loopback_packets;
3644 kn[1].value.ui32 = ipst->ips_loopback_packets;
3645 netstack_rele(ns);
3646 return (0);
3647 }
3648
3649 /*
3650 * Has ifindex been plumbed already?
3651 */
3652 static boolean_t
3653 phyint_exists(uint_t index, ip_stack_t *ipst)
3654 {
3655 ASSERT(index != 0);
3656 ASSERT(RW_LOCK_HELD(&ipst->ips_ill_g_lock));
3657
3658 return (avl_find(&ipst->ips_phyint_g_list->phyint_list_avl_by_index,
3659 &index, NULL) != NULL);
3660 }
3661
3662 /*
3663 * Pick a unique ifindex.
3664 * When the index counter passes IF_INDEX_MAX for the first time, the wrap
3665 * flag is set so that the next time ip_assign_ifindex() is called, it
3666 * falls through and resets the index counter back to 1, the minimum value
3667 * for the interface index. The logic below assumes that ips_ill_index
3668 * can hold a value of IF_INDEX_MAX+1 without any loss (i.e. without
3669 * wrapping back to 0).
3670 */
3671 boolean_t
3672 ip_assign_ifindex(uint_t *indexp, ip_stack_t *ipst)
3673 {
3674 uint_t loops;
3675
3676 if (!ipst->ips_ill_index_wrap) {
3677 *indexp = ipst->ips_ill_index++;
3678 if (ipst->ips_ill_index > IF_INDEX_MAX) {
3679 /*
3680 * Reached the maximum ifindex value, set the wrap
3681 * flag to indicate that it is no longer possible
3682 * to assume that a given index is unallocated.
3683 */
3684 ipst->ips_ill_index_wrap = B_TRUE;
3685 }
3686 return (B_TRUE);
3687 }
3688
3689 if (ipst->ips_ill_index > IF_INDEX_MAX)
3690 ipst->ips_ill_index = 1;
3691
3692 /*
3693 * Start reusing unused indexes. Note that we hold the ill_g_lock
3694 * at this point and don't want to call any function that attempts
3695 * to get the lock again.
3696 */
3697 for (loops = IF_INDEX_MAX; loops > 0; loops--) {
3698 if (!phyint_exists(ipst->ips_ill_index, ipst)) {
3699 /* found unused index - use it */
3700 *indexp = ipst->ips_ill_index;
3701 return (B_TRUE);
3702 }
3703
3704 ipst->ips_ill_index++;
3705 if (ipst->ips_ill_index > IF_INDEX_MAX)
3706 ipst->ips_ill_index = 1;
3707 }
3708
3709 /*
3710 * All interface indexes are in use.
3711 */
3712 return (B_FALSE);
3713 }
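
/*
 * Worked example of the wrap logic above, using a hypothetical
 * IF_INDEX_MAX of 5: indexes 1 through 5 are handed out sequentially;
 * the assignment that pushes ips_ill_index past 5 sets
 * ips_ill_index_wrap. On subsequent calls the counter wraps to 1 and
 * we probe linearly with phyint_exists(), returning B_FALSE only if
 * all 5 indexes are currently plumbed.
 */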
3714
3715 /*
3716 * Assign a unique interface index for the phyint.
3717 */
3718 static boolean_t
3719 phyint_assign_ifindex(phyint_t *phyi, ip_stack_t *ipst)
3720 {
3721 ASSERT(phyi->phyint_ifindex == 0);
3722 return (ip_assign_ifindex(&phyi->phyint_ifindex, ipst));
3723 }
3724
3725 /*
3726 * Initialize the flags on `phyi' as per the provided mactype.
3727 */
3728 static void
3729 phyint_flags_init(phyint_t *phyi, t_uscalar_t mactype)
3730 {
3731 uint64_t flags = 0;
3732
3733 /*
3734 * Initialize PHYI_RUNNING and PHYI_FAILED. For non-IPMP interfaces,
3735 * we always presume the underlying hardware is working and set
3736 * PHYI_RUNNING (if it's not, the driver will subsequently send a
3737 * DL_NOTE_LINK_DOWN message). For IPMP interfaces, at initialization
3738 * there are no active interfaces in the group so we set PHYI_FAILED.
3739 */
3740 if (mactype == SUNW_DL_IPMP)
3741 flags |= PHYI_FAILED;
3742 else
3743 flags |= PHYI_RUNNING;
3744
3745 switch (mactype) {
3746 case SUNW_DL_VNI:
3747 flags |= PHYI_VIRTUAL;
3748 break;
3749 case SUNW_DL_IPMP:
3750 flags |= PHYI_IPMP;
3751 break;
3752 case DL_LOOP:
3753 flags |= (PHYI_LOOPBACK | PHYI_VIRTUAL);
3754 break;
3755 }
3756
3757 mutex_enter(&phyi->phyint_lock);
3758 phyi->phyint_flags |= flags;
3759 mutex_exit(&phyi->phyint_lock);
3760 }
3761
3762 /*
3763 * Return a pointer to the ill which matches the supplied name. Note that
3764 * the ill name length includes the null termination character. (May be
3765 * called as writer.)
3766 * If do_alloc is set and the interface is the loopback device, it will
3767 * be created automatically. We cannot bump up the reference count on
3768 * condemned ills, so duplicate detection can't be done using this func.
3769 */
3770 ill_t *
3771 ill_lookup_on_name(char *name, boolean_t do_alloc, boolean_t isv6,
3772 boolean_t *did_alloc, ip_stack_t *ipst)
3773 {
3774 ill_t *ill;
3775 ipif_t *ipif;
3776 ipsq_t *ipsq;
3777 kstat_named_t *kn;
3778 boolean_t isloopback;
3779 in6_addr_t ov6addr;
3780
3781 isloopback = mi_strcmp(name, ipif_loopback_name) == 0;
3782
3783 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
3784 ill = ill_find_by_name(name, isv6, ipst);
3785 rw_exit(&ipst->ips_ill_g_lock);
3786 if (ill != NULL)
3787 return (ill);
3788
3789 /*
3790 * Couldn't find it. Does this happen to be a lookup for the
3791 * loopback device and are we allowed to allocate it?
3792 */
3793 if (!isloopback || !do_alloc)
3794 return (NULL);
3795
3796 rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
3797 ill = ill_find_by_name(name, isv6, ipst);
3798 if (ill != NULL) {
3799 rw_exit(&ipst->ips_ill_g_lock);
3800 return (ill);
3801 }
3802
3803 /* Create the loopback device on demand */
3804 ill = (ill_t *)(mi_alloc(sizeof (ill_t) +
3805 sizeof (ipif_loopback_name), BPRI_MED));
3806 if (ill == NULL)
3807 goto done;
3808
3809 bzero(ill, sizeof (*ill));
3810 ill->ill_ipst = ipst;
3811 netstack_hold(ipst->ips_netstack);
3812 /*
3813 * For exclusive stacks we set the zoneid to zero
3814 * to make IP operate as if in the global zone.
3815 */
3816 ill->ill_zoneid = GLOBAL_ZONEID;
3817
3818 if (ill_init_common(ill, NULL, isv6, B_TRUE, B_FALSE) != 0)
3819 goto done;
3820
3821 if (!ill_allocate_mibs(ill))
3822 goto done;
3823
3824 ill->ill_current_frag = ill->ill_max_frag;
3825 ill->ill_mtu = ill->ill_max_frag; /* Initial value */
3826 ill->ill_mc_mtu = ill->ill_mtu;
3827 /*
3828 * ipif_loopback_name can't be pointed at directly because it's used
3829 * by both the ipv4 and ipv6 interfaces. When the ill is removed
3830 * from the glist, ill_glist_delete() sets the first character of
3831 * ill_name to '\0'.
3832 */
3833 ill->ill_name = (char *)ill + sizeof (*ill);
3834 (void) strcpy(ill->ill_name, ipif_loopback_name);
3835 ill->ill_name_length = sizeof (ipif_loopback_name);
3836 /* Set ill_dlpi_pending for ipsq_current_finish() to work properly */
3837 ill->ill_dlpi_pending = DL_PRIM_INVAL;
3838
3839 ipif = ipif_allocate(ill, 0L, IRE_LOOPBACK, B_TRUE, B_TRUE, NULL);
3840 if (ipif == NULL)
3841 goto done;
3842
3843 ill->ill_flags = ILLF_MULTICAST;
3844
3845 ov6addr = ipif->ipif_v6lcl_addr;
3846 /* Set up default loopback address and mask. */
3847 if (!isv6) {
3848 ipaddr_t inaddr_loopback = htonl(INADDR_LOOPBACK);
3849
3850 IN6_IPADDR_TO_V4MAPPED(inaddr_loopback, &ipif->ipif_v6lcl_addr);
3851 V4MASK_TO_V6(htonl(IN_CLASSA_NET), ipif->ipif_v6net_mask);
3852 V6_MASK_COPY(ipif->ipif_v6lcl_addr, ipif->ipif_v6net_mask,
3853 ipif->ipif_v6subnet);
3854 ill->ill_flags |= ILLF_IPV4;
3855 } else {
3856 ipif->ipif_v6lcl_addr = ipv6_loopback;
3857 ipif->ipif_v6net_mask = ipv6_all_ones;
3858 V6_MASK_COPY(ipif->ipif_v6lcl_addr, ipif->ipif_v6net_mask,
3859 ipif->ipif_v6subnet);
3860 ill->ill_flags |= ILLF_IPV6;
3861 }
3862
3863 /*
3864 * Chain us in at the end of the ill list. Hold the ill
3865 * before we make it globally visible. 1 for the lookup.
3866 */
3867 ill_refhold(ill);
3868
3869 ipsq = ill->ill_phyint->phyint_ipsq;
3870
3871 if (ill_glist_insert(ill, "lo", isv6) != 0)
3872 cmn_err(CE_PANIC, "cannot insert loopback interface");
3873
3874 /* Let SCTP know so that it can add this to its list */
3875 sctp_update_ill(ill, SCTP_ILL_INSERT);
3876
3877 /*
3878 * We have already assigned ipif_v6lcl_addr above, but we need to
3879 * call sctp_update_ipif_addr() after SCTP_ILL_INSERT, which in turn
3880 * must come after ill_glist_insert() since we need the ill_index
3881 * set. Pass on ipv6_loopback as the old address.
3882 */
3883 sctp_update_ipif_addr(ipif, ov6addr);
3884
3885 ip_rts_newaddrmsg(RTM_CHGADDR, 0, ipif, RTSQ_DEFAULT);
3886
3887 /*
3888 * ill_glist_insert() -> ill_phyint_reinit() may have merged IPSQs.
3889 * If so, free our original one.
3890 */
3891 if (ipsq != ill->ill_phyint->phyint_ipsq)
3892 ipsq_delete(ipsq);
3893
3894 if (ipst->ips_loopback_ksp == NULL) {
3895 /* Export loopback interface statistics */
3896 ipst->ips_loopback_ksp = kstat_create_netstack("lo", 0,
3897 ipif_loopback_name, "net",
3898 KSTAT_TYPE_NAMED, 2, 0,
3899 ipst->ips_netstack->netstack_stackid);
3900 if (ipst->ips_loopback_ksp != NULL) {
3901 ipst->ips_loopback_ksp->ks_update =
3902 loopback_kstat_update;
3903 kn = KSTAT_NAMED_PTR(ipst->ips_loopback_ksp);
3904 kstat_named_init(&kn[0], "ipackets", KSTAT_DATA_UINT32);
3905 kstat_named_init(&kn[1], "opackets", KSTAT_DATA_UINT32);
3906 ipst->ips_loopback_ksp->ks_private =
3907 (void *)(uintptr_t)ipst->ips_netstack->
3908 netstack_stackid;
3909 kstat_install(ipst->ips_loopback_ksp);
3910 }
3911 }
3912
3913 *did_alloc = B_TRUE;
3914 rw_exit(&ipst->ips_ill_g_lock);
3915 ill_nic_event_dispatch(ill, MAP_IPIF_ID(ill->ill_ipif->ipif_id),
3916 NE_PLUMB, ill->ill_name, ill->ill_name_length);
3917 return (ill);
3918 done:
3919 if (ill != NULL) {
3920 if (ill->ill_phyint != NULL) {
3921 ipsq = ill->ill_phyint->phyint_ipsq;
3922 if (ipsq != NULL) {
3923 ipsq->ipsq_phyint = NULL;
3924 ipsq_delete(ipsq);
3925 }
3926 mi_free(ill->ill_phyint);
3927 }
3928 ill_free_mib(ill);
3929 if (ill->ill_ipst != NULL)
3930 netstack_rele(ill->ill_ipst->ips_netstack);
3931 mi_free(ill);
3932 }
3933 rw_exit(&ipst->ips_ill_g_lock);
3934 return (NULL);
3935 }
3936
3937 /*
3938 * For IPP calls - use the ip_stack_t for the global stack.
3939 */
3940 ill_t *
3941 ill_lookup_on_ifindex_global_instance(uint_t index, boolean_t isv6)
3942 {
3943 ip_stack_t *ipst;
3944 ill_t *ill;
3945 netstack_t *ns;
3946
3947 ns = netstack_find_by_stackid(GLOBAL_NETSTACKID);
3948
3949 if ((ipst = ns->netstack_ip) == NULL) {
3950 cmn_err(CE_WARN, "No ip_stack_t for zoneid zero!\n");
3951 netstack_rele(ns);
3952 return (NULL);
3953 }
3954
3955 ill = ill_lookup_on_ifindex(index, isv6, ipst);
3956 netstack_rele(ns);
3957 return (ill);
3958 }
3959
3960 /*
3961 * Return a pointer to the ill which matches the index and IP version type.
3962 */
3963 ill_t *
3964 ill_lookup_on_ifindex(uint_t index, boolean_t isv6, ip_stack_t *ipst)
3965 {
3966 ill_t *ill;
3967 phyint_t *phyi;
3968
3969 /*
3970 * Indexes are stored in the phyint - a common structure
3971 * to both IPv4 and IPv6.
3972 */
3973 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
3974 phyi = avl_find(&ipst->ips_phyint_g_list->phyint_list_avl_by_index,
3975 (void *) &index, NULL);
3976 if (phyi != NULL) {
3977 ill = isv6 ? phyi->phyint_illv6 : phyi->phyint_illv4;
3978 if (ill != NULL) {
3979 mutex_enter(&ill->ill_lock);
3980 if (!ILL_IS_CONDEMNED(ill)) {
3981 ill_refhold_locked(ill);
3982 mutex_exit(&ill->ill_lock);
3983 rw_exit(&ipst->ips_ill_g_lock);
3984 return (ill);
3985 }
3986 mutex_exit(&ill->ill_lock);
3987 }
3988 }
3989 rw_exit(&ipst->ips_ill_g_lock);
3990 return (NULL);
3991 }
3992
3993 /*
3994 * Verify whether or not an interface index is valid for the specified zoneid
3995 * to transmit packets.
3996 * It can be zero (meaning "reset") or an interface index assigned
3997 * to a non-VNI interface. (We don't use VNI interface to send packets.)
3998 */
3999 boolean_t
4000 ip_xmit_ifindex_valid(uint_t ifindex, zoneid_t zoneid, boolean_t isv6,
4001 ip_stack_t *ipst)
4002 {
4003 ill_t *ill;
4004
4005 if (ifindex == 0)
4006 return (B_TRUE);
4007
4008 ill = ill_lookup_on_ifindex_zoneid(ifindex, zoneid, isv6, ipst);
4009 if (ill == NULL)
4010 return (B_FALSE);
4011 if (IS_VNI(ill)) {
4012 ill_refrele(ill);
4013 return (B_FALSE);
4014 }
4015 ill_refrele(ill);
4016 return (B_TRUE);
4017 }
4018
4019 /*
4020 * Return the ifindex next in sequence after the passed in ifindex.
4021 * If there is no next ifindex for the given protocol, return 0.
4022 */
4023 uint_t
4024 ill_get_next_ifindex(uint_t index, boolean_t isv6, ip_stack_t *ipst)
4025 {
4026 phyint_t *phyi;
4027 phyint_t *phyi_initial;
4028 uint_t ifindex;
4029
4030 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
4031
4032 if (index == 0) {
4033 phyi = avl_first(
4034 &ipst->ips_phyint_g_list->phyint_list_avl_by_index);
4035 } else {
4036 phyi = phyi_initial = avl_find(
4037 &ipst->ips_phyint_g_list->phyint_list_avl_by_index,
4038 (void *) &index, NULL);
4039 }
4040
4041 for (; phyi != NULL;
4042 phyi = avl_walk(&ipst->ips_phyint_g_list->phyint_list_avl_by_index,
4043 phyi, AVL_AFTER)) {
4044 /*
4045 * If we're not returning the first interface in the tree
4046 * and we still haven't moved past the phyint_t that
4047 * corresponds to index, avl_walk needs to be called again.
4048 */
4049 if (!((index != 0) && (phyi == phyi_initial))) {
4050 if (isv6) {
4051 if ((phyi->phyint_illv6) &&
4052 ILL_CAN_LOOKUP(phyi->phyint_illv6) &&
4053 (phyi->phyint_illv6->ill_isv6 == 1))
4054 break;
4055 } else {
4056 if ((phyi->phyint_illv4) &&
4057 ILL_CAN_LOOKUP(phyi->phyint_illv4) &&
4058 (phyi->phyint_illv4->ill_isv6 == 0))
4059 break;
4060 }
4061 }
4062 }
4063
4064 rw_exit(&ipst->ips_ill_g_lock);
4065
4066 if (phyi != NULL)
4067 ifindex = phyi->phyint_ifindex;
4068 else
4069 ifindex = 0;
4070
4071 return (ifindex);
4072 }
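
/*
 * A minimal usage sketch (illustrative only): enumerate every plumbed
 * IPv4 ifindex in a stack by starting from 0 and iterating until 0 is
 * returned:
 *
 *	uint_t idx = 0;
 *
 *	while ((idx = ill_get_next_ifindex(idx, B_FALSE, ipst)) != 0)
 *		...;	(process idx)
 */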
4073
4074 /*
4075 * Return the ifindex for the named interface.
4076 * If there is no such interface, return 0.
4077 */
4078 uint_t
4079 ill_get_ifindex_by_name(char *name, ip_stack_t *ipst)
4080 {
4081 phyint_t *phyi;
4082 avl_index_t where = 0;
4083 uint_t ifindex;
4084
4085 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
4086
4087 if ((phyi = avl_find(&ipst->ips_phyint_g_list->phyint_list_avl_by_name,
4088 name, &where)) == NULL) {
4089 rw_exit(&ipst->ips_ill_g_lock);
4090 return (0);
4091 }
4092
4093 ifindex = phyi->phyint_ifindex;
4094
4095 rw_exit(&ipst->ips_ill_g_lock);
4096
4097 return (ifindex);
4098 }
4099
4100 /*
4101 * Return the ifindex to be used by upper layer protocols, for instance
4102 * for IPV6_RECVPKTINFO. For IPMP this is the ifindex of the upper ill.
4103 */
4104 uint_t
4105 ill_get_upper_ifindex(const ill_t *ill)
4106 {
4107 if (IS_UNDER_IPMP(ill))
4108 return (ipmp_ill_get_ipmp_ifindex(ill));
4109 else
4110 return (ill->ill_phyint->phyint_ifindex);
4111 }
4112
4114 /*
4115 * Obtain a reference to the ill. The ill_refcnt is a dynamic refcnt
4116 * that gives a running thread a reference to the ill. This reference must be
4117 * released by the thread when it is done accessing the ill and related
4118 * objects. ill_refcnt cannot be used to account for static references
4119 * such as other structures pointing to an ill. Callers must generally
4120 * check whether an ill can be refheld by using ILL_CAN_LOOKUP macros
4121 * or be sure that the ill is not being deleted or changing state before
4122 * calling the refhold functions. A non-zero ill_refcnt ensures that the
4123 * ill won't change any of its critical state such as address, netmask etc.
4124 */
4125 void
4126 ill_refhold(ill_t *ill)
4127 {
4128 mutex_enter(&ill->ill_lock);
4129 ill->ill_refcnt++;
4130 ILL_TRACE_REF(ill);
4131 mutex_exit(&ill->ill_lock);
4132 }
4133
4134 void
4135 ill_refhold_locked(ill_t *ill)
4136 {
4137 ASSERT(MUTEX_HELD(&ill->ill_lock));
4138 ill->ill_refcnt++;
4139 ILL_TRACE_REF(ill);
4140 }
4141
4142 /* Returns true if we managed to get a refhold */
4143 boolean_t
4144 ill_check_and_refhold(ill_t *ill)
4145 {
4146 mutex_enter(&ill->ill_lock);
4147 if (!ILL_IS_CONDEMNED(ill)) {
4148 ill_refhold_locked(ill);
4149 mutex_exit(&ill->ill_lock);
4150 return (B_TRUE);
4151 }
4152 mutex_exit(&ill->ill_lock);
4153 return (B_FALSE);
4154 }
4155
4156 /*
4157 * Must not be called while holding any locks. Otherwise if this is
4158 * the last reference to be released, there is a chance of recursive mutex
4159 * panic due to ill_refrele -> ipif_ill_refrele_tail -> qwriter_ip trying
4160 * to restart an ioctl.
4161 */
4162 void
4163 ill_refrele(ill_t *ill)
4164 {
4165 mutex_enter(&ill->ill_lock);
4166 ASSERT(ill->ill_refcnt != 0);
4167 ill->ill_refcnt--;
4168 ILL_UNTRACE_REF(ill);
4169 if (ill->ill_refcnt != 0) {
4170 /* Every ire pointing to the ill adds 1 to ill_refcnt */
4171 mutex_exit(&ill->ill_lock);
4172 return;
4173 }
4174
4175 /* Drops the ill_lock */
4176 ipif_ill_refrele_tail(ill);
4177 }
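
/*
 * A typical lookup-and-use pattern, sketched for illustration: the
 * lookup functions in this file return a held ill, and the caller must
 * drop that hold (with no locks held, per the comment above) when done:
 *
 *	ill_t *ill = ill_lookup_on_ifindex(ifindex, B_FALSE, ipst);
 *
 *	if (ill != NULL) {
 *		...use ill...
 *		ill_refrele(ill);
 *	}
 *
 * Code that has an ill pointer but no hold on it can use
 * ill_check_and_refhold(), which refuses condemned ills.
 */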
4178
4179 /*
4180 * Obtain a weak reference count on the ill. This reference ensures the
4181 * ill won't be freed, but the ill may change any of its critical state
4182 * such as netmask, address etc. Returns an error if the ill has started
4183 * closing.
4184 */
4185 boolean_t
4186 ill_waiter_inc(ill_t *ill)
4187 {
4188 mutex_enter(&ill->ill_lock);
4189 if (ill->ill_state_flags & ILL_CONDEMNED) {
4190 mutex_exit(&ill->ill_lock);
4191 return (B_FALSE);
4192 }
4193 ill->ill_waiters++;
4194 mutex_exit(&ill->ill_lock);
4195 return (B_TRUE);
4196 }
4197
4198 void
4199 ill_waiter_dcr(ill_t *ill)
4200 {
4201 mutex_enter(&ill->ill_lock);
4202 ill->ill_waiters--;
4203 if (ill->ill_waiters == 0)
4204 cv_broadcast(&ill->ill_cv);
4205 mutex_exit(&ill->ill_lock);
4206 }
4207
4208 /*
4209 * ip_ll_subnet_defaults is called when we get the DL_INFO_ACK back from the
4210 * driver. We construct best guess defaults for lower level information that
4211 * we need. If an interface is brought up without injection of any overriding
4212 * information from outside, we have to be ready to go with these defaults.
4213 * When we get the first DL_INFO_ACK (from ip_open() sending a DL_INFO_REQ)
4214 * we primarily want the dl_provider_style.
4215 * The subsequent DL_INFO_ACK is received after doing a DL_ATTACH and
4216 * DL_BIND, at which point we assume the rest of the information is valid.
4217 */
4218 void
4219 ip_ll_subnet_defaults(ill_t *ill, mblk_t *mp)
4220 {
4221 uchar_t *brdcst_addr;
4222 uint_t brdcst_addr_length, phys_addr_length;
4223 t_scalar_t sap_length;
4224 dl_info_ack_t *dlia;
4225 ip_m_t *ipm;
4226 dl_qos_cl_sel1_t *sel1;
4227 int min_mtu;
4228
4229 ASSERT(IAM_WRITER_ILL(ill));
4230
4231 /*
4232 * Until the ill is fully up, it is not globally visible,
4233 * so there is no need for a lock.
4234 */
4235 dlia = (dl_info_ack_t *)mp->b_rptr;
4236 ill->ill_mactype = dlia->dl_mac_type;
4237
4238 ipm = ip_m_lookup(dlia->dl_mac_type);
4239 if (ipm == NULL) {
4240 ipm = ip_m_lookup(DL_OTHER);
4241 ASSERT(ipm != NULL);
4242 }
4243 ill->ill_media = ipm;
4244
4245 /*
4246 * When the new DLPI stuff is ready we'll pull lengths
4247 * from dlia.
4248 */
4249 if (dlia->dl_version == DL_VERSION_2) {
4250 brdcst_addr_length = dlia->dl_brdcst_addr_length;
4251 brdcst_addr = mi_offset_param(mp, dlia->dl_brdcst_addr_offset,
4252 brdcst_addr_length);
4253 if (brdcst_addr == NULL) {
4254 brdcst_addr_length = 0;
4255 }
4256 sap_length = dlia->dl_sap_length;
4257 phys_addr_length = dlia->dl_addr_length - ABS(sap_length);
4258 ip1dbg(("ip: bcast_len %d, sap_len %d, phys_len %d\n",
4259 brdcst_addr_length, sap_length, phys_addr_length));
4260 } else {
4261 brdcst_addr_length = 6;
4262 brdcst_addr = ip_six_byte_all_ones;
4263 sap_length = -2;
4264 phys_addr_length = brdcst_addr_length;
4265 }
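
	/*
	 * Note on the defaults above: in DLPI, a negative dl_sap_length
	 * means the SAP follows the physical address within a DLSAP
	 * address, so the -2 describes the Ethernet-style layout of a
	 * 2-byte SAP appended to a 6-byte address.
	 */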
4266
4267 ill->ill_bcast_addr_length = brdcst_addr_length;
4268 ill->ill_phys_addr_length = phys_addr_length;
4269 ill->ill_sap_length = sap_length;
4270
4271 /*
4272 * Synthetic DLPI types such as SUNW_DL_IPMP specify a zero SDU,
4273 * but we must ensure a minimum IP MTU is used since other bits of
4274 * IP will fly apart otherwise.
4275 */
4276 min_mtu = ill->ill_isv6 ? IPV6_MIN_MTU : IP_MIN_MTU;
4277 ill->ill_max_frag = MAX(min_mtu, dlia->dl_max_sdu);
4278 ill->ill_current_frag = ill->ill_max_frag;
4279 ill->ill_mtu = ill->ill_max_frag;
4280 ill->ill_mc_mtu = ill->ill_mtu; /* Overridden by DL_NOTE_SDU_SIZE2 */
4281
4282 ill->ill_type = ipm->ip_m_type;
4283
4284 if (!ill->ill_dlpi_style_set) {
4285 if (dlia->dl_provider_style == DL_STYLE2)
4286 ill->ill_needs_attach = 1;
4287
4288 phyint_flags_init(ill->ill_phyint, ill->ill_mactype);
4289
4290 /*
4291 * Allocate the first ipif on this ill. We don't delay it
4292 * further as ioctl handling assumes at least one ipif exists.
4293 *
4294 * At this point we don't know whether the ill is v4 or v6.
4295 * We will know this when the SIOCSLIFNAME happens and
4296 * the correct value for ill_isv6 will be assigned in
4297 * ipif_set_values(). We need to hold the ill lock and
4298 * clear the ILL_LL_SUBNET_PENDING flag and atomically do
4299 * the wakeup.
4300 */
4301 (void) ipif_allocate(ill, 0, IRE_LOCAL,
4302 dlia->dl_provider_style != DL_STYLE2, B_TRUE, NULL);
4303 mutex_enter(&ill->ill_lock);
4304 ASSERT(ill->ill_dlpi_style_set == 0);
4305 ill->ill_dlpi_style_set = 1;
4306 ill->ill_state_flags &= ~ILL_LL_SUBNET_PENDING;
4307 cv_broadcast(&ill->ill_cv);
4308 mutex_exit(&ill->ill_lock);
4309 freemsg(mp);
4310 return;
4311 }
4312 ASSERT(ill->ill_ipif != NULL);
4313 /*
4314 * We know whether it is IPv4 or IPv6 now, as this is the
4315 * second DL_INFO_ACK we are receiving in response to the
4316 * DL_INFO_REQ sent in ipif_set_values.
4317 */
4318 ill->ill_sap = (ill->ill_isv6) ? ipm->ip_m_ipv6sap : ipm->ip_m_ipv4sap;
4319 /*
4320 * Clear all the flags that were set based on ill_bcast_addr_length
4321 * and ill_phys_addr_length (in ipif_set_values) as these could have
4322 * changed now and we need to re-evaluate.
4323 */
4324 ill->ill_flags &= ~(ILLF_MULTICAST | ILLF_NONUD | ILLF_NOARP);
4325 ill->ill_ipif->ipif_flags &= ~(IPIF_BROADCAST | IPIF_POINTOPOINT);
4326
4327 /*
4328 * Free ill_bcast_mp as things could have changed now.
4329 *
4330 * NOTE: The IPMP meta-interface is special-cased because it starts
4331 * with no underlying interfaces (and thus an unknown broadcast
4332 * address length), but we enforce that an interface is broadcast-
4333 * capable as part of allowing it to join a group.
4334 */
4335 if (ill->ill_bcast_addr_length == 0 && !IS_IPMP(ill)) {
4336 if (ill->ill_bcast_mp != NULL)
4337 freemsg(ill->ill_bcast_mp);
4338 ill->ill_net_type = IRE_IF_NORESOLVER;
4339
4340 ill->ill_bcast_mp = ill_dlur_gen(NULL,
4341 ill->ill_phys_addr_length,
4342 ill->ill_sap,
4343 ill->ill_sap_length);
4344
4345 if (ill->ill_isv6)
4346 /*
4347 * Note: xresolv interfaces will eventually need NOARP
4348 * set here as well, but that will require those
4349 * external resolvers to have some knowledge of
4350 * that flag and act appropriately. Not to be changed
4351 * at present.
4352 */
4353 ill->ill_flags |= ILLF_NONUD;
4354 else
4355 ill->ill_flags |= ILLF_NOARP;
4356
4357 if (ill->ill_mactype == SUNW_DL_VNI) {
4358 ill->ill_ipif->ipif_flags |= IPIF_NOXMIT;
4359 } else if (ill->ill_phys_addr_length == 0 ||
4360 ill->ill_mactype == DL_IPV4 ||
4361 ill->ill_mactype == DL_IPV6) {
4362 /*
4363 * The underlying link is point-to-point, so mark the
4364 * interface as such. We can do IP multicast over
4365 * such a link since it transmits all network-layer
4366 * packets to the remote side the same way.
4367 */
4368 ill->ill_flags |= ILLF_MULTICAST;
4369 ill->ill_ipif->ipif_flags |= IPIF_POINTOPOINT;
4370 }
4371 } else {
4372 ill->ill_net_type = IRE_IF_RESOLVER;
4373 if (ill->ill_bcast_mp != NULL)
4374 freemsg(ill->ill_bcast_mp);
4375 ill->ill_bcast_mp = ill_dlur_gen(brdcst_addr,
4376 ill->ill_bcast_addr_length, ill->ill_sap,
4377 ill->ill_sap_length);
4378 /*
4379 * Later detect lack of DLPI driver multicast
4380 * capability by catching DL_ENABMULTI errors in
4381 * ip_rput_dlpi.
4382 */
4383 ill->ill_flags |= ILLF_MULTICAST;
4384 if (!ill->ill_isv6)
4385 ill->ill_ipif->ipif_flags |= IPIF_BROADCAST;
4386 }
4387
4388 /* For IPMP, PHYI_IPMP should already be set by phyint_flags_init() */
4389 if (ill->ill_mactype == SUNW_DL_IPMP)
4390 ASSERT(ill->ill_phyint->phyint_flags & PHYI_IPMP);
4391
4392 /* By default an interface does not support any CoS marking */
4393 ill->ill_flags &= ~ILLF_COS_ENABLED;
4394
4395 /*
4396 * If we get QoS information in DL_INFO_ACK, the device supports
4397 * some form of CoS marking, set ILLF_COS_ENABLED.
4398 */
4399 sel1 = (dl_qos_cl_sel1_t *)mi_offset_param(mp, dlia->dl_qos_offset,
4400 dlia->dl_qos_length);
4401 if ((sel1 != NULL) && (sel1->dl_qos_type == DL_QOS_CL_SEL1)) {
4402 ill->ill_flags |= ILLF_COS_ENABLED;
4403 }
4404
4405 /* Clear any previous error indication. */
4406 ill->ill_error = 0;
4407 freemsg(mp);
4408 }
4409
4410 /*
4411 * Perform various checks to verify that an address would make sense as a
4412 * local, remote, or subnet interface address.
4413 */
4414 static boolean_t
4415 ip_addr_ok_v4(ipaddr_t addr, ipaddr_t subnet_mask)
4416 {
4417 ipaddr_t net_mask;
4418
4419 /*
4420 * Don't allow the address to be all zeroes or all ones,
4421 * but allow an all-ones netmask.
4422 */
4423 if ((net_mask = ip_net_mask(addr)) == 0)
4424 return (B_FALSE);
4425 /* A given netmask overrides the "guess" netmask */
4426 if (subnet_mask != 0)
4427 net_mask = subnet_mask;
4428 if ((net_mask != ~(ipaddr_t)0) && ((addr == (addr & net_mask)) ||
4429 (addr == (addr | ~net_mask)))) {
4430 return (B_FALSE);
4431 }
4432
4433 /*
4434 * Even if the netmask is all ones, we do not allow the address to be
4435 * 255.255.255.255
4436 */
4437 if (addr == INADDR_BROADCAST)
4438 return (B_FALSE);
4439
4440 if (CLASSD(addr))
4441 return (B_FALSE);
4442
4443 return (B_TRUE);
4444 }
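
/*
 * Worked example of the checks above (with illustrative addresses):
 * given subnet_mask 255.255.255.0, 192.168.1.0 is rejected as the
 * subnet address (addr == (addr & net_mask)) and 192.168.1.255 is
 * rejected as the subnet broadcast (addr == (addr | ~net_mask)), while
 * 192.168.1.5 passes. 255.255.255.255 and class D (multicast)
 * addresses are always rejected.
 */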
4445
4446 #define V6_IPIF_LINKLOCAL(p) \
4447 IN6_IS_ADDR_LINKLOCAL(&(p)->ipif_v6lcl_addr)
4448
4449 /*
4450 * Compare two given ipifs and check if the second one is better than
4451 * the first one using the order of preference (not taking deprecated
4452 * into account) specified in ipif_lookup_multicast().
4453 */
4454 static boolean_t
4455 ipif_comp_multi(ipif_t *old_ipif, ipif_t *new_ipif, boolean_t isv6)
4456 {
4457 /* Check the least preferred first. */
4458 if (IS_LOOPBACK(old_ipif->ipif_ill)) {
4459 /* If both ipifs are the same, use the first one. */
4460 if (IS_LOOPBACK(new_ipif->ipif_ill))
4461 return (B_FALSE);
4462 else
4463 return (B_TRUE);
4464 }
4465
4466 /* For IPv6, check for link local address. */
4467 if (isv6 && V6_IPIF_LINKLOCAL(old_ipif)) {
4468 if (IS_LOOPBACK(new_ipif->ipif_ill) ||
4469 V6_IPIF_LINKLOCAL(new_ipif)) {
4470 /* The second one is equal or less preferred. */
4471 return (B_FALSE);
4472 } else {
4473 return (B_TRUE);
4474 }
4475 }
4476
4477 /* Then check for point to point interface. */
4478 if (old_ipif->ipif_flags & IPIF_POINTOPOINT) {
4479 if (IS_LOOPBACK(new_ipif->ipif_ill) ||
4480 (isv6 && V6_IPIF_LINKLOCAL(new_ipif)) ||
4481 (new_ipif->ipif_flags & IPIF_POINTOPOINT)) {
4482 return (B_FALSE);
4483 } else {
4484 return (B_TRUE);
4485 }
4486 }
4487
4488 /* old_ipif is a normal interface, so no need to use the new one. */
4489 return (B_FALSE);
4490 }
4491
4492 /*
4493 * Find a multicast-capable ipif given an IP instance and zoneid.
4494 * The ipif must be up, and its ill must be multicast-capable, not
4495 * condemned, not an underlying interface in an IPMP group, and
4496 * not a VNI interface. Order of preference:
4497 *
4498 * 1a. normal
4499 * 1b. normal, but deprecated
4500 * 2a. point to point
4501 * 2b. point to point, but deprecated
4502 * 3a. link local
4503 * 3b. link local, but deprecated
4504 * 4. loopback.
4505 */
4506 static ipif_t *
4507 ipif_lookup_multicast(ip_stack_t *ipst, zoneid_t zoneid, boolean_t isv6)
4508 {
4509 ill_t *ill;
4510 ill_walk_context_t ctx;
4511 ipif_t *ipif;
4512 ipif_t *saved_ipif = NULL;
4513 ipif_t *dep_ipif = NULL;
4514
4515 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
4516 if (isv6)
4517 ill = ILL_START_WALK_V6(&ctx, ipst);
4518 else
4519 ill = ILL_START_WALK_V4(&ctx, ipst);
4520
4521 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
4522 mutex_enter(&ill->ill_lock);
4523 if (IS_VNI(ill) || IS_UNDER_IPMP(ill) ||
4524 ILL_IS_CONDEMNED(ill) ||
4525 !(ill->ill_flags & ILLF_MULTICAST)) {
4526 mutex_exit(&ill->ill_lock);
4527 continue;
4528 }
4529 for (ipif = ill->ill_ipif; ipif != NULL;
4530 ipif = ipif->ipif_next) {
4531 if (zoneid != ipif->ipif_zoneid &&
4532 zoneid != ALL_ZONES &&
4533 ipif->ipif_zoneid != ALL_ZONES) {
4534 continue;
4535 }
4536 if (!(ipif->ipif_flags & IPIF_UP) ||
4537 IPIF_IS_CONDEMNED(ipif)) {
4538 continue;
4539 }
4540
4541 /*
4542 * Found one candidate. If it is deprecated,
4543 * remember it in dep_ipif. If it is not deprecated,
4544 * remember it in saved_ipif.
4545 */
4546 if (ipif->ipif_flags & IPIF_DEPRECATED) {
4547 if (dep_ipif == NULL) {
4548 dep_ipif = ipif;
4549 } else if (ipif_comp_multi(dep_ipif, ipif,
4550 isv6)) {
4551 /*
4552 * If the previous dep_ipif does not
4553 * belong to the same ill, we've done
4554 * an ipif_refhold() on it. So we need
4555 * to release it.
4556 */
4557 if (dep_ipif->ipif_ill != ill)
4558 ipif_refrele(dep_ipif);
4559 dep_ipif = ipif;
4560 }
4561 continue;
4562 }
4563 if (saved_ipif == NULL) {
4564 saved_ipif = ipif;
4565 } else {
4566 if (ipif_comp_multi(saved_ipif, ipif, isv6)) {
4567 if (saved_ipif->ipif_ill != ill)
4568 ipif_refrele(saved_ipif);
4569 saved_ipif = ipif;
4570 }
4571 }
4572 }
4573 /*
4574 * Before going to the next ill, do an ipif_refhold() on the
4575 * saved ones.
4576 */
4577 if (saved_ipif != NULL && saved_ipif->ipif_ill == ill)
4578 ipif_refhold_locked(saved_ipif);
4579 if (dep_ipif != NULL && dep_ipif->ipif_ill == ill)
4580 ipif_refhold_locked(dep_ipif);
4581 mutex_exit(&ill->ill_lock);
4582 }
4583 rw_exit(&ipst->ips_ill_g_lock);
4584
4585 /*
4586 * If we have only the saved_ipif, return it. But if we have both
4587 * saved_ipif and dep_ipif, check to see which one is better.
4588 */
4589 if (saved_ipif != NULL) {
4590 if (dep_ipif != NULL) {
4591 if (ipif_comp_multi(saved_ipif, dep_ipif, isv6)) {
4592 ipif_refrele(saved_ipif);
4593 return (dep_ipif);
4594 } else {
4595 ipif_refrele(dep_ipif);
4596 return (saved_ipif);
4597 }
4598 }
4599 return (saved_ipif);
4600 } else {
4601 return (dep_ipif);
4602 }
4603 }
4604
4605 ill_t *
4606 ill_lookup_multicast(ip_stack_t *ipst, zoneid_t zoneid, boolean_t isv6)
4607 {
4608 ipif_t *ipif;
4609 ill_t *ill;
4610
4611 ipif = ipif_lookup_multicast(ipst, zoneid, isv6);
4612 if (ipif == NULL)
4613 return (NULL);
4614
4615 ill = ipif->ipif_ill;
4616 ill_refhold(ill);
4617 ipif_refrele(ipif);
4618 return (ill);
4619 }
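
/*
 * As with the other lookup routines, the returned ill is held; a
 * caller sketch (illustrative only):
 *
 *	ill_t *ill = ill_lookup_multicast(ipst, zoneid, B_FALSE);
 *
 *	if (ill != NULL) {
 *		...join or send on ill...
 *		ill_refrele(ill);
 *	}
 */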
4620
4621 /*
4622 * This function is called when an application does not specify an interface
4623 * to be used for multicast traffic (joining a group/sending data). It
4624 * calls ire_lookup_multi() to look for an interface route for the
4625 * specified multicast group. Doing this allows the administrator to add
4626 * prefix routes for multicast to indicate which interface is to be used for
4627 * multicast traffic in the above scenario. The route could be for all
4628 * multicast (224.0/4), for a single multicast group (a /32 route) or
4629 * anything in between. If there is no such multicast route, we just find
4630 * any multicast-capable interface and return it. The returned ill
4631 * is refheld.
4632 *
4633 * We support MULTIRT and RTF_SETSRC on the multicast routes added to the
4634 * unicast table. This is used by CGTP.
4635 */
4636 ill_t *
4637 ill_lookup_group_v4(ipaddr_t group, zoneid_t zoneid, ip_stack_t *ipst,
4638 boolean_t *multirtp, ipaddr_t *setsrcp)
4639 {
4640 ill_t *ill;
4641
4642 ill = ire_lookup_multi_ill_v4(group, zoneid, ipst, multirtp, setsrcp);
4643 if (ill != NULL)
4644 return (ill);
4645
4646 return (ill_lookup_multicast(ipst, zoneid, B_FALSE));
4647 }
4648
4649 /*
4650 * Look for an ipif with the specified interface address and destination.
4651 * The destination address is used only for matching point-to-point interfaces.
4652 */
4653 ipif_t *
4654 ipif_lookup_interface(ipaddr_t if_addr, ipaddr_t dst, ip_stack_t *ipst)
4655 {
4656 ipif_t *ipif;
4657 ill_t *ill;
4658 ill_walk_context_t ctx;
4659
4660 /*
4661 * First match all the point-to-point interfaces
4662 * before looking at non-point-to-point interfaces.
4663 * This is done to avoid returning a non-point-to-point
4664 * ipif instead of an unnumbered point-to-point ipif.
4665 */
4666 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
4667 ill = ILL_START_WALK_V4(&ctx, ipst);
4668 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
4669 mutex_enter(&ill->ill_lock);
4670 for (ipif = ill->ill_ipif; ipif != NULL;
4671 ipif = ipif->ipif_next) {
4672 /* Allow the ipif to be down */
4673 if ((ipif->ipif_flags & IPIF_POINTOPOINT) &&
4674 (ipif->ipif_lcl_addr == if_addr) &&
4675 (ipif->ipif_pp_dst_addr == dst)) {
4676 if (!IPIF_IS_CONDEMNED(ipif)) {
4677 ipif_refhold_locked(ipif);
4678 mutex_exit(&ill->ill_lock);
4679 rw_exit(&ipst->ips_ill_g_lock);
4680 return (ipif);
4681 }
4682 }
4683 }
4684 mutex_exit(&ill->ill_lock);
4685 }
4686 rw_exit(&ipst->ips_ill_g_lock);
4687
4688 /* lookup the ipif based on interface address */
4689 ipif = ipif_lookup_addr(if_addr, NULL, ALL_ZONES, ipst);
4690 ASSERT(ipif == NULL || !ipif->ipif_isv6);
4691 return (ipif);
4692 }
4693
4694 /*
4695 * Common function for ipif_lookup_addr() and ipif_lookup_addr_exact().
4696 */
4697 static ipif_t *
4698 ipif_lookup_addr_common(ipaddr_t addr, ill_t *match_ill, uint32_t match_flags,
4699 zoneid_t zoneid, ip_stack_t *ipst)
4700 {
4701 ipif_t *ipif;
4702 ill_t *ill;
4703 boolean_t ptp = B_FALSE;
4704 ill_walk_context_t ctx;
4705 boolean_t match_illgrp = (match_flags & IPIF_MATCH_ILLGRP);
4706 boolean_t no_duplicate = (match_flags & IPIF_MATCH_NONDUP);
4707
4708 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
4709 /*
4710 * Repeat twice, first based on local addresses and
4711 * next time for pointopoint.
4712 */
4713 repeat:
4714 ill = ILL_START_WALK_V4(&ctx, ipst);
4715 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
4716 if (match_ill != NULL && ill != match_ill &&
4717 (!match_illgrp || !IS_IN_SAME_ILLGRP(ill, match_ill))) {
4718 continue;
4719 }
4720 mutex_enter(&ill->ill_lock);
4721 for (ipif = ill->ill_ipif; ipif != NULL;
4722 ipif = ipif->ipif_next) {
4723 if (zoneid != ALL_ZONES &&
4724 zoneid != ipif->ipif_zoneid &&
4725 ipif->ipif_zoneid != ALL_ZONES)
4726 continue;
4727
4728 if (no_duplicate && !(ipif->ipif_flags & IPIF_UP))
4729 continue;
4730
4731 /* Allow the ipif to be down */
4732 if ((!ptp && (ipif->ipif_lcl_addr == addr) &&
4733 ((ipif->ipif_flags & IPIF_UNNUMBERED) == 0)) ||
4734 (ptp && (ipif->ipif_flags & IPIF_POINTOPOINT) &&
4735 (ipif->ipif_pp_dst_addr == addr))) {
4736 if (!IPIF_IS_CONDEMNED(ipif)) {
4737 ipif_refhold_locked(ipif);
4738 mutex_exit(&ill->ill_lock);
4739 rw_exit(&ipst->ips_ill_g_lock);
4740 return (ipif);
4741 }
4742 }
4743 }
4744 mutex_exit(&ill->ill_lock);
4745 }
4746
4747 /* If we already did the ptp case, then we are done */
4748 if (ptp) {
4749 rw_exit(&ipst->ips_ill_g_lock);
4750 return (NULL);
4751 }
4752 ptp = B_TRUE;
4753 goto repeat;
4754 }
4755
4756 /*
4757 * Lookup an ipif with the specified address. For point-to-point links we
4758 * look for matches on either the destination address or the local address,
4759 * but we skip the local address check if IPIF_UNNUMBERED is set. If the
4760 * `match_ill' argument is non-NULL, the lookup is restricted to that ill
4761 * (or illgrp if `match_ill' is in an IPMP group).
4762 */
4763 ipif_t *
4764 ipif_lookup_addr(ipaddr_t addr, ill_t *match_ill, zoneid_t zoneid,
4765 ip_stack_t *ipst)
4766 {
4767 return (ipif_lookup_addr_common(addr, match_ill, IPIF_MATCH_ILLGRP,
4768 zoneid, ipst));
4769 }
4770
4771 /*
4772 * Lookup an ipif with the specified address. Similar to ipif_lookup_addr,
4773 * except that we will only return an address if it is not marked as
4774 * IPIF_DUPLICATE.
4775 */
4776 ipif_t *
4777 ipif_lookup_addr_nondup(ipaddr_t addr, ill_t *match_ill, zoneid_t zoneid,
4778 ip_stack_t *ipst)
4779 {
4780 return (ipif_lookup_addr_common(addr, match_ill,
4781 (IPIF_MATCH_ILLGRP | IPIF_MATCH_NONDUP),
4782 zoneid, ipst));
4783 }
4784
4785 /*
4786 * Special abbreviated version of ipif_lookup_addr() that doesn't match
4787 * `match_ill' across the IPMP group. This function is only needed in some
4788 * corner-cases; almost everything should use ipif_lookup_addr().
4789 */
4790 ipif_t *
4791 ipif_lookup_addr_exact(ipaddr_t addr, ill_t *match_ill, ip_stack_t *ipst)
4792 {
4793 ASSERT(match_ill != NULL);
4794 return (ipif_lookup_addr_common(addr, match_ill, 0, ALL_ZONES,
4795 ipst));
4796 }
4797
4798 /*
4799 * Look for an ipif with the specified address. For point-to-point links
4800 * we look for matches on either the destination address or the local
4801 * address, but we ignore the check on the local address if IPIF_UNNUMBERED
4802 * is set.
4803 * If the `match_ill' argument is non-NULL, the lookup is restricted to that
4804 * ill (or illgrp if `match_ill' is in an IPMP group).
4805 * Return the zoneid for the ipif which matches. ALL_ZONES if no match.
4806 */
4807 zoneid_t
4808 ipif_lookup_addr_zoneid(ipaddr_t addr, ill_t *match_ill, ip_stack_t *ipst)
4809 {
4810 zoneid_t zoneid;
4811 ipif_t *ipif;
4812 ill_t *ill;
4813 boolean_t ptp = B_FALSE;
4814 ill_walk_context_t ctx;
4815
4816 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
4817 /*
4818 * Repeat twice, first based on local addresses and
4819 * next time for pointopoint.
4820 */
4821 repeat:
4822 ill = ILL_START_WALK_V4(&ctx, ipst);
4823 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
4824 if (match_ill != NULL && ill != match_ill &&
4825 !IS_IN_SAME_ILLGRP(ill, match_ill)) {
4826 continue;
4827 }
4828 mutex_enter(&ill->ill_lock);
4829 for (ipif = ill->ill_ipif; ipif != NULL;
4830 ipif = ipif->ipif_next) {
4831 /* Allow the ipif to be down */
4832 if ((!ptp && (ipif->ipif_lcl_addr == addr) &&
4833 ((ipif->ipif_flags & IPIF_UNNUMBERED) == 0)) ||
4834 (ptp && (ipif->ipif_flags & IPIF_POINTOPOINT) &&
4835 (ipif->ipif_pp_dst_addr == addr)) &&
4836 !(ipif->ipif_state_flags & IPIF_CONDEMNED)) {
4837 zoneid = ipif->ipif_zoneid;
4838 mutex_exit(&ill->ill_lock);
4839 rw_exit(&ipst->ips_ill_g_lock);
4840 /*
4841 * If ipif_zoneid was ALL_ZONES then we have
4842 * a trusted extensions shared IP address.
4843 * In that case GLOBAL_ZONEID works to send.
4844 */
4845 if (zoneid == ALL_ZONES)
4846 zoneid = GLOBAL_ZONEID;
4847 return (zoneid);
4848 }
4849 }
4850 mutex_exit(&ill->ill_lock);
4851 }
4852
4853 /* If we already did the ptp case, then we are done */
4854 if (ptp) {
4855 rw_exit(&ipst->ips_ill_g_lock);
4856 return (ALL_ZONES);
4857 }
4858 ptp = B_TRUE;
4859 goto repeat;
4860 }
4861
4862 /*
4863 * Look for an ipif that matches the specified remote address i.e. the
4864 * ipif that would receive the specified packet.
4865 * First look for directly connected interfaces and then do a recursive
4866 * IRE lookup and pick the first ipif corresponding to the source address in the
4867 * ire.
4868 * Returns: held ipif
4869 *
4870 * This is only used for ICMP_ADDRESS_MASK_REQUESTs
4871 */
4872 ipif_t *
4873 ipif_lookup_remote(ill_t *ill, ipaddr_t addr, zoneid_t zoneid)
4874 {
4875 ipif_t *ipif;
4876
4877 ASSERT(!ill->ill_isv6);
4878
4879 /*
4880 * Someone could be changing this ipif currently, or change it
4881 * after we return this. Thus a few packets could use the old
4882 * values. However, structure updates/creates (ire, ilg, ilm etc.)
4883 * will be atomically updated or cleaned up with the new value.
4884 * Thus we don't need a lock to check the flags or other attrs below.
4885 */
4886 mutex_enter(&ill->ill_lock);
4887 for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
4888 if (IPIF_IS_CONDEMNED(ipif))
4889 continue;
4890 if (zoneid != ALL_ZONES && zoneid != ipif->ipif_zoneid &&
4891 ipif->ipif_zoneid != ALL_ZONES)
4892 continue;
4893 /* Allow the ipif to be down */
4894 if (ipif->ipif_flags & IPIF_POINTOPOINT) {
4895 if ((ipif->ipif_pp_dst_addr == addr) ||
4896 (!(ipif->ipif_flags & IPIF_UNNUMBERED) &&
4897 ipif->ipif_lcl_addr == addr)) {
4898 ipif_refhold_locked(ipif);
4899 mutex_exit(&ill->ill_lock);
4900 return (ipif);
4901 }
4902 } else if (ipif->ipif_subnet == (addr & ipif->ipif_net_mask)) {
4903 ipif_refhold_locked(ipif);
4904 mutex_exit(&ill->ill_lock);
4905 return (ipif);
4906 }
4907 }
4908 mutex_exit(&ill->ill_lock);
4909 /*
4910 * For a remote destination it isn't possible to nail down a particular
4911 * ipif.
4912 */
4913
4914 /* Pick the first interface */
4915 ipif = ipif_get_next_ipif(NULL, ill);
4916 return (ipif);
4917 }
4918
4919 /*
4920 * This func does not prevent refcnt from increasing. But if
4921 * the caller has taken steps to that effect, then this func
4922 * can be used to determine whether the ill has become quiescent.
4923 */
4924 static boolean_t
4925 ill_is_quiescent(ill_t *ill)
4926 {
4927 ipif_t *ipif;
4928
4929 ASSERT(MUTEX_HELD(&ill->ill_lock));
4930
4931 for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
4932 if (ipif->ipif_refcnt != 0)
4933 return (B_FALSE);
4934 }
4935 if (!ILL_DOWN_OK(ill) || ill->ill_refcnt != 0) {
4936 return (B_FALSE);
4937 }
4938 return (B_TRUE);
4939 }
4940
4941 boolean_t
4942 ill_is_freeable(ill_t *ill)
4943 {
4944 ipif_t *ipif;
4945
4946 ASSERT(MUTEX_HELD(&ill->ill_lock));
4947
4948 for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
4949 if (ipif->ipif_refcnt != 0) {
4950 return (B_FALSE);
4951 }
4952 }
4953 if (!ILL_FREE_OK(ill) || ill->ill_refcnt != 0) {
4954 return (B_FALSE);
4955 }
4956 return (B_TRUE);
4957 }
4958
4959 /*
4960 * This func does not prevent refcnt from increasing. But if
4961 * the caller has taken steps to that effect, then this func
4962 * can be used to determine whether the ipif has become quiescent.
4963 */
4964 static boolean_t
4965 ipif_is_quiescent(ipif_t *ipif)
4966 {
4967 ill_t *ill;
4968
4969 ASSERT(MUTEX_HELD(&ipif->ipif_ill->ill_lock));
4970
4971 if (ipif->ipif_refcnt != 0)
4972 return (B_FALSE);
4973
4974 ill = ipif->ipif_ill;
4975 if (ill->ill_ipif_up_count != 0 || ill->ill_ipif_dup_count != 0 ||
4976 ill->ill_logical_down) {
4977 return (B_TRUE);
4978 }
4979
4980 /* This is the last ipif going down or being deleted on this ill */
4981 if (ill->ill_ire_cnt != 0 || ill->ill_refcnt != 0) {
4982 return (B_FALSE);
4983 }
4984
4985 return (B_TRUE);
4986 }
4987
4988 /*
4989 * return true if the ipif can be destroyed: the ipif has to be quiescent
4990 * with zero references from ire/ilm to it.
4991 */
4992 static boolean_t
4993 ipif_is_freeable(ipif_t *ipif)
4994 {
4995 ASSERT(MUTEX_HELD(&ipif->ipif_ill->ill_lock));
4996 ASSERT(ipif->ipif_id != 0);
4997 return (ipif->ipif_refcnt == 0);
4998 }
4999
5000 /*
5001 * The ipif/ill/ire has been refreled. Do the tail processing.
5002 * Determine if the ipif or ill in question has become quiescent and if
5003 * so wake up the close and/or restart any queued pending ioctl that is
5004 * waiting for the ipif_down (or ill_down).
5005 */
5006 void
5007 ipif_ill_refrele_tail(ill_t *ill)
5008 {
5009 mblk_t *mp;
5010 conn_t *connp;
5011 ipsq_t *ipsq;
5012 ipxop_t *ipx;
5013 ipif_t *ipif;
5014 dl_notify_ind_t *dlindp;
5015
5016 ASSERT(MUTEX_HELD(&ill->ill_lock));
5017
5018 if ((ill->ill_state_flags & ILL_CONDEMNED) && ill_is_freeable(ill)) {
5019 /* ip_modclose() may be waiting */
5020 cv_broadcast(&ill->ill_cv);
5021 }
5022
5023 ipsq = ill->ill_phyint->phyint_ipsq;
5024 mutex_enter(&ipsq->ipsq_lock);
5025 ipx = ipsq->ipsq_xop;
5026 mutex_enter(&ipx->ipx_lock);
5027 if (ipx->ipx_waitfor == 0) /* no one's waiting; bail */
5028 goto unlock;
5029
5030 ASSERT(ipx->ipx_pending_mp != NULL && ipx->ipx_pending_ipif != NULL);
5031
5032 ipif = ipx->ipx_pending_ipif;
5033 if (ipif->ipif_ill != ill) /* wait is for another ill; bail */
5034 goto unlock;
5035
5036 switch (ipx->ipx_waitfor) {
5037 case IPIF_DOWN:
5038 if (!ipif_is_quiescent(ipif))
5039 goto unlock;
5040 break;
5041 case IPIF_FREE:
5042 if (!ipif_is_freeable(ipif))
5043 goto unlock;
5044 break;
5045 case ILL_DOWN:
5046 if (!ill_is_quiescent(ill))
5047 goto unlock;
5048 break;
5049 case ILL_FREE:
5050 /*
5051 * ILL_FREE is only for loopback; normal ill teardown waits
5052 * synchronously in ip_modclose() without using ipx_waitfor,
5053 * handled by the cv_broadcast() at the top of this function.
5054 */
5055 if (!ill_is_freeable(ill))
5056 goto unlock;
5057 break;
5058 default:
5059 cmn_err(CE_PANIC, "ipsq: %p unknown ipx_waitfor %d\n",
5060 (void *)ipsq, ipx->ipx_waitfor);
5061 }
5062
5063 ill_refhold_locked(ill); /* for qwriter_ip() call below */
5064 mutex_exit(&ipx->ipx_lock);
5065 mp = ipsq_pending_mp_get(ipsq, &connp);
5066 mutex_exit(&ipsq->ipsq_lock);
5067 mutex_exit(&ill->ill_lock);
5068
5069 ASSERT(mp != NULL);
5070 /*
5071 * NOTE: all of the qwriter_ip() calls below use CUR_OP since
5072 * we can only get here when the current operation decides it
5073 * needs to quiesce via ipsq_pending_mp_add().
5074 */
5075 switch (mp->b_datap->db_type) {
5076 case M_PCPROTO:
5077 case M_PROTO:
5078 /*
5079 * For now, only DL_NOTIFY_IND messages can use this facility.
5080 */
5081 dlindp = (dl_notify_ind_t *)mp->b_rptr;
5082 ASSERT(dlindp->dl_primitive == DL_NOTIFY_IND);
5083
5084 switch (dlindp->dl_notification) {
5085 case DL_NOTE_PHYS_ADDR:
5086 qwriter_ip(ill, ill->ill_rq, mp,
5087 ill_set_phys_addr_tail, CUR_OP, B_TRUE);
5088 return;
5089 case DL_NOTE_REPLUMB:
5090 qwriter_ip(ill, ill->ill_rq, mp,
5091 ill_replumb_tail, CUR_OP, B_TRUE);
5092 return;
5093 default:
5094 ASSERT(0);
5095 ill_refrele(ill);
5096 }
5097 break;
5098
5099 case M_ERROR:
5100 case M_HANGUP:
5101 qwriter_ip(ill, ill->ill_rq, mp, ipif_all_down_tail, CUR_OP,
5102 B_TRUE);
5103 return;
5104
5105 case M_IOCTL:
5106 case M_IOCDATA:
5107 qwriter_ip(ill, (connp != NULL ? CONNP_TO_WQ(connp) :
5108 ill->ill_wq), mp, ip_reprocess_ioctl, CUR_OP, B_TRUE);
5109 return;
5110
5111 default:
5112 cmn_err(CE_PANIC, "ipif_ill_refrele_tail mp %p "
5113 "db_type %d\n", (void *)mp, mp->b_datap->db_type);
5114 }
5115 return;
5116 unlock:
5117 mutex_exit(&ipsq->ipsq_lock);
5118 mutex_exit(&ipx->ipx_lock);
5119 mutex_exit(&ill->ill_lock);
5120 }
5121
5122 #ifdef DEBUG
5123 /* Record a trace entry; wrap to the start of the buffer once the end is reached */
5124 static void
5125 th_trace_rrecord(th_trace_t *th_trace)
5126 {
5127 tr_buf_t *tr_buf;
5128 uint_t lastref;
5129
5130 lastref = th_trace->th_trace_lastref;
5131 lastref++;
5132 if (lastref == TR_BUF_MAX)
5133 lastref = 0;
5134 th_trace->th_trace_lastref = lastref;
5135 tr_buf = &th_trace->th_trbuf[lastref];
5136 tr_buf->tr_time = ddi_get_lbolt();
5137 tr_buf->tr_depth = getpcstack(tr_buf->tr_stack, TR_STACK_DEPTH);
5138 }
5139
5140 static void
5141 th_trace_free(void *value)
5142 {
5143 th_trace_t *th_trace = value;
5144
5145 ASSERT(th_trace->th_refcnt == 0);
5146 kmem_free(th_trace, sizeof (*th_trace));
5147 }
5148
5149 /*
5150 * Find or create the per-thread hash table used to track object references.
5151 * The ipst argument is NULL if we shouldn't allocate.
5152 *
5153 * Accesses per-thread data, so there's no need to lock here.
5154 */
5155 static mod_hash_t *
5156 th_trace_gethash(ip_stack_t *ipst)
5157 {
5158 th_hash_t *thh;
5159
5160 if ((thh = tsd_get(ip_thread_data)) == NULL && ipst != NULL) {
5161 mod_hash_t *mh;
5162 char name[256];
5163 size_t objsize, rshift;
5164 int retv;
5165
5166 if ((thh = kmem_alloc(sizeof (*thh), KM_NOSLEEP)) == NULL)
5167 return (NULL);
5168 (void) snprintf(name, sizeof (name), "th_trace_%p",
5169 (void *)curthread);
5170
5171 /*
5172 * We use mod_hash_create_extended here rather than the more
5173 * obvious mod_hash_create_ptrhash because the latter has a
5174 * hard-coded KM_SLEEP, and we'd prefer to fail rather than
5175 * block.
5176 */
5177 objsize = MAX(MAX(sizeof (ill_t), sizeof (ipif_t)),
5178 MAX(sizeof (ire_t), sizeof (ncec_t)));
5179 rshift = highbit(objsize);
5180 mh = mod_hash_create_extended(name, 64, mod_hash_null_keydtor,
5181 th_trace_free, mod_hash_byptr, (void *)rshift,
5182 mod_hash_ptrkey_cmp, KM_NOSLEEP);
5183 if (mh == NULL) {
5184 kmem_free(thh, sizeof (*thh));
5185 return (NULL);
5186 }
5187 thh->thh_hash = mh;
5188 thh->thh_ipst = ipst;
5189 /*
5190 * We trace ills, ipifs, ires, and nces. All of these are
5191 * per-IP-stack, so the lock on the thread list is as well.
5192 */
5193 rw_enter(&ip_thread_rwlock, RW_WRITER);
5194 list_insert_tail(&ip_thread_list, thh);
5195 rw_exit(&ip_thread_rwlock);
5196 retv = tsd_set(ip_thread_data, thh);
5197 ASSERT(retv == 0);
5198 }
5199 return (thh != NULL ? thh->thh_hash : NULL);
5200 }
5201
5202 boolean_t
5203 th_trace_ref(const void *obj, ip_stack_t *ipst)
5204 {
5205 th_trace_t *th_trace;
5206 mod_hash_t *mh;
5207 mod_hash_val_t val;
5208
5209 if ((mh = th_trace_gethash(ipst)) == NULL)
5210 return (B_FALSE);
5211
5212 /*
5213 * Attempt to locate the trace buffer for this obj and thread.
5214 * If it does not exist, then allocate a new trace buffer and
5215 * insert into the hash.
5216 */
5217 if (mod_hash_find(mh, (mod_hash_key_t)obj, &val) == MH_ERR_NOTFOUND) {
5218 th_trace = kmem_zalloc(sizeof (th_trace_t), KM_NOSLEEP);
5219 if (th_trace == NULL)
5220 return (B_FALSE);
5221
5222 th_trace->th_id = curthread;
5223 if (mod_hash_insert(mh, (mod_hash_key_t)obj,
5224 (mod_hash_val_t)th_trace) != 0) {
5225 kmem_free(th_trace, sizeof (th_trace_t));
5226 return (B_FALSE);
5227 }
5228 } else {
5229 th_trace = (th_trace_t *)val;
5230 }
5231
5232 ASSERT(th_trace->th_refcnt >= 0 &&
5233 th_trace->th_refcnt < TR_BUF_MAX - 1);
5234
5235 th_trace->th_refcnt++;
5236 th_trace_rrecord(th_trace);
5237 return (B_TRUE);
5238 }
5239
5240 /*
5241 * For the purpose of tracing a reference release, we assume that global
5242 * tracing is always on and that the same thread that initiated the
5243 * reference hold is releasing it.
5244 */
5245 void
5246 th_trace_unref(const void *obj)
5247 {
5248 int retv;
5249 mod_hash_t *mh;
5250 th_trace_t *th_trace;
5251 mod_hash_val_t val;
5252
5253 mh = th_trace_gethash(NULL);
5254 retv = mod_hash_find(mh, (mod_hash_key_t)obj, &val);
5255 ASSERT(retv == 0);
5256 th_trace = (th_trace_t *)val;
5257
5258 ASSERT(th_trace->th_refcnt > 0);
5259 th_trace->th_refcnt--;
5260 th_trace_rrecord(th_trace);
5261 }
5262
5263 /*
5264 * If tracing has been disabled, then we assume that the reference counts are
5265 * now useless, and we clear them out before destroying the entries.
5266 */
5267 void
5268 th_trace_cleanup(const void *obj, boolean_t trace_disable)
5269 {
5270 th_hash_t *thh;
5271 mod_hash_t *mh;
5272 mod_hash_val_t val;
5273 th_trace_t *th_trace;
5274 int retv;
5275
5276 rw_enter(&ip_thread_rwlock, RW_READER);
5277 for (thh = list_head(&ip_thread_list); thh != NULL;
5278 thh = list_next(&ip_thread_list, thh)) {
5279 if (mod_hash_find(mh = thh->thh_hash, (mod_hash_key_t)obj,
5280 &val) == 0) {
5281 th_trace = (th_trace_t *)val;
5282 if (trace_disable)
5283 th_trace->th_refcnt = 0;
5284 retv = mod_hash_destroy(mh, (mod_hash_key_t)obj);
5285 ASSERT(retv == 0);
5286 }
5287 }
5288 rw_exit(&ip_thread_rwlock);
5289 }
5290
5291 void
5292 ipif_trace_ref(ipif_t *ipif)
5293 {
5294 ASSERT(MUTEX_HELD(&ipif->ipif_ill->ill_lock));
5295
5296 if (ipif->ipif_trace_disable)
5297 return;
5298
5299 if (!th_trace_ref(ipif, ipif->ipif_ill->ill_ipst)) {
5300 ipif->ipif_trace_disable = B_TRUE;
5301 ipif_trace_cleanup(ipif);
5302 }
5303 }
5304
5305 void
5306 ipif_untrace_ref(ipif_t *ipif)
5307 {
5308 ASSERT(MUTEX_HELD(&ipif->ipif_ill->ill_lock));
5309
5310 if (!ipif->ipif_trace_disable)
5311 th_trace_unref(ipif);
5312 }
5313
5314 void
5315 ill_trace_ref(ill_t *ill)
5316 {
5317 ASSERT(MUTEX_HELD(&ill->ill_lock));
5318
5319 if (ill->ill_trace_disable)
5320 return;
5321
5322 if (!th_trace_ref(ill, ill->ill_ipst)) {
5323 ill->ill_trace_disable = B_TRUE;
5324 ill_trace_cleanup(ill);
5325 }
5326 }
5327
5328 void
5329 ill_untrace_ref(ill_t *ill)
5330 {
5331 ASSERT(MUTEX_HELD(&ill->ill_lock));
5332
5333 if (!ill->ill_trace_disable)
5334 th_trace_unref(ill);
5335 }
5336
5337 /*
5338 * Called when ipif is unplumbed or when memory alloc fails. Note that on
5339 * failure, ipif_trace_disable is set.
5340 */
5341 static void
5342 ipif_trace_cleanup(const ipif_t *ipif)
5343 {
5344 th_trace_cleanup(ipif, ipif->ipif_trace_disable);
5345 }
5346
5347 /*
5348 * Called when ill is unplumbed or when memory alloc fails. Note that on
5349 * failure, ill_trace_disable is set.
5350 */
5351 static void
5352 ill_trace_cleanup(const ill_t *ill)
5353 {
5354 th_trace_cleanup(ill, ill->ill_trace_disable);
5355 }
5356 #endif /* DEBUG */
5357
5358 void
5359 ipif_refhold_locked(ipif_t *ipif)
5360 {
5361 ASSERT(MUTEX_HELD(&ipif->ipif_ill->ill_lock));
5362 ipif->ipif_refcnt++;
5363 IPIF_TRACE_REF(ipif);
5364 }
5365
5366 void
5367 ipif_refhold(ipif_t *ipif)
5368 {
5369 ill_t *ill;
5370
5371 ill = ipif->ipif_ill;
5372 mutex_enter(&ill->ill_lock);
5373 ipif->ipif_refcnt++;
5374 IPIF_TRACE_REF(ipif);
5375 mutex_exit(&ill->ill_lock);
5376 }
5377
5378 /*
5379 * Must not be called while holding any locks. Otherwise if this is
5380 * the last reference to be released, there is a chance of recursive mutex
5381 * panic due to ipif_refrele -> ipif_ill_refrele_tail -> qwriter_ip trying
5382 * to restart an ioctl.
5383 */
5384 void
5385 ipif_refrele(ipif_t *ipif)
5386 {
5387 ill_t *ill;
5388
5389 ill = ipif->ipif_ill;
5390
5391 mutex_enter(&ill->ill_lock);
5392 ASSERT(ipif->ipif_refcnt != 0);
5393 ipif->ipif_refcnt--;
5394 IPIF_UNTRACE_REF(ipif);
5395 if (ipif->ipif_refcnt != 0) {
5396 mutex_exit(&ill->ill_lock);
5397 return;
5398 }
5399
5400 /* Drops the ill_lock */
5401 ipif_ill_refrele_tail(ill);
5402 }
5403
5404 ipif_t *
5405 ipif_get_next_ipif(ipif_t *curr, ill_t *ill)
5406 {
5407 ipif_t *ipif;
5408
5409 mutex_enter(&ill->ill_lock);
5410 for (ipif = (curr == NULL ? ill->ill_ipif : curr->ipif_next);
5411 ipif != NULL; ipif = ipif->ipif_next) {
5412 if (IPIF_IS_CONDEMNED(ipif))
5413 continue;
5414 ipif_refhold_locked(ipif);
5415 mutex_exit(&ill->ill_lock);
5416 return (ipif);
5417 }
5418 mutex_exit(&ill->ill_lock);
5419 return (NULL);
5420 }
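
/*
 * A minimal iteration sketch (illustrative only): walk all
 * non-condemned ipifs on an ill, taking the hold on the next ipif
 * before dropping the hold on the current one:
 *
 *	ipif_t *ipif, *next;
 *
 *	for (ipif = ipif_get_next_ipif(NULL, ill); ipif != NULL;
 *	    ipif = next) {
 *		next = ipif_get_next_ipif(ipif, ill);
 *		...use ipif...
 *		ipif_refrele(ipif);
 *	}
 */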
5421
5422 /*
5423 * TODO: make this table extendible at run time
5424 * Return a pointer to the mac type info for 'mac_type'
5425 */
5426 static ip_m_t *
5427 ip_m_lookup(t_uscalar_t mac_type)
5428 {
5429 ip_m_t *ipm;
5430
5431 for (ipm = ip_m_tbl; ipm < A_END(ip_m_tbl); ipm++)
5432 if (ipm->ip_m_mac_type == mac_type)
5433 return (ipm);
5434 return (NULL);
5435 }
5436
5437 /*
5438 * Make a link layer address from the multicast IP address *addr.
5439 * To form the link layer address, invoke the ip_m_v*mapping function
5440 * associated with the link-layer type.
5441 */
5442 void
5443 ip_mcast_mapping(ill_t *ill, uchar_t *addr, uchar_t *hwaddr)
5444 {
5445 ip_m_t *ipm;
5446
5447 if (ill->ill_net_type == IRE_IF_NORESOLVER)
5448 return;
5449
5450 ASSERT(addr != NULL);
5451
5452 ipm = ip_m_lookup(ill->ill_mactype);
5453 if (ipm == NULL ||
5454 (ill->ill_isv6 && ipm->ip_m_v6mapping == NULL) ||
5455 (!ill->ill_isv6 && ipm->ip_m_v4mapping == NULL)) {
5456 ip0dbg(("no mapping for ill %s mactype 0x%x\n",
5457 ill->ill_name, ill->ill_mactype));
5458 return;
5459 }
5460 if (ill->ill_isv6)
5461 (*ipm->ip_m_v6mapping)(ill, addr, hwaddr);
5462 else
5463 (*ipm->ip_m_v4mapping)(ill, addr, hwaddr);
5464 }
5465
5466 /*
5467 * Returns B_FALSE if the IPv4 netmask pointed to by 'mask' is non-contiguous.
5468 * Otherwise returns B_TRUE.
5469 *
5470 * The netmask can be verified to be contiguous with 32 shifts and or
5471 * operations. Take the contiguous mask (in host byte order) and compute
5472 * mask | mask << 1 | mask << 2 | ... | mask << 31
5473 * the result will be the same as 'mask' for a contiguous mask.
5474 */
5475 static boolean_t
5476 ip_contiguous_mask(uint32_t mask)
5477 {
5478 uint32_t m = mask;
5479 int i;
5480
5481 for (i = 1; i < 32; i++)
5482 m |= (mask << i);
5483
5484 return (m == mask);
5485 }
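
/*
 * Worked example: for the contiguous mask 0xffffff00 (255.255.255.0),
 * every left shift only sets bits that are already in the mask, so the
 * OR accumulation leaves m == mask and we return B_TRUE. For the
 * non-contiguous 0xff00ff00, mask << 8 alone contributes 0x00ff0000,
 * so m grows to 0xffffff00 != mask and we return B_FALSE.
 */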
5486
5487 /*
5488 * ip_rt_add is called to add an IPv4 route to the forwarding table.
5489 * ill is passed in to associate it with the correct interface.
5490 * If ire_arg is set, then we return the held IRE in that location.
5491 */
5492 int
5493 ip_rt_add(ipaddr_t dst_addr, ipaddr_t mask, ipaddr_t gw_addr,
5494 ipaddr_t src_addr, int flags, ill_t *ill, ire_t **ire_arg,
5495 boolean_t ioctl_msg, struct rtsa_s *sp, ip_stack_t *ipst, zoneid_t zoneid)
5496 {
5497 ire_t *ire, *nire;
5498 ire_t *gw_ire = NULL;
5499 ipif_t *ipif = NULL;
5500 uint_t type;
5501 int match_flags = MATCH_IRE_TYPE;
5502 tsol_gc_t *gc = NULL;
5503 tsol_gcgrp_t *gcgrp = NULL;
5504 boolean_t gcgrp_xtraref = B_FALSE;
5505 boolean_t cgtp_broadcast;
5506 boolean_t unbound = B_FALSE;
5507
5508 ip1dbg(("ip_rt_add:"));
5509
5510 if (ire_arg != NULL)
5511 *ire_arg = NULL;
5512
5513 /* disallow non-contiguous netmasks */
5514 if (!ip_contiguous_mask(ntohl(mask)))
5515 return (ENOTSUP);
5516
5517 /*
5518 * If this is the case of RTF_HOST being set, then we set the netmask
5519 * to all ones (regardless of whether one was supplied).
5520 */
5521 if (flags & RTF_HOST)
5522 mask = IP_HOST_MASK;
5523
5524 /*
5525 * Prevent routes with a zero gateway from being created (since
	 * interfaces can currently be plumbed and brought up with no assigned
5527 * address).
5528 */
5529 if (gw_addr == 0)
5530 return (ENETUNREACH);
5531 /*
5532 * Get the ipif, if any, corresponding to the gw_addr
	 * If -ifp was specified we restrict ourselves to the ill; otherwise
	 * we match on the gateway and destination to handle unnumbered pt-pt
5535 * interfaces.
5536 */
5537 if (ill != NULL)
5538 ipif = ipif_lookup_addr(gw_addr, ill, ALL_ZONES, ipst);
5539 else
5540 ipif = ipif_lookup_interface(gw_addr, dst_addr, ipst);
5541 if (ipif != NULL) {
5542 if (IS_VNI(ipif->ipif_ill)) {
5543 ipif_refrele(ipif);
5544 return (EINVAL);
5545 }
5546 }
5547
5548 /*
5549 * GateD will attempt to create routes with a loopback interface
5550 * address as the gateway and with RTF_GATEWAY set. We allow
5551 * these routes to be added, but create them as interface routes
5552 * since the gateway is an interface address.
5553 */
5554 if ((ipif != NULL) && (ipif->ipif_ire_type == IRE_LOOPBACK)) {
5555 flags &= ~RTF_GATEWAY;
5556 if (gw_addr == INADDR_LOOPBACK && dst_addr == INADDR_LOOPBACK &&
5557 mask == IP_HOST_MASK) {
5558 ire = ire_ftable_lookup_v4(dst_addr, 0, 0, IRE_LOOPBACK,
5559 NULL, ALL_ZONES, NULL, MATCH_IRE_TYPE, 0, ipst,
5560 NULL);
5561 if (ire != NULL) {
5562 ire_refrele(ire);
5563 ipif_refrele(ipif);
5564 return (EEXIST);
5565 }
			ip1dbg(("ip_rt_add: 0x%p creating IRE 0x%x "
5567 "for 0x%x\n", (void *)ipif,
5568 ipif->ipif_ire_type,
5569 ntohl(ipif->ipif_lcl_addr)));
5570 ire = ire_create(
5571 (uchar_t *)&dst_addr, /* dest address */
5572 (uchar_t *)&mask, /* mask */
5573 NULL, /* no gateway */
5574 ipif->ipif_ire_type, /* LOOPBACK */
5575 ipif->ipif_ill,
5576 zoneid,
5577 (ipif->ipif_flags & IPIF_PRIVATE) ? RTF_PRIVATE : 0,
5578 NULL,
5579 ipst);
5580
5581 if (ire == NULL) {
5582 ipif_refrele(ipif);
5583 return (ENOMEM);
5584 }
5585 /* src address assigned by the caller? */
5586 if ((src_addr != INADDR_ANY) && (flags & RTF_SETSRC))
5587 ire->ire_setsrc_addr = src_addr;
5588
5589 nire = ire_add(ire);
5590 if (nire == NULL) {
5591 /*
				 * In the case of failure, ire_add() will have
5593 * already deleted the ire in question, so there
5594 * is no need to do that here.
5595 */
5596 ipif_refrele(ipif);
5597 return (ENOMEM);
5598 }
5599 /*
5600 * Check if it was a duplicate entry. This handles
5601 * the case of two racing route adds for the same route
5602 */
5603 if (nire != ire) {
5604 ASSERT(nire->ire_identical_ref > 1);
5605 ire_delete(nire);
5606 ire_refrele(nire);
5607 ipif_refrele(ipif);
5608 return (EEXIST);
5609 }
5610 ire = nire;
5611 goto save_ire;
5612 }
5613 }
5614
5615 /*
5616 * The routes for multicast with CGTP are quite special in that
5617 * the gateway is the local interface address, yet RTF_GATEWAY
5618 * is set. We turn off RTF_GATEWAY to provide compatibility with
5619 * this undocumented and unusual use of multicast routes.
5620 */
5621 if ((flags & RTF_MULTIRT) && ipif != NULL)
5622 flags &= ~RTF_GATEWAY;
5623
5624 /*
5625 * Traditionally, interface routes are ones where RTF_GATEWAY isn't set
5626 * and the gateway address provided is one of the system's interface
5627 * addresses. By using the routing socket interface and supplying an
	 * RTA_IFP sockaddr with an interface index, an alternate method of
	 * specifying an interface route is available: the index directly
	 * identifies the outgoing interface, rather than identifying it by an
	 * address (which may not uniquely identify an interface). When coupled
	 * with the RTF_GATEWAY
5633 * flag, routes can be specified which not only specify the next-hop to
5634 * be used when routing to a certain prefix, but also which outgoing
5635 * interface should be used.
5636 *
5637 * Previously, interfaces would have unique addresses assigned to them
5638 * and so the address assigned to a particular interface could be used
5639 * to identify a particular interface. One exception to this was the
5640 * case of an unnumbered interface (where IPIF_UNNUMBERED was set).
5641 *
5642 * With the advent of IPv6 and its link-local addresses, this
5643 * restriction was relaxed and interfaces could share addresses between
5644 * themselves. In fact, typically all of the link-local interfaces on
5645 * an IPv6 node or router will have the same link-local address. In
5646 * order to differentiate between these interfaces, the use of an
5647 * interface index is necessary and this index can be carried inside a
5648 * RTA_IFP sockaddr (which is actually a sockaddr_dl). One restriction
5649 * of using the interface index, however, is that all of the ipif's that
5650 * are part of an ill have the same index and so the RTA_IFP sockaddr
5651 * cannot be used to differentiate between ipif's (or logical
5652 * interfaces) that belong to the same ill (physical interface).
5653 *
5654 * For example, in the following case involving IPv4 interfaces and
5655 * logical interfaces
5656 *
5657 * 192.0.2.32 255.255.255.224 192.0.2.33 U if0
5658 * 192.0.2.32 255.255.255.224 192.0.2.34 U if0
5659 * 192.0.2.32 255.255.255.224 192.0.2.35 U if0
5660 *
5661 * the ipif's corresponding to each of these interface routes can be
5662 * uniquely identified by the "gateway" (actually interface address).
5663 *
	 * In the following case involving multiple IPv6 default routes to a
	 * particular
5665 * link-local gateway, the use of RTA_IFP is necessary to specify which
5666 * default route is of interest:
5667 *
5668 * default fe80::123:4567:89ab:cdef U if0
5669 * default fe80::123:4567:89ab:cdef U if1
5670 */
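
	/*
	 * For reference (editor's illustration of the standard
	 * routing-socket layout; see route(7P)): an RTA_IFP request
	 * arrives as an rt_msghdr followed by sockaddrs in RTA bit
	 * order, e.g. for rtm_addrs = RTA_DST|RTA_GATEWAY|RTA_NETMASK|
	 * RTA_IFP:
	 *
	 *	struct rt_msghdr rtm;		rtm_type = RTM_ADD
	 *	struct sockaddr_in dst;		destination prefix
	 *	struct sockaddr_in gateway;	next-hop (with RTF_GATEWAY)
	 *	struct sockaddr_in netmask;	prefix mask
	 *	struct sockaddr_dl ifp;		sdl_index of the interface
	 */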
5671
5672 /* RTF_GATEWAY not set */
5673 if (!(flags & RTF_GATEWAY)) {
5674 if (sp != NULL) {
5675 ip2dbg(("ip_rt_add: gateway security attributes "
5676 "cannot be set with interface route\n"));
5677 if (ipif != NULL)
5678 ipif_refrele(ipif);
5679 return (EINVAL);
5680 }
5681
5682 /*
5683 * Whether or not ill (RTA_IFP) is set, we require that
5684 * the gateway is one of our local addresses.
5685 */
5686 if (ipif == NULL)
5687 return (ENETUNREACH);
5688
5689 /*
5690 * We use MATCH_IRE_ILL here. If the caller specified an
5691 * interface (from the RTA_IFP sockaddr) we use it, otherwise
5692 * we use the ill derived from the gateway address.
5693 * We can always match the gateway address since we record it
5694 * in ire_gateway_addr.
5695 * We don't allow RTA_IFP to specify a different ill than the
5696 * one matching the ipif to make sure we can delete the route.
5697 */
5698 match_flags |= MATCH_IRE_GW | MATCH_IRE_ILL;
5699 if (ill == NULL) {
5700 ill = ipif->ipif_ill;
5701 } else if (ill != ipif->ipif_ill) {
5702 ipif_refrele(ipif);
5703 return (EINVAL);
5704 }
5705
5706 /*
5707 * We check for an existing entry at this point.
5708 *
5709 * Since a netmask isn't passed in via the ioctl interface
5710 * (SIOCADDRT), we don't check for a matching netmask in that
5711 * case.
5712 */
5713 if (!ioctl_msg)
5714 match_flags |= MATCH_IRE_MASK;
5715 ire = ire_ftable_lookup_v4(dst_addr, mask, gw_addr,
5716 IRE_INTERFACE, ill, ALL_ZONES, NULL, match_flags, 0, ipst,
5717 NULL);
5718 if (ire != NULL) {
5719 ire_refrele(ire);
5720 ipif_refrele(ipif);
5721 return (EEXIST);
5722 }
5723
5724 /*
5725 * Some software (for example, GateD and Sun Cluster) attempts
5726 * to create (what amount to) IRE_PREFIX routes with the
5727 * loopback address as the gateway. This is primarily done to
5728 * set up prefixes with the RTF_REJECT flag set (for example,
5729 * when generating aggregate routes.)
5730 *
5731 * If the IRE type (as defined by ill->ill_net_type) would be
		 * IRE_LOOPBACK, then we map the request into an
5733 * IRE_IF_NORESOLVER. We also OR in the RTF_BLACKHOLE flag as
5734 * these interface routes, by definition, can only be that.
5735 *
5736 * Needless to say, the real IRE_LOOPBACK is NOT created by this
5737 * routine, but rather using ire_create() directly.
5738 *
5739 */
5740 type = ill->ill_net_type;
5741 if (type == IRE_LOOPBACK) {
5742 type = IRE_IF_NORESOLVER;
5743 flags |= RTF_BLACKHOLE;
5744 }
5745
5746 /*
5747 * Create a copy of the IRE_IF_NORESOLVER or
5748 * IRE_IF_RESOLVER with the modified address, netmask, and
5749 * gateway.
5750 */
5751 ire = ire_create(
5752 (uchar_t *)&dst_addr,
5753 (uint8_t *)&mask,
5754 (uint8_t *)&gw_addr,
5755 type,
5756 ill,
5757 zoneid,
5758 flags,
5759 NULL,
5760 ipst);
5761 if (ire == NULL) {
5762 ipif_refrele(ipif);
5763 return (ENOMEM);
5764 }
5765
5766 /* src address assigned by the caller? */
5767 if ((src_addr != INADDR_ANY) && (flags & RTF_SETSRC))
5768 ire->ire_setsrc_addr = src_addr;
5769
5770 nire = ire_add(ire);
5771 if (nire == NULL) {
5772 /*
			 * In the case of failure, ire_add() will have
5774 * already deleted the ire in question, so there
5775 * is no need to do that here.
5776 */
5777 ipif_refrele(ipif);
5778 return (ENOMEM);
5779 }
5780 /*
5781 * Check if it was a duplicate entry. This handles
5782 * the case of two racing route adds for the same route
5783 */
5784 if (nire != ire) {
5785 ire_delete(nire);
5786 ire_refrele(nire);
5787 ipif_refrele(ipif);
5788 return (EEXIST);
5789 }
5790 ire = nire;
5791 goto save_ire;
5792 }
5793
5794 /*
5795 * Get an interface IRE for the specified gateway.
5796 * If we don't have an IRE_IF_NORESOLVER or IRE_IF_RESOLVER for the
5797 * gateway, it is currently unreachable and we fail the request
5798 * accordingly. We reject any RTF_GATEWAY routes where the gateway
5799 * is an IRE_LOCAL or IRE_LOOPBACK.
5800 * If RTA_IFP was specified we look on that particular ill.
5801 */
5802 if (ill != NULL)
5803 match_flags |= MATCH_IRE_ILL;
5804
5805 /* Check whether the gateway is reachable. */
5806 again:
5807 type = IRE_INTERFACE | IRE_LOCAL | IRE_LOOPBACK;
5808 if (flags & RTF_INDIRECT)
5809 type |= IRE_OFFLINK;
5810
5811 gw_ire = ire_ftable_lookup_v4(gw_addr, 0, 0, type, ill,
5812 ALL_ZONES, NULL, match_flags, 0, ipst, NULL);
5813 if (gw_ire == NULL) {
5814 /*
5815 * With IPMP, we allow host routes to influence in.mpathd's
5816 * target selection. However, if the test addresses are on
5817 * their own network, the above lookup will fail since the
5818 * underlying IRE_INTERFACEs are marked hidden. So allow
5819 * hidden test IREs to be found and try again.
5820 */
5821 if (!(match_flags & MATCH_IRE_TESTHIDDEN)) {
5822 match_flags |= MATCH_IRE_TESTHIDDEN;
5823 goto again;
5824 }
5825 if (ipif != NULL)
5826 ipif_refrele(ipif);
5827 return (ENETUNREACH);
5828 }
5829 if (gw_ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK)) {
5830 ire_refrele(gw_ire);
5831 if (ipif != NULL)
5832 ipif_refrele(ipif);
5833 return (ENETUNREACH);
5834 }
5835
5836 if (ill == NULL && !(flags & RTF_INDIRECT)) {
5837 unbound = B_TRUE;
5838 if (ipst->ips_ip_strict_src_multihoming > 0)
5839 ill = gw_ire->ire_ill;
5840 }
5841
5842 /*
5843 * We create one of three types of IREs as a result of this request
5844 * based on the netmask. A netmask of all ones (which is automatically
5845 * assumed when RTF_HOST is set) results in an IRE_HOST being created.
5846 * An all zeroes netmask implies a default route so an IRE_DEFAULT is
5847 * created. Otherwise, an IRE_PREFIX route is created for the
5848 * destination prefix.
5849 */
5850 if (mask == IP_HOST_MASK)
5851 type = IRE_HOST;
5852 else if (mask == 0)
5853 type = IRE_DEFAULT;
5854 else
5855 type = IRE_PREFIX;
5856
5857 /* check for a duplicate entry */
5858 ire = ire_ftable_lookup_v4(dst_addr, mask, gw_addr, type, ill,
5859 ALL_ZONES, NULL, match_flags | MATCH_IRE_MASK | MATCH_IRE_GW,
5860 0, ipst, NULL);
5861 if (ire != NULL) {
5862 if (ipif != NULL)
5863 ipif_refrele(ipif);
5864 ire_refrele(gw_ire);
5865 ire_refrele(ire);
5866 return (EEXIST);
5867 }
5868
5869 /* Security attribute exists */
5870 if (sp != NULL) {
5871 tsol_gcgrp_addr_t ga;
5872
5873 /* find or create the gateway credentials group */
5874 ga.ga_af = AF_INET;
5875 IN6_IPADDR_TO_V4MAPPED(gw_addr, &ga.ga_addr);
5876
5877 /* we hold reference to it upon success */
5878 gcgrp = gcgrp_lookup(&ga, B_TRUE);
5879 if (gcgrp == NULL) {
5880 if (ipif != NULL)
5881 ipif_refrele(ipif);
5882 ire_refrele(gw_ire);
5883 return (ENOMEM);
5884 }
5885
5886 /*
5887 * Create and add the security attribute to the group; a
5888 * reference to the group is made upon allocating a new
5889 * entry successfully. If it finds an already-existing
5890 * entry for the security attribute in the group, it simply
5891 * returns it and no new reference is made to the group.
5892 */
5893 gc = gc_create(sp, gcgrp, &gcgrp_xtraref);
5894 if (gc == NULL) {
5895 if (ipif != NULL)
5896 ipif_refrele(ipif);
5897 /* release reference held by gcgrp_lookup */
5898 GCGRP_REFRELE(gcgrp);
5899 ire_refrele(gw_ire);
5900 return (ENOMEM);
5901 }
5902 }
5903
5904 /* Create the IRE. */
5905 ire = ire_create(
5906 (uchar_t *)&dst_addr, /* dest address */
5907 (uchar_t *)&mask, /* mask */
5908 (uchar_t *)&gw_addr, /* gateway address */
5909 (ushort_t)type, /* IRE type */
5910 ill,
5911 zoneid,
5912 flags,
5913 gc, /* security attribute */
5914 ipst);
5915
5916 /*
5917 * The ire holds a reference to the 'gc' and the 'gc' holds a
5918 * reference to the 'gcgrp'. We can now release the extra reference
5919 * the 'gcgrp' acquired in the gcgrp_lookup, if it was not used.
5920 */
5921 if (gcgrp_xtraref)
5922 GCGRP_REFRELE(gcgrp);
5923 if (ire == NULL) {
5924 if (gc != NULL)
5925 GC_REFRELE(gc);
5926 if (ipif != NULL)
5927 ipif_refrele(ipif);
5928 ire_refrele(gw_ire);
5929 return (ENOMEM);
5930 }
5931
5932 /* Before we add, check if an extra CGTP broadcast is needed */
5933 cgtp_broadcast = ((flags & RTF_MULTIRT) &&
5934 ip_type_v4(ire->ire_addr, ipst) == IRE_BROADCAST);
5935
5936 /* src address assigned by the caller? */
5937 if ((src_addr != INADDR_ANY) && (flags & RTF_SETSRC))
5938 ire->ire_setsrc_addr = src_addr;
5939
5940 ire->ire_unbound = unbound;
5941
5942 /*
5943 * POLICY: should we allow an RTF_HOST with address INADDR_ANY?
	 * SunOS socket code does, but do we really want to allow 0.0.0.0?
5945 */
5946
5947 /* Add the new IRE. */
5948 nire = ire_add(ire);
5949 if (nire == NULL) {
5950 /*
		 * In the case of failure, ire_add() will have
5952 * already deleted the ire in question, so there
5953 * is no need to do that here.
5954 */
5955 if (ipif != NULL)
5956 ipif_refrele(ipif);
5957 ire_refrele(gw_ire);
5958 return (ENOMEM);
5959 }
5960 /*
5961 * Check if it was a duplicate entry. This handles
5962 * the case of two racing route adds for the same route
5963 */
5964 if (nire != ire) {
5965 ire_delete(nire);
5966 ire_refrele(nire);
5967 if (ipif != NULL)
5968 ipif_refrele(ipif);
5969 ire_refrele(gw_ire);
5970 return (EEXIST);
5971 }
5972 ire = nire;
5973
5974 if (flags & RTF_MULTIRT) {
5975 /*
5976 * Invoke the CGTP (multirouting) filtering module
5977 * to add the dst address in the filtering database.
5978 * Replicated inbound packets coming from that address
5979 * will be filtered to discard the duplicates.
5980 * It is not necessary to call the CGTP filter hook
5981 * when the dst address is a broadcast or multicast,
5982 * because an IP source address cannot be a broadcast
5983 * or a multicast.
5984 */
5985 if (cgtp_broadcast) {
5986 ip_cgtp_bcast_add(ire, ipst);
5987 goto save_ire;
5988 }
5989 if (ipst->ips_ip_cgtp_filter_ops != NULL &&
5990 !CLASSD(ire->ire_addr)) {
5991 int res;
5992 ipif_t *src_ipif;
5993
5994 /* Find the source address corresponding to gw_ire */
5995 src_ipif = ipif_lookup_addr(gw_ire->ire_gateway_addr,
5996 NULL, zoneid, ipst);
5997 if (src_ipif != NULL) {
5998 res = ipst->ips_ip_cgtp_filter_ops->
5999 cfo_add_dest_v4(
6000 ipst->ips_netstack->netstack_stackid,
6001 ire->ire_addr,
6002 ire->ire_gateway_addr,
6003 ire->ire_setsrc_addr,
6004 src_ipif->ipif_lcl_addr);
6005 ipif_refrele(src_ipif);
6006 } else {
6007 res = EADDRNOTAVAIL;
6008 }
6009 if (res != 0) {
6010 if (ipif != NULL)
6011 ipif_refrele(ipif);
6012 ire_refrele(gw_ire);
6013 ire_delete(ire);
6014 ire_refrele(ire); /* Held in ire_add */
6015 return (res);
6016 }
6017 }
6018 }
6019
6020 save_ire:
6021 if (gw_ire != NULL) {
6022 ire_refrele(gw_ire);
6023 gw_ire = NULL;
6024 }
6025 if (ill != NULL) {
6026 /*
6027 * Save enough information so that we can recreate the IRE if
6028 * the interface goes down and then up. The metrics associated
6029 * with the route will be saved as well when rts_setmetrics() is
6030 * called after the IRE has been created. In the case where
6031 * memory cannot be allocated, none of this information will be
6032 * saved.
6033 */
6034 ill_save_ire(ill, ire);
6035 }
6036 if (ioctl_msg)
6037 ip_rts_rtmsg(RTM_OLDADD, ire, 0, ipst);
6038 if (ire_arg != NULL) {
6039 /*
6040 * Store the ire that was successfully added into where ire_arg
6041 * points to so that callers don't have to look it up
6042 * themselves (but they are responsible for ire_refrele()ing
6043 * the ire when they are finished with it).
6044 */
6045 *ire_arg = ire;
6046 } else {
6047 ire_refrele(ire); /* Held in ire_add */
6048 }
6049 if (ipif != NULL)
6050 ipif_refrele(ipif);
6051 return (0);
6052 }
6053
6054 /*
6055 * ip_rt_delete is called to delete an IPv4 route.
6056 * ill is passed in to associate it with the correct interface.
6057 */
6058 /* ARGSUSED4 */
6059 int
6060 ip_rt_delete(ipaddr_t dst_addr, ipaddr_t mask, ipaddr_t gw_addr,
6061 uint_t rtm_addrs, int flags, ill_t *ill, boolean_t ioctl_msg,
6062 ip_stack_t *ipst, zoneid_t zoneid)
6063 {
6064 ire_t *ire = NULL;
6065 ipif_t *ipif;
6066 uint_t type;
6067 uint_t match_flags = MATCH_IRE_TYPE;
6068 int err = 0;
6069
6070 ip1dbg(("ip_rt_delete:"));
6071 /*
	 * If RTF_HOST is set, then we set the netmask to all ones.
	 * Otherwise, we use the netmask if one was supplied.
6074 */
6075 if (flags & RTF_HOST) {
6076 mask = IP_HOST_MASK;
6077 match_flags |= MATCH_IRE_MASK;
6078 } else if (rtm_addrs & RTA_NETMASK) {
6079 match_flags |= MATCH_IRE_MASK;
6080 }
6081
6082 /*
6083 * Note that RTF_GATEWAY is never set on a delete, therefore
6084 * we check if the gateway address is one of our interfaces first,
6085 * and fall back on RTF_GATEWAY routes.
6086 *
6087 * This makes it possible to delete an original
6088 * IRE_IF_NORESOLVER/IRE_IF_RESOLVER - consistent with SunOS 4.1.
6089 * However, we have RTF_KERNEL set on the ones created by ipif_up
	 * and those cannot be deleted here.
6091 *
6092 * We use MATCH_IRE_ILL if we know the interface. If the caller
6093 * specified an interface (from the RTA_IFP sockaddr) we use it,
6094 * otherwise we use the ill derived from the gateway address.
6095 * We can always match the gateway address since we record it
6096 * in ire_gateway_addr.
6097 *
6098 * For more detail on specifying routes by gateway address and by
6099 * interface index, see the comments in ip_rt_add().
6100 */
6101 ipif = ipif_lookup_interface(gw_addr, dst_addr, ipst);
6102 if (ipif != NULL) {
6103 ill_t *ill_match;
6104
6105 if (ill != NULL)
6106 ill_match = ill;
6107 else
6108 ill_match = ipif->ipif_ill;
6109
6110 match_flags |= MATCH_IRE_ILL;
6111 if (ipif->ipif_ire_type == IRE_LOOPBACK) {
6112 ire = ire_ftable_lookup_v4(dst_addr, mask, 0,
6113 IRE_LOOPBACK, ill_match, ALL_ZONES, NULL,
6114 match_flags, 0, ipst, NULL);
6115 }
6116 if (ire == NULL) {
6117 match_flags |= MATCH_IRE_GW;
6118 ire = ire_ftable_lookup_v4(dst_addr, mask, gw_addr,
6119 IRE_INTERFACE, ill_match, ALL_ZONES, NULL,
6120 match_flags, 0, ipst, NULL);
6121 }
6122 /* Avoid deleting routes created by kernel from an ipif */
6123 if (ire != NULL && (ire->ire_flags & RTF_KERNEL)) {
6124 ire_refrele(ire);
6125 ire = NULL;
6126 }
6127
6128 /* Restore in case we didn't find a match */
6129 match_flags &= ~(MATCH_IRE_GW|MATCH_IRE_ILL);
6130 }
6131
6132 if (ire == NULL) {
6133 /*
6134 * At this point, the gateway address is not one of our own
6135 * addresses or a matching interface route was not found. We
6136 * set the IRE type to lookup based on whether
6137 * this is a host route, a default route or just a prefix.
6138 *
6139 * If an ill was passed in, then the lookup is based on an
6140 * interface index so MATCH_IRE_ILL is added to match_flags.
6141 */
6142 match_flags |= MATCH_IRE_GW;
6143 if (ill != NULL)
6144 match_flags |= MATCH_IRE_ILL;
6145 if (mask == IP_HOST_MASK)
6146 type = IRE_HOST;
6147 else if (mask == 0)
6148 type = IRE_DEFAULT;
6149 else
6150 type = IRE_PREFIX;
6151 ire = ire_ftable_lookup_v4(dst_addr, mask, gw_addr, type, ill,
6152 ALL_ZONES, NULL, match_flags, 0, ipst, NULL);
6153 }
6154
6155 if (ipif != NULL) {
6156 ipif_refrele(ipif);
6157 ipif = NULL;
6158 }
6159
6160 if (ire == NULL)
6161 return (ESRCH);
6162
6163 if (ire->ire_flags & RTF_MULTIRT) {
6164 /*
6165 * Invoke the CGTP (multirouting) filtering module
6166 * to remove the dst address from the filtering database.
6167 * Packets coming from that address will no longer be
6168 * filtered to remove duplicates.
6169 */
6170 if (ipst->ips_ip_cgtp_filter_ops != NULL) {
6171 err = ipst->ips_ip_cgtp_filter_ops->cfo_del_dest_v4(
6172 ipst->ips_netstack->netstack_stackid,
6173 ire->ire_addr, ire->ire_gateway_addr);
6174 }
6175 ip_cgtp_bcast_delete(ire, ipst);
6176 }
6177
6178 ill = ire->ire_ill;
6179 if (ill != NULL)
6180 ill_remove_saved_ire(ill, ire);
6181 if (ioctl_msg)
6182 ip_rts_rtmsg(RTM_OLDDEL, ire, 0, ipst);
6183 ire_delete(ire);
6184 ire_refrele(ire);
6185 return (err);
6186 }
6187
6188 /*
6189 * ip_siocaddrt is called to complete processing of an SIOCADDRT IOCTL.
6190 */
6191 /* ARGSUSED */
6192 int
6193 ip_siocaddrt(ipif_t *dummy_ipif, sin_t *dummy_sin, queue_t *q, mblk_t *mp,
6194 ip_ioctl_cmd_t *ipip, void *dummy_if_req)
6195 {
6196 ipaddr_t dst_addr;
6197 ipaddr_t gw_addr;
6198 ipaddr_t mask;
6199 int error = 0;
6200 mblk_t *mp1;
6201 struct rtentry *rt;
6202 ipif_t *ipif = NULL;
6203 ip_stack_t *ipst;
6204
6205 ASSERT(q->q_next == NULL);
6206 ipst = CONNQ_TO_IPST(q);
6207
6208 ip1dbg(("ip_siocaddrt:"));
6209 /* Existence of mp1 verified in ip_wput_nondata */
6210 mp1 = mp->b_cont->b_cont;
6211 rt = (struct rtentry *)mp1->b_rptr;
6212
6213 dst_addr = ((sin_t *)&rt->rt_dst)->sin_addr.s_addr;
6214 gw_addr = ((sin_t *)&rt->rt_gateway)->sin_addr.s_addr;
6215
6216 /*
6217 * If the RTF_HOST flag is on, this is a request to assign a gateway
6218 * to a particular host address. In this case, we set the netmask to
6219 * all ones for the particular destination address. Otherwise,
6220 * determine the netmask to be used based on dst_addr and the interfaces
6221 * in use.
6222 */
6223 if (rt->rt_flags & RTF_HOST) {
6224 mask = IP_HOST_MASK;
6225 } else {
6226 /*
6227 * Note that ip_subnet_mask returns a zero mask in the case of
6228 * default (an all-zeroes address).
6229 */
6230 mask = ip_subnet_mask(dst_addr, &ipif, ipst);
6231 }
6232
6233 error = ip_rt_add(dst_addr, mask, gw_addr, 0, rt->rt_flags, NULL, NULL,
6234 B_TRUE, NULL, ipst, ALL_ZONES);
6235 if (ipif != NULL)
6236 ipif_refrele(ipif);
6237 return (error);
6238 }
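
/*
 * Editor's sketch (not part of the build): a hypothetical user-space
 * request that exercises this path, adding a host route via the old
 * SIOCADDRT interface. The addresses are illustrative.
 *
 *	#include <sys/socket.h>
 *	#include <sys/sockio.h>
 *	#include <net/route.h>
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *
 *	struct rtentry rt;
 *	struct sockaddr_in *sin;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	(void) memset(&rt, 0, sizeof (rt));
 *	sin = (struct sockaddr_in *)&rt.rt_dst;
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("192.0.2.64");
 *	sin = (struct sockaddr_in *)&rt.rt_gateway;
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("192.0.2.33");
 *	rt.rt_flags = RTF_UP | RTF_GATEWAY | RTF_HOST;
 *	if (ioctl(s, SIOCADDRT, &rt) < 0)
 *		perror("SIOCADDRT");
 */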
6239
6240 /*
6241 * ip_siocdelrt is called to complete processing of an SIOCDELRT IOCTL.
6242 */
6243 /* ARGSUSED */
6244 int
6245 ip_siocdelrt(ipif_t *dummy_ipif, sin_t *dummy_sin, queue_t *q, mblk_t *mp,
6246 ip_ioctl_cmd_t *ipip, void *dummy_if_req)
6247 {
6248 ipaddr_t dst_addr;
6249 ipaddr_t gw_addr;
6250 ipaddr_t mask;
6251 int error;
6252 mblk_t *mp1;
6253 struct rtentry *rt;
6254 ipif_t *ipif = NULL;
6255 ip_stack_t *ipst;
6256
6257 ASSERT(q->q_next == NULL);
6258 ipst = CONNQ_TO_IPST(q);
6259
6260 ip1dbg(("ip_siocdelrt:"));
6261 /* Existence of mp1 verified in ip_wput_nondata */
6262 mp1 = mp->b_cont->b_cont;
6263 rt = (struct rtentry *)mp1->b_rptr;
6264
6265 dst_addr = ((sin_t *)&rt->rt_dst)->sin_addr.s_addr;
6266 gw_addr = ((sin_t *)&rt->rt_gateway)->sin_addr.s_addr;
6267
6268 /*
6269 * If the RTF_HOST flag is on, this is a request to delete a gateway
6270 * to a particular host address. In this case, we set the netmask to
6271 * all ones for the particular destination address. Otherwise,
6272 * determine the netmask to be used based on dst_addr and the interfaces
6273 * in use.
6274 */
6275 if (rt->rt_flags & RTF_HOST) {
6276 mask = IP_HOST_MASK;
6277 } else {
6278 /*
6279 * Note that ip_subnet_mask returns a zero mask in the case of
6280 * default (an all-zeroes address).
6281 */
6282 mask = ip_subnet_mask(dst_addr, &ipif, ipst);
6283 }
6284
6285 error = ip_rt_delete(dst_addr, mask, gw_addr,
6286 RTA_DST | RTA_GATEWAY | RTA_NETMASK, rt->rt_flags, NULL, B_TRUE,
6287 ipst, ALL_ZONES);
6288 if (ipif != NULL)
6289 ipif_refrele(ipif);
6290 return (error);
6291 }
6292
6293 /*
6294 * Enqueue the mp onto the ipsq, chained by b_next.
6295 * b_prev stores the function to be executed later, and b_queue the queue
6296 * where this mp originated.
6297 */
6298 void
6299 ipsq_enq(ipsq_t *ipsq, queue_t *q, mblk_t *mp, ipsq_func_t func, int type,
6300 ill_t *pending_ill)
6301 {
6302 conn_t *connp;
6303 ipxop_t *ipx = ipsq->ipsq_xop;
6304
6305 ASSERT(MUTEX_HELD(&ipsq->ipsq_lock));
6306 ASSERT(MUTEX_HELD(&ipx->ipx_lock));
6307 ASSERT(func != NULL);
6308
6309 mp->b_queue = q;
6310 mp->b_prev = (void *)func;
6311 mp->b_next = NULL;
6312
6313 switch (type) {
6314 case CUR_OP:
6315 if (ipx->ipx_mptail != NULL) {
6316 ASSERT(ipx->ipx_mphead != NULL);
6317 ipx->ipx_mptail->b_next = mp;
6318 } else {
6319 ASSERT(ipx->ipx_mphead == NULL);
6320 ipx->ipx_mphead = mp;
6321 }
6322 ipx->ipx_mptail = mp;
6323 break;
6324
6325 case NEW_OP:
6326 if (ipsq->ipsq_xopq_mptail != NULL) {
6327 ASSERT(ipsq->ipsq_xopq_mphead != NULL);
6328 ipsq->ipsq_xopq_mptail->b_next = mp;
6329 } else {
6330 ASSERT(ipsq->ipsq_xopq_mphead == NULL);
6331 ipsq->ipsq_xopq_mphead = mp;
6332 }
6333 ipsq->ipsq_xopq_mptail = mp;
6334 ipx->ipx_ipsq_queued = B_TRUE;
6335 break;
6336
6337 case SWITCH_OP:
6338 ASSERT(ipsq->ipsq_swxop != NULL);
6339 /* only one switch operation is currently allowed */
6340 ASSERT(ipsq->ipsq_switch_mp == NULL);
6341 ipsq->ipsq_switch_mp = mp;
6342 ipx->ipx_ipsq_queued = B_TRUE;
6343 break;
6344 default:
		cmn_err(CE_PANIC, "ipsq_enq: unknown type %d\n", type);
6346 }
6347
6348 if (CONN_Q(q) && pending_ill != NULL) {
6349 connp = Q_TO_CONN(q);
6350 ASSERT(MUTEX_HELD(&connp->conn_lock));
6351 connp->conn_oper_pending_ill = pending_ill;
6352 }
6353 }
6354
6355 /*
6356 * Dequeue the next message that requested exclusive access to this IPSQ's
6357 * xop. Specifically:
6358 *
6359 * 1. If we're still processing the current operation on `ipsq', then
6360 * dequeue the next message for the operation (from ipx_mphead), or
6361 * return NULL if there are no queued messages for the operation.
6362 * These messages are queued via CUR_OP to qwriter_ip() and friends.
6363 *
6364 * 2. If the current operation on `ipsq' has completed (ipx_current_ipif is
 * not set), see if the ipsq has requested an xop switch. If so, switch
6366 * `ipsq' to a different xop. Xop switches only happen when joining or
6367 * leaving IPMP groups and require a careful dance -- see the comments
6368 * in-line below for details. If we're leaving a group xop or if we're
6369 * joining a group xop and become writer on it, then we proceed to (3).
6370 * Otherwise, we return NULL and exit the xop.
6371 *
6372 * 3. For each IPSQ in the xop, return any switch operation stored on
6373 * ipsq_switch_mp (set via SWITCH_OP); these must be processed before
6374 * any other messages queued on the IPSQ. Otherwise, dequeue the next
6375 * exclusive operation (queued via NEW_OP) stored on ipsq_xopq_mphead.
6376 * Note that if the phyint tied to `ipsq' is not using IPMP there will
6377 * only be one IPSQ in the xop. Otherwise, there will be one IPSQ for
6378 * each phyint in the group, including the IPMP meta-interface phyint.
6379 */
6380 static mblk_t *
6381 ipsq_dq(ipsq_t *ipsq)
6382 {
6383 ill_t *illv4, *illv6;
6384 mblk_t *mp;
6385 ipsq_t *xopipsq;
6386 ipsq_t *leftipsq = NULL;
6387 ipxop_t *ipx;
6388 phyint_t *phyi = ipsq->ipsq_phyint;
6389 ip_stack_t *ipst = ipsq->ipsq_ipst;
6390 boolean_t emptied = B_FALSE;
6391
6392 /*
6393 * Grab all the locks we need in the defined order (ill_g_lock ->
6394 * ipsq_lock -> ipx_lock); ill_g_lock is needed to use ipsq_next.
6395 */
6396 rw_enter(&ipst->ips_ill_g_lock,
6397 ipsq->ipsq_swxop != NULL ? RW_WRITER : RW_READER);
6398 mutex_enter(&ipsq->ipsq_lock);
6399 ipx = ipsq->ipsq_xop;
6400 mutex_enter(&ipx->ipx_lock);
6401
6402 /*
6403 * Dequeue the next message associated with the current exclusive
6404 * operation, if any.
6405 */
6406 if ((mp = ipx->ipx_mphead) != NULL) {
6407 ipx->ipx_mphead = mp->b_next;
6408 if (ipx->ipx_mphead == NULL)
6409 ipx->ipx_mptail = NULL;
6410 mp->b_next = (void *)ipsq;
6411 goto out;
6412 }
6413
6414 if (ipx->ipx_current_ipif != NULL)
6415 goto empty;
6416
6417 if (ipsq->ipsq_swxop != NULL) {
6418 /*
6419 * The exclusive operation that is now being completed has
6420 * requested a switch to a different xop. This happens
6421 * when an interface joins or leaves an IPMP group. Joins
6422 * happen through SIOCSLIFGROUPNAME (ip_sioctl_groupname()).
6423 * Leaves happen via SIOCSLIFGROUPNAME, interface unplumb
6424 * (phyint_free()), or interface plumb for an ill type
6425 * not in the IPMP group (ip_rput_dlpi_writer()).
6426 *
6427 * Xop switches are not allowed on the IPMP meta-interface.
6428 */
6429 ASSERT(phyi == NULL || !(phyi->phyint_flags & PHYI_IPMP));
6430 ASSERT(RW_WRITE_HELD(&ipst->ips_ill_g_lock));
6431 DTRACE_PROBE1(ipsq__switch, (ipsq_t *), ipsq);
6432
6433 if (ipsq->ipsq_swxop == &ipsq->ipsq_ownxop) {
6434 /*
6435 * We're switching back to our own xop, so we have two
6436 * xop's to drain/exit: our own, and the group xop
6437 * that we are leaving.
6438 *
6439 * First, pull ourselves out of the group ipsq list.
6440 * This is safe since we're writer on ill_g_lock.
6441 */
6442 ASSERT(ipsq->ipsq_xop != &ipsq->ipsq_ownxop);
6443
6444 xopipsq = ipx->ipx_ipsq;
6445 while (xopipsq->ipsq_next != ipsq)
6446 xopipsq = xopipsq->ipsq_next;
6447
6448 xopipsq->ipsq_next = ipsq->ipsq_next;
6449 ipsq->ipsq_next = ipsq;
6450 ipsq->ipsq_xop = ipsq->ipsq_swxop;
6451 ipsq->ipsq_swxop = NULL;
6452
6453 /*
6454 * Second, prepare to exit the group xop. The actual
6455 * ipsq_exit() is done at the end of this function
6456 * since we cannot hold any locks across ipsq_exit().
6457 * Note that although we drop the group's ipx_lock, no
6458 * threads can proceed since we're still ipx_writer.
6459 */
6460 leftipsq = xopipsq;
6461 mutex_exit(&ipx->ipx_lock);
6462
6463 /*
6464 * Third, set ipx to point to our own xop (which was
6465 * inactive and therefore can be entered).
6466 */
6467 ipx = ipsq->ipsq_xop;
6468 mutex_enter(&ipx->ipx_lock);
6469 ASSERT(ipx->ipx_writer == NULL);
6470 ASSERT(ipx->ipx_current_ipif == NULL);
6471 } else {
6472 /*
6473 * We're switching from our own xop to a group xop.
6474 * The requestor of the switch must ensure that the
6475 * group xop cannot go away (e.g. by ensuring the
6476 * phyint associated with the xop cannot go away).
6477 *
6478 * If we can become writer on our new xop, then we'll
6479 * do the drain. Otherwise, the current writer of our
6480 * new xop will do the drain when it exits.
6481 *
6482 * First, splice ourselves into the group IPSQ list.
6483 * This is safe since we're writer on ill_g_lock.
6484 */
6485 ASSERT(ipsq->ipsq_xop == &ipsq->ipsq_ownxop);
6486
6487 xopipsq = ipsq->ipsq_swxop->ipx_ipsq;
6488 while (xopipsq->ipsq_next != ipsq->ipsq_swxop->ipx_ipsq)
6489 xopipsq = xopipsq->ipsq_next;
6490
6491 xopipsq->ipsq_next = ipsq;
6492 ipsq->ipsq_next = ipsq->ipsq_swxop->ipx_ipsq;
6493 ipsq->ipsq_xop = ipsq->ipsq_swxop;
6494 ipsq->ipsq_swxop = NULL;
6495
6496 /*
6497 * Second, exit our own xop, since it's now unused.
6498 * This is safe since we've got the only reference.
6499 */
6500 ASSERT(ipx->ipx_writer == curthread);
6501 ipx->ipx_writer = NULL;
6502 VERIFY(--ipx->ipx_reentry_cnt == 0);
6503 ipx->ipx_ipsq_queued = B_FALSE;
6504 mutex_exit(&ipx->ipx_lock);
6505
6506 /*
6507 * Third, set ipx to point to our new xop, and check
6508 * if we can become writer on it. If we cannot, then
6509 * the current writer will drain the IPSQ group when
6510 * it exits. Our ipsq_xop is guaranteed to be stable
6511 * because we're still holding ipsq_lock.
6512 */
6513 ipx = ipsq->ipsq_xop;
6514 mutex_enter(&ipx->ipx_lock);
6515 if (ipx->ipx_writer != NULL ||
6516 ipx->ipx_current_ipif != NULL) {
6517 goto out;
6518 }
6519 }
6520
6521 /*
6522 * Fourth, become writer on our new ipx before we continue
6523 * with the drain. Note that we never dropped ipsq_lock
6524 * above, so no other thread could've raced with us to
6525 * become writer first. Also, we're holding ipx_lock, so
6526 * no other thread can examine the ipx right now.
6527 */
6528 ASSERT(ipx->ipx_current_ipif == NULL);
6529 ASSERT(ipx->ipx_mphead == NULL && ipx->ipx_mptail == NULL);
6530 VERIFY(ipx->ipx_reentry_cnt++ == 0);
6531 ipx->ipx_writer = curthread;
6532 ipx->ipx_forced = B_FALSE;
6533 #ifdef DEBUG
6534 ipx->ipx_depth = getpcstack(ipx->ipx_stack, IPX_STACK_DEPTH);
6535 #endif
6536 }
6537
6538 xopipsq = ipsq;
6539 do {
6540 /*
6541 * So that other operations operate on a consistent and
6542 * complete phyint, a switch message on an IPSQ must be
6543 * handled prior to any other operations on that IPSQ.
6544 */
6545 if ((mp = xopipsq->ipsq_switch_mp) != NULL) {
6546 xopipsq->ipsq_switch_mp = NULL;
6547 ASSERT(mp->b_next == NULL);
6548 mp->b_next = (void *)xopipsq;
6549 goto out;
6550 }
6551
6552 if ((mp = xopipsq->ipsq_xopq_mphead) != NULL) {
6553 xopipsq->ipsq_xopq_mphead = mp->b_next;
6554 if (xopipsq->ipsq_xopq_mphead == NULL)
6555 xopipsq->ipsq_xopq_mptail = NULL;
6556 mp->b_next = (void *)xopipsq;
6557 goto out;
6558 }
6559 } while ((xopipsq = xopipsq->ipsq_next) != ipsq);
6560 empty:
6561 /*
6562 * There are no messages. Further, we are holding ipx_lock, hence no
6563 * new messages can end up on any IPSQ in the xop.
6564 */
6565 ipx->ipx_writer = NULL;
6566 ipx->ipx_forced = B_FALSE;
6567 VERIFY(--ipx->ipx_reentry_cnt == 0);
6568 ipx->ipx_ipsq_queued = B_FALSE;
6569 emptied = B_TRUE;
6570 #ifdef DEBUG
6571 ipx->ipx_depth = 0;
6572 #endif
6573 out:
6574 mutex_exit(&ipx->ipx_lock);
6575 mutex_exit(&ipsq->ipsq_lock);
6576
6577 /*
6578 * If we completely emptied the xop, then wake up any threads waiting
6579 * to enter any of the IPSQ's associated with it.
6580 */
6581 if (emptied) {
6582 xopipsq = ipsq;
6583 do {
6584 if ((phyi = xopipsq->ipsq_phyint) == NULL)
6585 continue;
6586
6587 illv4 = phyi->phyint_illv4;
6588 illv6 = phyi->phyint_illv6;
6589
6590 GRAB_ILL_LOCKS(illv4, illv6);
6591 if (illv4 != NULL)
6592 cv_broadcast(&illv4->ill_cv);
6593 if (illv6 != NULL)
6594 cv_broadcast(&illv6->ill_cv);
6595 RELEASE_ILL_LOCKS(illv4, illv6);
6596 } while ((xopipsq = xopipsq->ipsq_next) != ipsq);
6597 }
6598 rw_exit(&ipst->ips_ill_g_lock);
6599
6600 /*
6601 * Now that all locks are dropped, exit the IPSQ we left.
6602 */
6603 if (leftipsq != NULL)
6604 ipsq_exit(leftipsq);
6605
6606 return (mp);
6607 }
6608
6609 /*
6610 * Return completion status of previously initiated DLPI operations on
6611 * ills in the purview of an ipsq.
6612 */
6613 static boolean_t
6614 ipsq_dlpi_done(ipsq_t *ipsq)
6615 {
6616 ipsq_t *ipsq_start;
6617 phyint_t *phyi;
6618 ill_t *ill;
6619
6620 ASSERT(RW_LOCK_HELD(&ipsq->ipsq_ipst->ips_ill_g_lock));
6621 ipsq_start = ipsq;
6622
6623 do {
6624 /*
		 * The only current users of this function are ipsq_try_enter
		 * and ipsq_enter, which have made sure that ipsq_writer is
		 * NULL before we reach here. ill_dlpi_pending is modified
		 * only by an ipsq writer.
6629 */
6630 ASSERT(ipsq->ipsq_xop->ipx_writer == NULL);
6631 phyi = ipsq->ipsq_phyint;
6632 /*
6633 * phyi could be NULL if a phyint that is part of an
6634 * IPMP group is being unplumbed. A more detailed
6635 * comment is in ipmp_grp_update_kstats()
6636 */
6637 if (phyi != NULL) {
6638 ill = phyi->phyint_illv4;
6639 if (ill != NULL &&
6640 (ill->ill_dlpi_pending != DL_PRIM_INVAL ||
6641 ill->ill_arl_dlpi_pending))
6642 return (B_FALSE);
6643
6644 ill = phyi->phyint_illv6;
6645 if (ill != NULL &&
6646 ill->ill_dlpi_pending != DL_PRIM_INVAL)
6647 return (B_FALSE);
6648 }
6649
6650 } while ((ipsq = ipsq->ipsq_next) != ipsq_start);
6651
6652 return (B_TRUE);
6653 }
6654
6655 /*
6656 * Enter the ipsq corresponding to ill, by waiting synchronously till
6657 * we can enter the ipsq exclusively. Unless 'force' is used, the ipsq
6658 * will have to drain completely before ipsq_enter returns success.
6659 * ipx_current_ipif will be set if some exclusive op is in progress,
6660 * and the ipsq_exit logic will start the next enqueued op after
6661 * completion of the current op. If 'force' is used, we don't wait
6662 * for the enqueued ops. This is needed when a conn_close wants to
6663 * enter the ipsq and abort an ioctl that is somehow stuck. Unplumb
 * of an ill can also use this option. But we don't use it currently.
6665 */
6666 #define ENTER_SQ_WAIT_TICKS 100
6667 boolean_t
6668 ipsq_enter(ill_t *ill, boolean_t force, int type)
6669 {
6670 ipsq_t *ipsq;
6671 ipxop_t *ipx;
6672 boolean_t waited_enough = B_FALSE;
6673 ip_stack_t *ipst = ill->ill_ipst;
6674
6675 /*
6676 * Note that the relationship between ill and ipsq is fixed as long as
6677 * the ill is not ILL_CONDEMNED. Holding ipsq_lock ensures the
6678 * relationship between the IPSQ and xop cannot change. However,
6679 * since we cannot hold ipsq_lock across the cv_wait(), it may change
6680 * while we're waiting. We wait on ill_cv and rely on ipsq_exit()
6681 * waking up all ills in the xop when it becomes available.
6682 */
6683 for (;;) {
6684 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
6685 mutex_enter(&ill->ill_lock);
6686 if (ill->ill_state_flags & ILL_CONDEMNED) {
6687 mutex_exit(&ill->ill_lock);
6688 rw_exit(&ipst->ips_ill_g_lock);
6689 return (B_FALSE);
6690 }
6691
6692 ipsq = ill->ill_phyint->phyint_ipsq;
6693 mutex_enter(&ipsq->ipsq_lock);
6694 ipx = ipsq->ipsq_xop;
6695 mutex_enter(&ipx->ipx_lock);
6696
6697 if (ipx->ipx_writer == NULL && (type == CUR_OP ||
6698 (ipx->ipx_current_ipif == NULL && ipsq_dlpi_done(ipsq)) ||
6699 waited_enough))
6700 break;
6701
6702 rw_exit(&ipst->ips_ill_g_lock);
6703
6704 if (!force || ipx->ipx_writer != NULL) {
6705 mutex_exit(&ipx->ipx_lock);
6706 mutex_exit(&ipsq->ipsq_lock);
6707 cv_wait(&ill->ill_cv, &ill->ill_lock);
6708 } else {
6709 mutex_exit(&ipx->ipx_lock);
6710 mutex_exit(&ipsq->ipsq_lock);
6711 (void) cv_reltimedwait(&ill->ill_cv,
6712 &ill->ill_lock, ENTER_SQ_WAIT_TICKS, TR_CLOCK_TICK);
6713 waited_enough = B_TRUE;
6714 }
6715 mutex_exit(&ill->ill_lock);
6716 }
6717
6718 ASSERT(ipx->ipx_mphead == NULL && ipx->ipx_mptail == NULL);
6719 ASSERT(ipx->ipx_reentry_cnt == 0);
6720 ipx->ipx_writer = curthread;
6721 ipx->ipx_forced = (ipx->ipx_current_ipif != NULL);
6722 ipx->ipx_reentry_cnt++;
6723 #ifdef DEBUG
6724 ipx->ipx_depth = getpcstack(ipx->ipx_stack, IPX_STACK_DEPTH);
6725 #endif
6726 mutex_exit(&ipx->ipx_lock);
6727 mutex_exit(&ipsq->ipsq_lock);
6728 mutex_exit(&ill->ill_lock);
6729 rw_exit(&ipst->ips_ill_g_lock);
6730
6731 return (B_TRUE);
6732 }
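
/*
 * Editor's sketch (not part of the build): a hypothetical synchronous
 * caller. It blocks until exclusive (B_FALSE means the ill was
 * condemned) and must later exit the ipsq it entered.
 *
 *	if (!ipsq_enter(ill, B_FALSE, NEW_OP))
 *		return;
 *	... perform the exclusive operation ...
 *	ipsq_exit(ill->ill_phyint->phyint_ipsq);
 */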
6733
6734 /*
6735 * ipif_set_values() has a constraint that it cannot drop the ips_ill_g_lock
6736 * across the call to the core interface ipsq_try_enter() and hence calls this
6737 * function directly. This is explained more fully in ipif_set_values().
6738 * In order to support the above constraint, ipsq_try_enter is implemented as
6739 * a wrapper that grabs the ips_ill_g_lock and calls this function subsequently
6740 */
6741 static ipsq_t *
6742 ipsq_try_enter_internal(ill_t *ill, queue_t *q, mblk_t *mp, ipsq_func_t func,
6743 int type, boolean_t reentry_ok)
6744 {
6745 ipsq_t *ipsq;
6746 ipxop_t *ipx;
6747 ip_stack_t *ipst = ill->ill_ipst;
6748
6749 /*
6750 * lock ordering:
6751 * ill_g_lock -> conn_lock -> ill_lock -> ipsq_lock -> ipx_lock.
6752 *
6753 * ipx of an ipsq can't change when ipsq_lock is held.
6754 */
6755 ASSERT(RW_LOCK_HELD(&ipst->ips_ill_g_lock));
6756 GRAB_CONN_LOCK(q);
6757 mutex_enter(&ill->ill_lock);
6758 ipsq = ill->ill_phyint->phyint_ipsq;
6759 mutex_enter(&ipsq->ipsq_lock);
6760 ipx = ipsq->ipsq_xop;
6761 mutex_enter(&ipx->ipx_lock);
6762
6763 /*
6764 * 1. Enter the ipsq if we are already writer and reentry is ok.
	 * (Note: if the caller does not specify reentry_ok, then neither
	 * 'func' nor any of its callees may ever attempt to enter the ipsq
	 * again; otherwise it can lead to an infinite loop.)
6768 * 2. Enter the ipsq if there is no current writer and this attempted
6769 * entry is part of the current operation
	 * 3. Enter the ipsq if there is no current writer, this is a new
	 * operation, the operation queue is empty, no operation is currently
	 * in progress, and all previously initiated DLPI operations have
	 * completed.
6774 */
6775 if ((ipx->ipx_writer == curthread && reentry_ok) ||
6776 (ipx->ipx_writer == NULL && (type == CUR_OP || (type == NEW_OP &&
6777 !ipx->ipx_ipsq_queued && ipx->ipx_current_ipif == NULL &&
6778 ipsq_dlpi_done(ipsq))))) {
6779 /* Success. */
6780 ipx->ipx_reentry_cnt++;
6781 ipx->ipx_writer = curthread;
6782 ipx->ipx_forced = B_FALSE;
6783 mutex_exit(&ipx->ipx_lock);
6784 mutex_exit(&ipsq->ipsq_lock);
6785 mutex_exit(&ill->ill_lock);
6786 RELEASE_CONN_LOCK(q);
6787 #ifdef DEBUG
6788 ipx->ipx_depth = getpcstack(ipx->ipx_stack, IPX_STACK_DEPTH);
6789 #endif
6790 return (ipsq);
6791 }
6792
6793 if (func != NULL)
6794 ipsq_enq(ipsq, q, mp, func, type, ill);
6795
6796 mutex_exit(&ipx->ipx_lock);
6797 mutex_exit(&ipsq->ipsq_lock);
6798 mutex_exit(&ill->ill_lock);
6799 RELEASE_CONN_LOCK(q);
6800 return (NULL);
6801 }
6802
6803 /*
6804 * The ipsq_t (ipsq) is the synchronization data structure used to serialize
6805 * certain critical operations like plumbing (i.e. most set ioctls), etc.
6806 * There is one ipsq per phyint. The ipsq
6807 * serializes exclusive ioctls issued by applications on a per ipsq basis in
6808 * ipsq_xopq_mphead. It also protects against multiple threads executing in
6809 * the ipsq. Responses from the driver pertain to the current ioctl (say a
6810 * DL_BIND_ACK in response to a DL_BIND_REQ initiated as part of bringing
6811 * up the interface) and are enqueued in ipx_mphead.
6812 *
 * If a thread does not want to reenter the ipsq when it is already writer,
 * it must ensure that neither the specified reentry point (to be called
 * later when the ipsq is empty) nor any code path starting from that
 * reentry point ever tries to enter the ipsq again; otherwise it can lead
 * to an infinite loop. The reentry point ip_rput_dlpi_writer is an example.
6818 * When the thread that is currently exclusive finishes, it (ipsq_exit)
6819 * dequeues the requests waiting to become exclusive in ipx_mphead and calls
6820 * the reentry point. When the list at ipx_mphead becomes empty ipsq_exit
6821 * proceeds to dequeue the next ioctl in ipsq_xopq_mphead and start the next
6822 * ioctl if the current ioctl has completed. If the current ioctl is still
6823 * in progress it simply returns. The current ioctl could be waiting for
 * a response from another module (the driver), or could be waiting for
6825 * the ipif/ill/ire refcnts to drop to zero. In such a case the ipx_pending_mp
6826 * and ipx_pending_ipif are set. ipx_current_ipif is set throughout the
6827 * execution of the ioctl and ipsq_exit does not start the next ioctl unless
6828 * ipx_current_ipif is NULL which happens only once the ioctl is complete and
6829 * all associated DLPI operations have completed.
6830 */
6831
6832 /*
6833 * Try to enter the IPSQ corresponding to `ipif' or `ill' exclusively (`ipif'
6834 * and `ill' cannot both be specified). Returns a pointer to the entered IPSQ
6835 * on success, or NULL on failure. The caller ensures ipif/ill is valid by
6836 * refholding it as necessary. If the IPSQ cannot be entered and `func' is
6837 * non-NULL, then `func' will be called back with `q' and `mp' once the IPSQ
6838 * can be entered. If `func' is NULL, then `q' and `mp' are ignored.
6839 */
6840 ipsq_t *
6841 ipsq_try_enter(ipif_t *ipif, ill_t *ill, queue_t *q, mblk_t *mp,
6842 ipsq_func_t func, int type, boolean_t reentry_ok)
6843 {
6844 ip_stack_t *ipst;
6845 ipsq_t *ipsq;
6846
6847 /* Only 1 of ipif or ill can be specified */
6848 ASSERT((ipif != NULL) ^ (ill != NULL));
6849
6850 if (ipif != NULL)
6851 ill = ipif->ipif_ill;
6852 ipst = ill->ill_ipst;
6853
6854 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
6855 ipsq = ipsq_try_enter_internal(ill, q, mp, func, type, reentry_ok);
6856 rw_exit(&ipst->ips_ill_g_lock);
6857
6858 return (ipsq);
6859 }
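
/*
 * Editor's sketch (not part of the build): the typical asynchronous
 * pattern, here assuming ip_process_ioctl as the reentry function. On
 * failure the message has been queued and the function will be called
 * back once the IPSQ can be entered, so the caller simply returns.
 *
 *	ipsq = ipsq_try_enter(ipif, NULL, q, mp, ip_process_ioctl,
 *	    NEW_OP, B_TRUE);
 *	if (ipsq == NULL)
 *		return;
 *	... perform the exclusive operation ...
 *	ipsq_exit(ipsq);
 */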
6860
6861 /*
6862 * Try to enter the IPSQ corresponding to `ill' as writer. The caller ensures
6863 * ill is valid by refholding it if necessary; we will refrele. If the IPSQ
6864 * cannot be entered, the mp is queued for completion.
6865 */
6866 void
6867 qwriter_ip(ill_t *ill, queue_t *q, mblk_t *mp, ipsq_func_t func, int type,
6868 boolean_t reentry_ok)
6869 {
6870 ipsq_t *ipsq;
6871
6872 ipsq = ipsq_try_enter(NULL, ill, q, mp, func, type, reentry_ok);
6873
6874 /*
6875 * Drop the caller's refhold on the ill. This is safe since we either
6876 * entered the IPSQ (and thus are exclusive), or failed to enter the
6877 * IPSQ, in which case we return without accessing ill anymore. This
	 * is needed because func needs to see the correct refcount;
	 * e.g., removeif can work correctly only then.
6880 */
6881 ill_refrele(ill);
6882 if (ipsq != NULL) {
6883 (*func)(ipsq, q, mp, NULL);
6884 ipsq_exit(ipsq);
6885 }
6886 }
6887
6888 /*
6889 * Exit the specified IPSQ. If this is the final exit on it then drain it
6890 * prior to exiting. Caller must be writer on the specified IPSQ.
6891 */
6892 void
6893 ipsq_exit(ipsq_t *ipsq)
6894 {
6895 mblk_t *mp;
6896 ipsq_t *mp_ipsq;
6897 queue_t *q;
6898 phyint_t *phyi;
6899 ipsq_func_t func;
6900
6901 ASSERT(IAM_WRITER_IPSQ(ipsq));
6902
6903 ASSERT(ipsq->ipsq_xop->ipx_reentry_cnt >= 1);
6904 if (ipsq->ipsq_xop->ipx_reentry_cnt != 1) {
6905 ipsq->ipsq_xop->ipx_reentry_cnt--;
6906 return;
6907 }
6908
6909 for (;;) {
6910 phyi = ipsq->ipsq_phyint;
6911 mp = ipsq_dq(ipsq);
6912 mp_ipsq = (mp == NULL) ? NULL : (ipsq_t *)mp->b_next;
6913
6914 /*
6915 * If we've changed to a new IPSQ, and the phyint associated
6916 * with the old one has gone away, free the old IPSQ. Note
6917 * that this cannot happen while the IPSQ is in a group.
6918 */
6919 if (mp_ipsq != ipsq && phyi == NULL) {
6920 ASSERT(ipsq->ipsq_next == ipsq);
6921 ASSERT(ipsq->ipsq_xop == &ipsq->ipsq_ownxop);
6922 ipsq_delete(ipsq);
6923 }
6924
6925 if (mp == NULL)
6926 break;
6927
6928 q = mp->b_queue;
6929 func = (ipsq_func_t)mp->b_prev;
6930 ipsq = mp_ipsq;
6931 mp->b_next = mp->b_prev = NULL;
6932 mp->b_queue = NULL;
6933
6934 /*
		 * If 'q' is a conn queue, it is valid, since we did a
		 * refhold on the conn at the start of the ioctl.
6937 * If 'q' is an ill queue, it is valid, since close of an
6938 * ill will clean up its IPSQ.
6939 */
6940 (*func)(ipsq, q, mp, NULL);
6941 }
6942 }
6943
6944 /*
6945 * Used to start any igmp or mld timers that could not be started
 * while holding ill_mcast_lock. The timers can't be started while holding
 * the lock, since mld/igmp_start_timers may need to call untimeout(),
 * which can't be done while holding a lock that the timeout handler
 * acquires; otherwise there would be a deadlock, since the timeout
 * handlers mld_timeout_handler_per_ill/igmp_timeout_handler_per_ill also
 * acquire ill_mcast_lock.
6953 */
6954 void
6955 ill_mcast_timer_start(ip_stack_t *ipst)
6956 {
6957 int next;
6958
6959 mutex_enter(&ipst->ips_igmp_timer_lock);
6960 next = ipst->ips_igmp_deferred_next;
6961 ipst->ips_igmp_deferred_next = INFINITY;
6962 mutex_exit(&ipst->ips_igmp_timer_lock);
6963
6964 if (next != INFINITY)
6965 igmp_start_timers(next, ipst);
6966
6967 mutex_enter(&ipst->ips_mld_timer_lock);
6968 next = ipst->ips_mld_deferred_next;
6969 ipst->ips_mld_deferred_next = INFINITY;
6970 mutex_exit(&ipst->ips_mld_timer_lock);
6971
6972 if (next != INFINITY)
6973 mld_start_timers(next, ipst);
6974 }
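
/*
 * Editor's sketch (not part of the build): the expected calling pattern
 * is to drop ill_mcast_lock first and only then start any deferred
 * timers.
 *
 *	rw_exit(&ill->ill_mcast_lock);
 *	ill_mcast_timer_start(ill->ill_ipst);
 */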
6975
6976 /*
6977 * Start the current exclusive operation on `ipsq'; associate it with `ipif'
6978 * and `ioccmd'.
6979 */
6980 void
6981 ipsq_current_start(ipsq_t *ipsq, ipif_t *ipif, int ioccmd)
6982 {
6983 ill_t *ill = ipif->ipif_ill;
6984 ipxop_t *ipx = ipsq->ipsq_xop;
6985
6986 ASSERT(IAM_WRITER_IPSQ(ipsq));
6987 ASSERT(ipx->ipx_current_ipif == NULL);
6988 ASSERT(ipx->ipx_current_ioctl == 0);
6989
6990 ipx->ipx_current_done = B_FALSE;
6991 ipx->ipx_current_ioctl = ioccmd;
6992 mutex_enter(&ipx->ipx_lock);
6993 ipx->ipx_current_ipif = ipif;
6994 mutex_exit(&ipx->ipx_lock);
6995
6996 /*
6997 * Set IPIF_CHANGING on one or more ipifs associated with the
6998 * current exclusive operation. IPIF_CHANGING prevents any new
6999 * references to the ipif (so that the references will eventually
7000 * drop to zero) and also prevents any "get" operations (e.g.,
7001 * SIOCGLIFFLAGS) from being able to access the ipif until the
7002 * operation has completed and the ipif is again in a stable state.
7003 *
7004 * For ioctls, IPIF_CHANGING is set on the ipif associated with the
7005 * ioctl. For internal operations (where ioccmd is zero), all ipifs
7006 * on the ill are marked with IPIF_CHANGING since it's unclear which
7007 * ipifs will be affected.
7008 *
7009 * Note that SIOCLIFREMOVEIF is a special case as it sets
7010 * IPIF_CONDEMNED internally after identifying the right ipif to
7011 * operate on.
7012 */
7013 switch (ioccmd) {
7014 case SIOCLIFREMOVEIF:
7015 break;
7016 case 0:
7017 mutex_enter(&ill->ill_lock);
7018 ipif = ipif->ipif_ill->ill_ipif;
7019 for (; ipif != NULL; ipif = ipif->ipif_next)
7020 ipif->ipif_state_flags |= IPIF_CHANGING;
7021 mutex_exit(&ill->ill_lock);
7022 break;
7023 default:
7024 mutex_enter(&ill->ill_lock);
7025 ipif->ipif_state_flags |= IPIF_CHANGING;
7026 mutex_exit(&ill->ill_lock);
7027 }
7028 }
7029
7030 /*
7031 * Finish the current exclusive operation on `ipsq'. Usually, this will allow
7032 * the next exclusive operation to begin once we ipsq_exit(). However, if
7033 * pending DLPI operations remain, then we will wait for the queue to drain
7034 * before allowing the next exclusive operation to begin. This ensures that
7035 * DLPI operations from one exclusive operation are never improperly processed
7036 * as part of a subsequent exclusive operation.
7037 */
7038 void
7039 ipsq_current_finish(ipsq_t *ipsq)
7040 {
7041 ipxop_t *ipx = ipsq->ipsq_xop;
7042 t_uscalar_t dlpi_pending = DL_PRIM_INVAL;
7043 ipif_t *ipif = ipx->ipx_current_ipif;
7044
7045 ASSERT(IAM_WRITER_IPSQ(ipsq));
7046
7047 /*
	 * For SIOCLIFREMOVEIF, the ipif has already been blown away
7049 * (but in that case, IPIF_CHANGING will already be clear and no
7050 * pending DLPI messages can remain).
7051 */
7052 if (ipx->ipx_current_ioctl != SIOCLIFREMOVEIF) {
7053 ill_t *ill = ipif->ipif_ill;
7054
7055 mutex_enter(&ill->ill_lock);
7056 dlpi_pending = ill->ill_dlpi_pending;
7057 if (ipx->ipx_current_ioctl == 0) {
7058 ipif = ill->ill_ipif;
7059 for (; ipif != NULL; ipif = ipif->ipif_next)
7060 ipif->ipif_state_flags &= ~IPIF_CHANGING;
7061 } else {
7062 ipif->ipif_state_flags &= ~IPIF_CHANGING;
7063 }
7064 mutex_exit(&ill->ill_lock);
7065 }
7066
7067 ASSERT(!ipx->ipx_current_done);
7068 ipx->ipx_current_done = B_TRUE;
7069 ipx->ipx_current_ioctl = 0;
7070 if (dlpi_pending == DL_PRIM_INVAL) {
7071 mutex_enter(&ipx->ipx_lock);
7072 ipx->ipx_current_ipif = NULL;
7073 mutex_exit(&ipx->ipx_lock);
7074 }
7075 }
7076
7077 /*
7078 * The ill is closing. Flush all messages on the ipsq that originated
 * from this ill. Usually there won't be any messages on the ipsq_xopq_mphead
7080 * for this ill since ipsq_enter could not have entered until then.
7081 * New messages can't be queued since the CONDEMNED flag is set.
7082 */
7083 static void
7084 ipsq_flush(ill_t *ill)
7085 {
7086 queue_t *q;
7087 mblk_t *prev;
7088 mblk_t *mp;
7089 mblk_t *mp_next;
7090 ipxop_t *ipx = ill->ill_phyint->phyint_ipsq->ipsq_xop;
7091
7092 ASSERT(IAM_WRITER_ILL(ill));
7093
7094 /*
7095 * Flush any messages sent up by the driver.
7096 */
7097 mutex_enter(&ipx->ipx_lock);
7098 for (prev = NULL, mp = ipx->ipx_mphead; mp != NULL; mp = mp_next) {
7099 mp_next = mp->b_next;
7100 q = mp->b_queue;
7101 if (q == ill->ill_rq || q == ill->ill_wq) {
7102 /* dequeue mp */
7103 if (prev == NULL)
7104 ipx->ipx_mphead = mp->b_next;
7105 else
7106 prev->b_next = mp->b_next;
7107 if (ipx->ipx_mptail == mp) {
7108 ASSERT(mp_next == NULL);
7109 ipx->ipx_mptail = prev;
7110 }
7111 inet_freemsg(mp);
7112 } else {
7113 prev = mp;
7114 }
7115 }
7116 mutex_exit(&ipx->ipx_lock);
7117 (void) ipsq_pending_mp_cleanup(ill, NULL);
7118 ipsq_xopq_mp_cleanup(ill, NULL);
7119 }
7120
7121 /*
7122 * Parse an ifreq or lifreq struct coming down ioctls and refhold
7123 * and return the associated ipif.
7124 * Return value:
 * Non-zero: An error has occurred. ci may not be filled out.
7126 * zero : ci is filled out with the ioctl cmd in ci.ci_name, and
7127 * a held ipif in ci.ci_ipif.
7128 */
7129 int
7130 ip_extract_lifreq(queue_t *q, mblk_t *mp, const ip_ioctl_cmd_t *ipip,
7131 cmd_info_t *ci)
7132 {
7133 char *name;
7134 struct ifreq *ifr;
7135 struct lifreq *lifr;
7136 ipif_t *ipif = NULL;
7137 ill_t *ill;
7138 conn_t *connp;
7139 boolean_t isv6;
7140 int err;
7141 mblk_t *mp1;
7142 zoneid_t zoneid;
7143 ip_stack_t *ipst;
7144
7145 if (q->q_next != NULL) {
7146 ill = (ill_t *)q->q_ptr;
7147 isv6 = ill->ill_isv6;
7148 connp = NULL;
7149 zoneid = ALL_ZONES;
7150 ipst = ill->ill_ipst;
7151 } else {
7152 ill = NULL;
7153 connp = Q_TO_CONN(q);
7154 isv6 = (connp->conn_family == AF_INET6);
7155 zoneid = connp->conn_zoneid;
7156 if (zoneid == GLOBAL_ZONEID) {
7157 /* global zone can access ipifs in all zones */
7158 zoneid = ALL_ZONES;
7159 }
7160 ipst = connp->conn_netstack->netstack_ip;
7161 }
7162
7163 /* Has been checked in ip_wput_nondata */
7164 mp1 = mp->b_cont->b_cont;
7165
7166 if (ipip->ipi_cmd_type == IF_CMD) {
		/* This is an old style SIOC[GS]IF* command */
7168 ifr = (struct ifreq *)mp1->b_rptr;
7169 /*
7170 * Null terminate the string to protect against buffer
7171 * overrun. String was generated by user code and may not
7172 * be trusted.
7173 */
7174 ifr->ifr_name[IFNAMSIZ - 1] = '\0';
7175 name = ifr->ifr_name;
7176 ci->ci_sin = (sin_t *)&ifr->ifr_addr;
7177 ci->ci_sin6 = NULL;
7178 ci->ci_lifr = (struct lifreq *)ifr;
7179 } else {
		/* This is a new style SIOC[GS]LIF* command */
7181 ASSERT(ipip->ipi_cmd_type == LIF_CMD);
7182 lifr = (struct lifreq *)mp1->b_rptr;
7183 /*
7184 * Null terminate the string to protect against buffer
7185 * overrun. String was generated by user code and may not
7186 * be trusted.
7187 */
7188 lifr->lifr_name[LIFNAMSIZ - 1] = '\0';
7189 name = lifr->lifr_name;
7190 ci->ci_sin = (sin_t *)&lifr->lifr_addr;
7191 ci->ci_sin6 = (sin6_t *)&lifr->lifr_addr;
7192 ci->ci_lifr = lifr;
7193 }
7194
7195 if (ipip->ipi_cmd == SIOCSLIFNAME) {
7196 /*
		 * The ioctl will fail if it comes down
		 * a conn stream.
7199 */
7200 if (ill == NULL) {
7201 /*
			 * Not an ill queue; fail with ENXIO.
7204 */
7205 return (ENXIO);
7206 }
7207 ipif = ill->ill_ipif;
7208 ipif_refhold(ipif);
7209 } else {
7210 /*
7211 * Ensure that ioctls don't see any internal state changes
7212 * caused by set ioctls by deferring them if IPIF_CHANGING is
7213 * set.
7214 */
7215 ipif = ipif_lookup_on_name_async(name, mi_strlen(name),
7216 isv6, zoneid, q, mp, ip_process_ioctl, &err, ipst);
7217 if (ipif == NULL) {
7218 if (err == EINPROGRESS)
7219 return (err);
7220 err = 0; /* Ensure we don't use it below */
7221 }
7222 }
7223
7224 /*
	 * Old style [GS]IFCMD does not admit an IPv6 ipif
7226 */
7227 if (ipif != NULL && ipif->ipif_isv6 && ipip->ipi_cmd_type == IF_CMD) {
7228 ipif_refrele(ipif);
7229 return (ENXIO);
7230 }
7231
7232 if (ipif == NULL && ill != NULL && ill->ill_ipif != NULL &&
7233 name[0] == '\0') {
7234 /*
		 * Handle a SIOC?IF* with a null name
7236 * during plumb (on the ill queue before the I_PLINK).
7237 */
7238 ipif = ill->ill_ipif;
7239 ipif_refhold(ipif);
7240 }
7241
7242 if (ipif == NULL)
7243 return (ENXIO);
7244
7245 DTRACE_PROBE4(ipif__ioctl, char *, "ip_extract_lifreq",
7246 int, ipip->ipi_cmd, ill_t *, ipif->ipif_ill, ipif_t *, ipif);
7247
7248 ci->ci_ipif = ipif;
7249 return (0);
7250 }
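
/*
 * Editor's sketch (not part of the build): a hypothetical user-space
 * request that is parsed here; the interface name is illustrative.
 *
 *	#include <sys/socket.h>
 *	#include <sys/sockio.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *
 *	struct lifreq lifr;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	(void) memset(&lifr, 0, sizeof (lifr));
 *	(void) strlcpy(lifr.lifr_name, "net0", sizeof (lifr.lifr_name));
 *	if (ioctl(s, SIOCGLIFFLAGS, &lifr) < 0)
 *		perror("SIOCGLIFFLAGS");
 */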
7251
7252 /*
7253 * Return the total number of ipifs.
7254 */
7255 static uint_t
7256 ip_get_numifs(zoneid_t zoneid, ip_stack_t *ipst)
7257 {
7258 uint_t numifs = 0;
7259 ill_t *ill;
7260 ill_walk_context_t ctx;
7261 ipif_t *ipif;
7262
7263 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
7264 ill = ILL_START_WALK_V4(&ctx, ipst);
7265 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
7266 if (IS_UNDER_IPMP(ill))
7267 continue;
7268 for (ipif = ill->ill_ipif; ipif != NULL;
7269 ipif = ipif->ipif_next) {
7270 if (ipif->ipif_zoneid == zoneid ||
7271 ipif->ipif_zoneid == ALL_ZONES)
7272 numifs++;
7273 }
7274 }
7275 rw_exit(&ipst->ips_ill_g_lock);
7276 return (numifs);
7277 }
7278
/*
 * Return the number of logical interfaces that match `family' and
 * `lifn_flags' and are visible to zone `zoneid'.
 */
7282 static uint_t
7283 ip_get_numlifs(int family, int lifn_flags, zoneid_t zoneid, ip_stack_t *ipst)
7284 {
7285 uint_t numifs = 0;
7286 ill_t *ill;
7287 ipif_t *ipif;
7288 ill_walk_context_t ctx;
7289
7290 ip1dbg(("ip_get_numlifs(%d %u %d)\n", family, lifn_flags, (int)zoneid));
7291
7292 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
7293 if (family == AF_INET)
7294 ill = ILL_START_WALK_V4(&ctx, ipst);
7295 else if (family == AF_INET6)
7296 ill = ILL_START_WALK_V6(&ctx, ipst);
7297 else
7298 ill = ILL_START_WALK_ALL(&ctx, ipst);
7299
7300 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
7301 if (IS_UNDER_IPMP(ill) && !(lifn_flags & LIFC_UNDER_IPMP))
7302 continue;
7303
7304 for (ipif = ill->ill_ipif; ipif != NULL;
7305 ipif = ipif->ipif_next) {
7306 if ((ipif->ipif_flags & IPIF_NOXMIT) &&
7307 !(lifn_flags & LIFC_NOXMIT))
7308 continue;
7309 if ((ipif->ipif_flags & IPIF_TEMPORARY) &&
7310 !(lifn_flags & LIFC_TEMPORARY))
7311 continue;
7312 if (((ipif->ipif_flags &
7313 (IPIF_NOXMIT|IPIF_NOLOCAL|
7314 IPIF_DEPRECATED)) ||
7315 IS_LOOPBACK(ill) ||
7316 !(ipif->ipif_flags & IPIF_UP)) &&
7317 (lifn_flags & LIFC_EXTERNAL_SOURCE))
7318 continue;
7319
7320 if (zoneid != ipif->ipif_zoneid &&
7321 ipif->ipif_zoneid != ALL_ZONES &&
7322 (zoneid != GLOBAL_ZONEID ||
7323 !(lifn_flags & LIFC_ALLZONES)))
7324 continue;
7325
7326 numifs++;
7327 }
7328 }
7329 rw_exit(&ipst->ips_ill_g_lock);
7330 return (numifs);
7331 }
7332
7333 uint_t
7334 ip_get_lifsrcofnum(ill_t *ill)
7335 {
7336 uint_t numifs = 0;
7337 ill_t *ill_head = ill;
7338 ip_stack_t *ipst = ill->ill_ipst;
7339
7340 /*
 * ill_g_usesrc_lock protects ill_usesrc_grp_next; for example, some
 * other thread may be trying to relink the ILLs in this usesrc group
 * and adjusting the ill_usesrc_grp_next pointers.
7344 */
7345 rw_enter(&ipst->ips_ill_g_usesrc_lock, RW_READER);
7346 if ((ill->ill_usesrc_ifindex == 0) &&
7347 (ill->ill_usesrc_grp_next != NULL)) {
7348 for (; (ill != NULL) && (ill->ill_usesrc_grp_next != ill_head);
7349 ill = ill->ill_usesrc_grp_next)
7350 numifs++;
7351 }
7352 rw_exit(&ipst->ips_ill_g_usesrc_lock);
7353
7354 return (numifs);
7355 }
7356
7357 /* Null values are passed in for ipif, sin, and ifreq */
7358 /* ARGSUSED */
7359 int
7360 ip_sioctl_get_ifnum(ipif_t *dummy_ipif, sin_t *dummy_sin, queue_t *q,
7361 mblk_t *mp, ip_ioctl_cmd_t *ipip, void *ifreq)
7362 {
7363 int *nump;
7364 conn_t *connp = Q_TO_CONN(q);
7365
7366 ASSERT(q->q_next == NULL); /* not a valid ioctl for ip as a module */
7367
7368 /* Existence of b_cont->b_cont checked in ip_wput_nondata */
7369 nump = (int *)mp->b_cont->b_cont->b_rptr;
7370
7371 *nump = ip_get_numifs(connp->conn_zoneid,
7372 connp->conn_netstack->netstack_ip);
7373 ip1dbg(("ip_sioctl_get_ifnum numifs %d", *nump));
7374 return (0);
7375 }
7376
7377 /* Null values are passed in for ipif, sin, and ifreq */
7378 /* ARGSUSED */
7379 int
7380 ip_sioctl_get_lifnum(ipif_t *dummy_ipif, sin_t *dummy_sin,
7381 queue_t *q, mblk_t *mp, ip_ioctl_cmd_t *ipip, void *ifreq)
7382 {
7383 struct lifnum *lifn;
7384 mblk_t *mp1;
7385 conn_t *connp = Q_TO_CONN(q);
7386
7387 ASSERT(q->q_next == NULL); /* not a valid ioctl for ip as a module */
7388
7389 /* Existence checked in ip_wput_nondata */
7390 mp1 = mp->b_cont->b_cont;
7391
7392 lifn = (struct lifnum *)mp1->b_rptr;
7393 switch (lifn->lifn_family) {
7394 case AF_UNSPEC:
7395 case AF_INET:
7396 case AF_INET6:
7397 break;
7398 default:
7399 return (EAFNOSUPPORT);
7400 }
7401
7402 lifn->lifn_count = ip_get_numlifs(lifn->lifn_family, lifn->lifn_flags,
7403 connp->conn_zoneid, connp->conn_netstack->netstack_ip);
7404 ip1dbg(("ip_sioctl_get_lifnum numifs %d", lifn->lifn_count));
7405 return (0);
7406 }
7407
7408 /* ARGSUSED */
7409 int
7410 ip_sioctl_get_ifconf(ipif_t *dummy_ipif, sin_t *dummy_sin, queue_t *q,
7411 mblk_t *mp, ip_ioctl_cmd_t *ipip, void *ifreq)
7412 {
7413 STRUCT_HANDLE(ifconf, ifc);
7414 mblk_t *mp1;
7415 struct iocblk *iocp;
7416 struct ifreq *ifr;
7417 ill_walk_context_t ctx;
7418 ill_t *ill;
7419 ipif_t *ipif;
7420 struct sockaddr_in *sin;
7421 int32_t ifclen;
7422 zoneid_t zoneid;
7423 ip_stack_t *ipst = CONNQ_TO_IPST(q);
7424
7425 ASSERT(q->q_next == NULL); /* not valid ioctls for ip as a module */
7426
7427 ip1dbg(("ip_sioctl_get_ifconf"));
7428 /* Existence verified in ip_wput_nondata */
7429 mp1 = mp->b_cont->b_cont;
7430 iocp = (struct iocblk *)mp->b_rptr;
7431 zoneid = Q_TO_CONN(q)->conn_zoneid;
7432
7433 /*
7434 * The original SIOCGIFCONF passed in a struct ifconf which specified
7435 * the user buffer address and length into which the list of struct
7436 * ifreqs was to be copied. Since AT&T Streams does not seem to
7437 * allow M_COPYOUT to be used in conjunction with I_STR IOCTLS,
7438 * the SIOCGIFCONF operation was redefined to simply provide
7439 * a large output buffer into which we are supposed to jam the ifreq
7440 * array. The same ioctl command code was used, despite the fact that
7441 * both the applications and the kernel code had to change, thus making
7442 * it impossible to support both interfaces.
7443 *
7444 * For reasons not good enough to try to explain, the following
7445 * algorithm is used for deciding what to do with one of these:
7446 * If the IOCTL comes in as an I_STR, it is assumed to be of the new
7447 * form with the output buffer coming down as the continuation message.
7448 * If it arrives as a TRANSPARENT IOCTL, it is assumed to be old style,
7449 * and we have to copy in the ifconf structure to find out how big the
7450 * output buffer is and where to copy out to. Sure no problem...
7451 *
7452 */
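	/*
	 * Illustrative sketch (an assumption about typical userland use,
	 * not code from this file): the TRANSPARENT "old style" path is
	 * the one a plain socket ioctl takes:
	 *
	 *	struct ifreq ifrs[32];
	 *	struct ifconf ifc;
	 *
	 *	ifc.ifc_len = sizeof (ifrs);
	 *	ifc.ifc_req = ifrs;
	 *	if (ioctl(s, SIOCGIFCONF, (caddr_t)&ifc) >= 0)
	 *		nifs = ifc.ifc_len / sizeof (struct ifreq);
	 *
	 * An I_STR caller instead hands the output buffer itself down as
	 * the continuation mblk, with no struct ifconf at all.
	 */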
7453 STRUCT_SET_HANDLE(ifc, iocp->ioc_flag, NULL);
7454 if ((mp1->b_wptr - mp1->b_rptr) == STRUCT_SIZE(ifc)) {
7455 int numifs = 0;
7456 size_t ifc_bufsize;
7457
7458 /*
7459 * Must be (better be!) continuation of a TRANSPARENT
7460 * IOCTL. We just copied in the ifconf structure.
7461 */
7462 STRUCT_SET_HANDLE(ifc, iocp->ioc_flag,
7463 (struct ifconf *)mp1->b_rptr);
7464
7465 /*
7466 * Allocate a buffer to hold requested information.
7467 *
7468 * If ifc_len is larger than what is needed, we only
7469 * allocate what we will use.
7470 *
7471 * If ifc_len is smaller than what is needed, return
7472 * EINVAL.
7473 *
		 * XXX: the ill_t structure can have 2 counters, for
7475 * v4 and v6 (not just ill_ipif_up_count) to store the
7476 * number of interfaces for a device, so we don't need
7477 * to count them here...
7478 */
7479 numifs = ip_get_numifs(zoneid, ipst);
7480
7481 ifclen = STRUCT_FGET(ifc, ifc_len);
7482 ifc_bufsize = numifs * sizeof (struct ifreq);
7483 if (ifc_bufsize > ifclen) {
7484 if (iocp->ioc_cmd == O_SIOCGIFCONF) {
7485 /* old behaviour */
7486 return (EINVAL);
7487 } else {
7488 ifc_bufsize = ifclen;
7489 }
7490 }
7491
7492 mp1 = mi_copyout_alloc(q, mp,
7493 STRUCT_FGETP(ifc, ifc_buf), ifc_bufsize, B_FALSE);
7494 if (mp1 == NULL)
7495 return (ENOMEM);
7496
7497 mp1->b_wptr = mp1->b_rptr + ifc_bufsize;
7498 }
7499 bzero(mp1->b_rptr, mp1->b_wptr - mp1->b_rptr);
	/*
	 * The SIOCGIFCONF ioctl only knows about IPv4 addresses, so don't
	 * try to tell it about interfaces with IPv6-only addresses.
	 * (The last parameter, 'isv6', is B_FALSE.)
	 */
7506
7507 ifr = (struct ifreq *)mp1->b_rptr;
7508
7509 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
7510 ill = ILL_START_WALK_V4(&ctx, ipst);
7511 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
7512 if (IS_UNDER_IPMP(ill))
7513 continue;
7514 for (ipif = ill->ill_ipif; ipif != NULL;
7515 ipif = ipif->ipif_next) {
7516 if (zoneid != ipif->ipif_zoneid &&
7517 ipif->ipif_zoneid != ALL_ZONES)
7518 continue;
7519 if ((uchar_t *)&ifr[1] > mp1->b_wptr) {
7520 if (iocp->ioc_cmd == O_SIOCGIFCONF) {
7521 /* old behaviour */
7522 rw_exit(&ipst->ips_ill_g_lock);
7523 return (EINVAL);
7524 } else {
7525 goto if_copydone;
7526 }
7527 }
7528 ipif_get_name(ipif, ifr->ifr_name,
7529 sizeof (ifr->ifr_name));
7530 sin = (sin_t *)&ifr->ifr_addr;
7531 *sin = sin_null;
7532 sin->sin_family = AF_INET;
7533 sin->sin_addr.s_addr = ipif->ipif_lcl_addr;
7534 ifr++;
7535 }
7536 }
7537 if_copydone:
7538 rw_exit(&ipst->ips_ill_g_lock);
7539 mp1->b_wptr = (uchar_t *)ifr;
7540
7541 if (STRUCT_BUF(ifc) != NULL) {
7542 STRUCT_FSET(ifc, ifc_len,
7543 (int)((uchar_t *)ifr - mp1->b_rptr));
7544 }
7545 return (0);
7546 }
7547
7548 /*
7549 * Get the interfaces using the address hosted on the interface passed in,
 * as a source address.
7551 */
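/*
 * Illustrative sketch of the expected userland call (an assumption, not
 * code from this file; the interface name is hypothetical).  The caller
 * passes the ifindex of the usesrc target and a buffer for the lifreqs;
 * the size actually needed is returned in lifs_len:
 *
 *	struct lifsrcof lifs;
 *
 *	bzero(&lifs, sizeof (lifs));
 *	lifs.lifs_ifindex = if_nametoindex("hme0");
 *	lifs.lifs_maxlen = bufsize;
 *	lifs.lifs_buf = buf;
 *	(void) ioctl(s, SIOCGLIFSRCOF, &lifs);
 */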
7552 /* ARGSUSED */
7553 int
7554 ip_sioctl_get_lifsrcof(ipif_t *dummy_ipif, sin_t *dummy_sin, queue_t *q,
7555 mblk_t *mp, ip_ioctl_cmd_t *ipip, void *ifreq)
7556 {
7557 mblk_t *mp1;
7558 ill_t *ill, *ill_head;
7559 ipif_t *ipif, *orig_ipif;
7560 int numlifs = 0;
7561 size_t lifs_bufsize, lifsmaxlen;
7562 struct lifreq *lifr;
7563 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
7564 uint_t ifindex;
7565 zoneid_t zoneid;
7566 boolean_t isv6 = B_FALSE;
7567 struct sockaddr_in *sin;
7568 struct sockaddr_in6 *sin6;
7569 STRUCT_HANDLE(lifsrcof, lifs);
7570 ip_stack_t *ipst;
7571
7572 ipst = CONNQ_TO_IPST(q);
7573
7574 ASSERT(q->q_next == NULL);
7575
7576 zoneid = Q_TO_CONN(q)->conn_zoneid;
7577
7578 /* Existence verified in ip_wput_nondata */
7579 mp1 = mp->b_cont->b_cont;
7580
7581 /*
7582 * Must be (better be!) continuation of a TRANSPARENT
7583 * IOCTL. We just copied in the lifsrcof structure.
7584 */
7585 STRUCT_SET_HANDLE(lifs, iocp->ioc_flag,
7586 (struct lifsrcof *)mp1->b_rptr);
7587
7588 if (MBLKL(mp1) != STRUCT_SIZE(lifs))
7589 return (EINVAL);
7590
7591 ifindex = STRUCT_FGET(lifs, lifs_ifindex);
7592 isv6 = (Q_TO_CONN(q))->conn_family == AF_INET6;
7593 ipif = ipif_lookup_on_ifindex(ifindex, isv6, zoneid, ipst);
7594 if (ipif == NULL) {
7595 ip1dbg(("ip_sioctl_get_lifsrcof: no ipif for ifindex %d\n",
7596 ifindex));
7597 return (ENXIO);
7598 }
7599
7600 /* Allocate a buffer to hold requested information */
7601 numlifs = ip_get_lifsrcofnum(ipif->ipif_ill);
7602 lifs_bufsize = numlifs * sizeof (struct lifreq);
7603 lifsmaxlen = STRUCT_FGET(lifs, lifs_maxlen);
7604 /* The actual size needed is always returned in lifs_len */
7605 STRUCT_FSET(lifs, lifs_len, lifs_bufsize);
7606
7607 /* If the amount we need is more than what is passed in, abort */
7608 if (lifs_bufsize > lifsmaxlen || lifs_bufsize == 0) {
7609 ipif_refrele(ipif);
7610 return (0);
7611 }
7612
7613 mp1 = mi_copyout_alloc(q, mp,
7614 STRUCT_FGETP(lifs, lifs_buf), lifs_bufsize, B_FALSE);
7615 if (mp1 == NULL) {
7616 ipif_refrele(ipif);
7617 return (ENOMEM);
7618 }
7619
7620 mp1->b_wptr = mp1->b_rptr + lifs_bufsize;
7621 bzero(mp1->b_rptr, lifs_bufsize);
7622
7623 lifr = (struct lifreq *)mp1->b_rptr;
7624
7625 ill = ill_head = ipif->ipif_ill;
7626 orig_ipif = ipif;
7627
7628 /* ill_g_usesrc_lock protects ill_usesrc_grp_next */
7629 rw_enter(&ipst->ips_ill_g_usesrc_lock, RW_READER);
7630 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
7631
7632 ill = ill->ill_usesrc_grp_next; /* start from next ill */
7633 for (; (ill != NULL) && (ill != ill_head);
7634 ill = ill->ill_usesrc_grp_next) {
7635
7636 if ((uchar_t *)&lifr[1] > mp1->b_wptr)
7637 break;
7638
7639 ipif = ill->ill_ipif;
7640 ipif_get_name(ipif, lifr->lifr_name, sizeof (lifr->lifr_name));
7641 if (ipif->ipif_isv6) {
7642 sin6 = (sin6_t *)&lifr->lifr_addr;
7643 *sin6 = sin6_null;
7644 sin6->sin6_family = AF_INET6;
7645 sin6->sin6_addr = ipif->ipif_v6lcl_addr;
7646 lifr->lifr_addrlen = ip_mask_to_plen_v6(
7647 &ipif->ipif_v6net_mask);
7648 } else {
7649 sin = (sin_t *)&lifr->lifr_addr;
7650 *sin = sin_null;
7651 sin->sin_family = AF_INET;
7652 sin->sin_addr.s_addr = ipif->ipif_lcl_addr;
7653 lifr->lifr_addrlen = ip_mask_to_plen(
7654 ipif->ipif_net_mask);
7655 }
7656 lifr++;
7657 }
7658 rw_exit(&ipst->ips_ill_g_lock);
7659 rw_exit(&ipst->ips_ill_g_usesrc_lock);
7660 ipif_refrele(orig_ipif);
7661 mp1->b_wptr = (uchar_t *)lifr;
7662 STRUCT_FSET(lifs, lifs_len, (int)((uchar_t *)lifr - mp1->b_rptr));
7663
7664 return (0);
7665 }
7666
7667 /* ARGSUSED */
7668 int
7669 ip_sioctl_get_lifconf(ipif_t *dummy_ipif, sin_t *dummy_sin, queue_t *q,
7670 mblk_t *mp, ip_ioctl_cmd_t *ipip, void *ifreq)
7671 {
7672 mblk_t *mp1;
7673 int list;
7674 ill_t *ill;
7675 ipif_t *ipif;
7676 int flags;
7677 int numlifs = 0;
7678 size_t lifc_bufsize;
7679 struct lifreq *lifr;
7680 sa_family_t family;
7681 struct sockaddr_in *sin;
7682 struct sockaddr_in6 *sin6;
7683 ill_walk_context_t ctx;
7684 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
7685 int32_t lifclen;
7686 zoneid_t zoneid;
7687 STRUCT_HANDLE(lifconf, lifc);
7688 ip_stack_t *ipst = CONNQ_TO_IPST(q);
7689
7690 ip1dbg(("ip_sioctl_get_lifconf"));
7691
7692 ASSERT(q->q_next == NULL);
7693
7694 zoneid = Q_TO_CONN(q)->conn_zoneid;
7695
7696 /* Existence verified in ip_wput_nondata */
7697 mp1 = mp->b_cont->b_cont;
7698
7699 /*
7700 * An extended version of SIOCGIFCONF that takes an
7701 * additional address family and flags field.
	 * AF_UNSPEC retrieves both IPv4 and IPv6.
7703 * Unless LIFC_NOXMIT is specified the IPIF_NOXMIT
7704 * interfaces are omitted.
7705 * Similarly, IPIF_TEMPORARY interfaces are omitted
7706 * unless LIFC_TEMPORARY is specified.
7707 * If LIFC_EXTERNAL_SOURCE is specified, IPIF_NOXMIT,
7708 * IPIF_NOLOCAL, PHYI_LOOPBACK, IPIF_DEPRECATED and
7709 * not IPIF_UP interfaces are omitted. LIFC_EXTERNAL_SOURCE
7710 * has priority over LIFC_NOXMIT.
7711 */
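	/*
	 * Illustrative sketch of the canonical userland sequence (an
	 * assumption about typical use, not code from this file): size the
	 * buffer with SIOCGLIFNUM, then fetch the list:
	 *
	 *	struct lifnum lifn;
	 *	struct lifconf lifc;
	 *
	 *	lifn.lifn_family = AF_UNSPEC;
	 *	lifn.lifn_flags = 0;
	 *	(void) ioctl(s, SIOCGLIFNUM, &lifn);
	 *	lifc.lifc_family = AF_UNSPEC;
	 *	lifc.lifc_flags = 0;
	 *	lifc.lifc_len = lifn.lifn_count * sizeof (struct lifreq);
	 *	lifc.lifc_buf = malloc(lifc.lifc_len);
	 *	(void) ioctl(s, SIOCGLIFCONF, &lifc);
	 */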
7712 STRUCT_SET_HANDLE(lifc, iocp->ioc_flag, NULL);
7713
7714 if ((mp1->b_wptr - mp1->b_rptr) != STRUCT_SIZE(lifc))
7715 return (EINVAL);
7716
7717 /*
7718 * Must be (better be!) continuation of a TRANSPARENT
7719 * IOCTL. We just copied in the lifconf structure.
7720 */
7721 STRUCT_SET_HANDLE(lifc, iocp->ioc_flag, (struct lifconf *)mp1->b_rptr);
7722
7723 family = STRUCT_FGET(lifc, lifc_family);
7724 flags = STRUCT_FGET(lifc, lifc_flags);
7725
7726 switch (family) {
7727 case AF_UNSPEC:
7728 /*
7729 * walk all ILL's.
7730 */
7731 list = MAX_G_HEADS;
7732 break;
7733 case AF_INET:
7734 /*
7735 * walk only IPV4 ILL's.
7736 */
7737 list = IP_V4_G_HEAD;
7738 break;
7739 case AF_INET6:
7740 /*
7741 * walk only IPV6 ILL's.
7742 */
7743 list = IP_V6_G_HEAD;
7744 break;
7745 default:
7746 return (EAFNOSUPPORT);
7747 }
7748
7749 /*
7750 * Allocate a buffer to hold requested information.
7751 *
7752 * If lifc_len is larger than what is needed, we only
7753 * allocate what we will use.
7754 *
7755 * If lifc_len is smaller than what is needed, return
7756 * EINVAL.
7757 */
7758 numlifs = ip_get_numlifs(family, flags, zoneid, ipst);
7759 lifc_bufsize = numlifs * sizeof (struct lifreq);
7760 lifclen = STRUCT_FGET(lifc, lifc_len);
7761 if (lifc_bufsize > lifclen) {
7762 if (iocp->ioc_cmd == O_SIOCGLIFCONF)
7763 return (EINVAL);
7764 else
7765 lifc_bufsize = lifclen;
7766 }
7767
7768 mp1 = mi_copyout_alloc(q, mp,
7769 STRUCT_FGETP(lifc, lifc_buf), lifc_bufsize, B_FALSE);
7770 if (mp1 == NULL)
7771 return (ENOMEM);
7772
7773 mp1->b_wptr = mp1->b_rptr + lifc_bufsize;
7774 bzero(mp1->b_rptr, mp1->b_wptr - mp1->b_rptr);
7775
7776 lifr = (struct lifreq *)mp1->b_rptr;
7777
7778 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
7779 ill = ill_first(list, list, &ctx, ipst);
7780 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
7781 if (IS_UNDER_IPMP(ill) && !(flags & LIFC_UNDER_IPMP))
7782 continue;
7783
7784 for (ipif = ill->ill_ipif; ipif != NULL;
7785 ipif = ipif->ipif_next) {
7786 if ((ipif->ipif_flags & IPIF_NOXMIT) &&
7787 !(flags & LIFC_NOXMIT))
7788 continue;
7789
7790 if ((ipif->ipif_flags & IPIF_TEMPORARY) &&
7791 !(flags & LIFC_TEMPORARY))
7792 continue;
7793
7794 if (((ipif->ipif_flags &
7795 (IPIF_NOXMIT|IPIF_NOLOCAL|
7796 IPIF_DEPRECATED)) ||
7797 IS_LOOPBACK(ill) ||
7798 !(ipif->ipif_flags & IPIF_UP)) &&
7799 (flags & LIFC_EXTERNAL_SOURCE))
7800 continue;
7801
7802 if (zoneid != ipif->ipif_zoneid &&
7803 ipif->ipif_zoneid != ALL_ZONES &&
7804 (zoneid != GLOBAL_ZONEID ||
7805 !(flags & LIFC_ALLZONES)))
7806 continue;
7807
7808 if ((uchar_t *)&lifr[1] > mp1->b_wptr) {
7809 if (iocp->ioc_cmd == O_SIOCGLIFCONF) {
7810 rw_exit(&ipst->ips_ill_g_lock);
7811 return (EINVAL);
7812 } else {
7813 goto lif_copydone;
7814 }
7815 }
7816
7817 ipif_get_name(ipif, lifr->lifr_name,
7818 sizeof (lifr->lifr_name));
7819 lifr->lifr_type = ill->ill_type;
7820 if (ipif->ipif_isv6) {
7821 sin6 = (sin6_t *)&lifr->lifr_addr;
7822 *sin6 = sin6_null;
7823 sin6->sin6_family = AF_INET6;
7824 sin6->sin6_addr =
7825 ipif->ipif_v6lcl_addr;
7826 lifr->lifr_addrlen =
7827 ip_mask_to_plen_v6(
7828 &ipif->ipif_v6net_mask);
7829 } else {
7830 sin = (sin_t *)&lifr->lifr_addr;
7831 *sin = sin_null;
7832 sin->sin_family = AF_INET;
7833 sin->sin_addr.s_addr =
7834 ipif->ipif_lcl_addr;
7835 lifr->lifr_addrlen =
7836 ip_mask_to_plen(
7837 ipif->ipif_net_mask);
7838 }
7839 lifr++;
7840 }
7841 }
7842 lif_copydone:
7843 rw_exit(&ipst->ips_ill_g_lock);
7844
7845 mp1->b_wptr = (uchar_t *)lifr;
7846 if (STRUCT_BUF(lifc) != NULL) {
7847 STRUCT_FSET(lifc, lifc_len,
7848 (int)((uchar_t *)lifr - mp1->b_rptr));
7849 }
7850 return (0);
7851 }
7852
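/*
 * Handle SIOCGIP6ADDRPOLICY/SIOCSIP6ADDRPOLICY, which get and replace the
 * IPv6 address selection policy table.  For illustration, a hedged sketch
 * of the expected userland calling pattern (an assumption in the style of
 * ipaddrsel(1M), not code from this file): probe with a zero-length I_STR
 * request to learn the entry count (returned via ioc_rval), then fetch
 * the table:
 *
 *	struct strioctl si;
 *
 *	si.ic_cmd = SIOCGIP6ADDRPOLICY;
 *	si.ic_timout = 0;
 *	si.ic_len = 0;
 *	si.ic_dp = NULL;
 *	count = ioctl(fd, I_STR, &si);
 *	si.ic_len = count * sizeof (ip6_asp_t);
 *	si.ic_dp = malloc(si.ic_len);
 *	(void) ioctl(fd, I_STR, &si);
 */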
7853 static void
7854 ip_sioctl_ip6addrpolicy(queue_t *q, mblk_t *mp)
7855 {
7856 ip6_asp_t *table;
7857 size_t table_size;
7858 mblk_t *data_mp;
7859 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
7860 ip_stack_t *ipst;
7861
7862 if (q->q_next == NULL)
7863 ipst = CONNQ_TO_IPST(q);
7864 else
7865 ipst = ILLQ_TO_IPST(q);
7866
7867 /* These two ioctls are I_STR only */
7868 if (iocp->ioc_count == TRANSPARENT) {
7869 miocnak(q, mp, 0, EINVAL);
7870 return;
7871 }
7872
7873 data_mp = mp->b_cont;
7874 if (data_mp == NULL) {
7875 /* The user passed us a NULL argument */
7876 table = NULL;
7877 table_size = iocp->ioc_count;
7878 } else {
7879 /*
7880 * The user provided a table. The stream head
7881 * may have copied in the user data in chunks,
7882 * so make sure everything is pulled up
7883 * properly.
7884 */
7885 if (MBLKL(data_mp) < iocp->ioc_count) {
7886 mblk_t *new_data_mp;
7887 if ((new_data_mp = msgpullup(data_mp, -1)) ==
7888 NULL) {
7889 miocnak(q, mp, 0, ENOMEM);
7890 return;
7891 }
7892 freemsg(data_mp);
7893 data_mp = new_data_mp;
7894 mp->b_cont = data_mp;
7895 }
7896 table = (ip6_asp_t *)data_mp->b_rptr;
7897 table_size = iocp->ioc_count;
7898 }
7899
7900 switch (iocp->ioc_cmd) {
7901 case SIOCGIP6ADDRPOLICY:
7902 iocp->ioc_rval = ip6_asp_get(table, table_size, ipst);
7903 if (iocp->ioc_rval == -1)
7904 iocp->ioc_error = EINVAL;
7905 #if defined(_SYSCALL32_IMPL) && _LONG_LONG_ALIGNMENT_32 == 4
7906 else if (table != NULL &&
7907 (iocp->ioc_flag & IOC_MODELS) == IOC_ILP32) {
7908 ip6_asp_t *src = table;
7909 ip6_asp32_t *dst = (void *)table;
7910 int count = table_size / sizeof (ip6_asp_t);
7911 int i;
7912
7913 /*
7914 * We need to do an in-place shrink of the array
7915 * to match the alignment attributes of the
			 * 32-bit ABI that will be looking at it.
7917 */
7918 /* LINTED: logical expression always true: op "||" */
7919 ASSERT(sizeof (*src) > sizeof (*dst));
7920 for (i = 1; i < count; i++)
7921 bcopy(src + i, dst + i, sizeof (*dst));
7922 }
7923 #endif
7924 break;
7925
7926 case SIOCSIP6ADDRPOLICY:
7927 ASSERT(mp->b_prev == NULL);
7928 mp->b_prev = (void *)q;
7929 #if defined(_SYSCALL32_IMPL) && _LONG_LONG_ALIGNMENT_32 == 4
7930 /*
7931 * We pass in the datamodel here so that the ip6_asp_replace()
7932 * routine can handle converting from 32-bit to native formats
7933 * where necessary.
7934 *
7935 * A better way to handle this might be to convert the inbound
7936 * data structure here, and hang it off a new 'mp'; thus the
7937 * ip6_asp_replace() logic would always be dealing with native
		 * format data structures.
7939 *
7940 * (An even simpler way to handle these ioctls is to just
7941 * add a 32-bit trailing 'pad' field to the ip6_asp_t structure
7942 * and just recompile everything that depends on it.)
7943 */
7944 #endif
7945 ip6_asp_replace(mp, table, table_size, B_FALSE, ipst,
7946 iocp->ioc_flag & IOC_MODELS);
7947 return;
7948 }
7949
7950 DB_TYPE(mp) = (iocp->ioc_error == 0) ? M_IOCACK : M_IOCNAK;
7951 qreply(q, mp);
7952 }
7953
7954 static void
7955 ip_sioctl_dstinfo(queue_t *q, mblk_t *mp)
7956 {
7957 mblk_t *data_mp;
7958 struct dstinforeq *dir;
7959 uint8_t *end, *cur;
7960 in6_addr_t *daddr, *saddr;
7961 ipaddr_t v4daddr;
7962 ire_t *ire;
7963 ipaddr_t v4setsrc;
7964 in6_addr_t v6setsrc;
7965 char *slabel, *dlabel;
7966 boolean_t isipv4;
7967 int match_ire;
7968 ill_t *dst_ill;
7969 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
7970 conn_t *connp = Q_TO_CONN(q);
7971 zoneid_t zoneid = IPCL_ZONEID(connp);
7972 ip_stack_t *ipst = connp->conn_netstack->netstack_ip;
7973 uint64_t ipif_flags;
7974
7975 ASSERT(q->q_next == NULL); /* this ioctl not allowed if ip is module */
7976
7977 /*
7978 * This ioctl is I_STR only, and must have a
7979 * data mblk following the M_IOCTL mblk.
7980 */
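	/*
	 * Illustrative sketch of a caller (an assumption about typical use,
	 * not code from this file; NDADDRS is a hypothetical count): an
	 * array of dstinforeqs with dir_daddr filled in goes down via
	 * I_STR, and each element comes back annotated with reachability,
	 * scope and label information:
	 *
	 *	struct dstinforeq dinfo[NDADDRS];
	 *	struct strioctl si;
	 *
	 *	bzero(dinfo, sizeof (dinfo));
	 *	dinfo[0].dir_daddr = dst0;	(and so on for each dst)
	 *	si.ic_cmd = SIOCGDSTINFO;
	 *	si.ic_timout = 0;
	 *	si.ic_len = sizeof (dinfo);
	 *	si.ic_dp = (char *)dinfo;
	 *	(void) ioctl(fd, I_STR, &si);
	 */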
7981 data_mp = mp->b_cont;
7982 if (iocp->ioc_count == TRANSPARENT || data_mp == NULL) {
7983 miocnak(q, mp, 0, EINVAL);
7984 return;
7985 }
7986
7987 if (MBLKL(data_mp) < iocp->ioc_count) {
7988 mblk_t *new_data_mp;
7989
7990 if ((new_data_mp = msgpullup(data_mp, -1)) == NULL) {
7991 miocnak(q, mp, 0, ENOMEM);
7992 return;
7993 }
7994 freemsg(data_mp);
7995 data_mp = new_data_mp;
7996 mp->b_cont = data_mp;
7997 }
7998 match_ire = MATCH_IRE_DSTONLY;
7999
8000 for (cur = data_mp->b_rptr, end = data_mp->b_wptr;
8001 end - cur >= sizeof (struct dstinforeq);
8002 cur += sizeof (struct dstinforeq)) {
8003 dir = (struct dstinforeq *)cur;
8004 daddr = &dir->dir_daddr;
8005 saddr = &dir->dir_saddr;
8006
8007 /*
8008 * ip_addr_scope_v6() and ip6_asp_lookup() handle
8009 * v4 mapped addresses; ire_ftable_lookup_v6()
8010 * and ip_select_source_v6() do not.
8011 */
8012 dir->dir_dscope = ip_addr_scope_v6(daddr);
8013 dlabel = ip6_asp_lookup(daddr, &dir->dir_precedence, ipst);
8014
8015 isipv4 = IN6_IS_ADDR_V4MAPPED(daddr);
8016 if (isipv4) {
8017 IN6_V4MAPPED_TO_IPADDR(daddr, v4daddr);
8018 v4setsrc = INADDR_ANY;
8019 ire = ire_route_recursive_v4(v4daddr, 0, NULL, zoneid,
8020 NULL, match_ire, IRR_ALLOCATE, 0, ipst, &v4setsrc,
8021 NULL, NULL);
8022 } else {
8023 v6setsrc = ipv6_all_zeros;
8024 ire = ire_route_recursive_v6(daddr, 0, NULL, zoneid,
8025 NULL, match_ire, IRR_ALLOCATE, 0, ipst, &v6setsrc,
8026 NULL, NULL);
8027 }
8028 ASSERT(ire != NULL);
8029 if (ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
8030 ire_refrele(ire);
8031 dir->dir_dreachable = 0;
8032
8033 /* move on to next dst addr */
8034 continue;
8035 }
8036 dir->dir_dreachable = 1;
8037
8038 dst_ill = ire_nexthop_ill(ire);
8039 if (dst_ill == NULL) {
8040 ire_refrele(ire);
8041 continue;
8042 }
8043
8044 /* With ipmp we most likely look at the ipmp ill here */
8045 dir->dir_dmactype = dst_ill->ill_mactype;
8046
8047 if (isipv4) {
8048 ipaddr_t v4saddr;
8049
8050 if (ip_select_source_v4(dst_ill, v4setsrc, v4daddr,
8051 connp->conn_ixa->ixa_multicast_ifaddr, zoneid, ipst,
8052 &v4saddr, NULL, &ipif_flags) != 0) {
8053 v4saddr = INADDR_ANY;
8054 ipif_flags = 0;
8055 }
8056 IN6_IPADDR_TO_V4MAPPED(v4saddr, saddr);
8057 } else {
8058 if (ip_select_source_v6(dst_ill, &v6setsrc, daddr,
8059 zoneid, ipst, B_FALSE, IPV6_PREFER_SRC_DEFAULT,
8060 saddr, NULL, &ipif_flags) != 0) {
8061 *saddr = ipv6_all_zeros;
8062 ipif_flags = 0;
8063 }
8064 }
8065
8066 dir->dir_sscope = ip_addr_scope_v6(saddr);
8067 slabel = ip6_asp_lookup(saddr, NULL, ipst);
8068 dir->dir_labelmatch = ip6_asp_labelcmp(dlabel, slabel);
8069 dir->dir_sdeprecated = (ipif_flags & IPIF_DEPRECATED) ? 1 : 0;
8070 ire_refrele(ire);
8071 ill_refrele(dst_ill);
8072 }
8073 miocack(q, mp, iocp->ioc_count, 0);
8074 }
8075
8076 /*
8077 * Check if this is an address assigned to this machine.
8078 * Skips interfaces that are down by using ire checks.
8079 * Translates mapped addresses to v4 addresses and then
8080 * treats them as such, returning true if the v4 address
8081 * associated with this mapped address is configured.
8082 * Note: Applications will have to be careful what they do
8083 * with the response; use of mapped addresses limits
8084 * what can be done with the socket, especially with
8085 * respect to socket options and ioctls - neither IPv4
8086 * options nor IPv6 sticky options/ancillary data options
8087 * may be used.
8088 */
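/*
 * Illustrative sketch (an assumption about typical use, not code from
 * this file); SIOCTONLINK below is driven the same way:
 *
 *	struct sioc_addrreq area;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&area.sa_addr;
 *
 *	bzero(&area, sizeof (area));
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = addr;
 *	if (ioctl(s, SIOCTMYADDR, &area) >= 0 && area.sa_res != 0)
 *		(addr is configured on this node)
 */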
8089 /* ARGSUSED */
8090 int
8091 ip_sioctl_tmyaddr(ipif_t *dummy_ipif, sin_t *dummy_sin, queue_t *q, mblk_t *mp,
8092 ip_ioctl_cmd_t *ipip, void *dummy_ifreq)
8093 {
8094 struct sioc_addrreq *sia;
8095 sin_t *sin;
8096 ire_t *ire;
8097 mblk_t *mp1;
8098 zoneid_t zoneid;
8099 ip_stack_t *ipst;
8100
8101 ip1dbg(("ip_sioctl_tmyaddr"));
8102
8103 ASSERT(q->q_next == NULL); /* this ioctl not allowed if ip is module */
8104 zoneid = Q_TO_CONN(q)->conn_zoneid;
8105 ipst = CONNQ_TO_IPST(q);
8106
8107 /* Existence verified in ip_wput_nondata */
8108 mp1 = mp->b_cont->b_cont;
8109 sia = (struct sioc_addrreq *)mp1->b_rptr;
8110 sin = (sin_t *)&sia->sa_addr;
8111 switch (sin->sin_family) {
8112 case AF_INET6: {
8113 sin6_t *sin6 = (sin6_t *)sin;
8114
8115 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
8116 ipaddr_t v4_addr;
8117
8118 IN6_V4MAPPED_TO_IPADDR(&sin6->sin6_addr,
8119 v4_addr);
8120 ire = ire_ftable_lookup_v4(v4_addr, 0, 0,
8121 IRE_LOCAL|IRE_LOOPBACK, NULL, zoneid, NULL,
8122 MATCH_IRE_TYPE | MATCH_IRE_ZONEONLY, 0, ipst, NULL);
8123 } else {
8124 in6_addr_t v6addr;
8125
8126 v6addr = sin6->sin6_addr;
8127 ire = ire_ftable_lookup_v6(&v6addr, 0, 0,
8128 IRE_LOCAL|IRE_LOOPBACK, NULL, zoneid, NULL,
8129 MATCH_IRE_TYPE | MATCH_IRE_ZONEONLY, 0, ipst, NULL);
8130 }
8131 break;
8132 }
8133 case AF_INET: {
8134 ipaddr_t v4addr;
8135
8136 v4addr = sin->sin_addr.s_addr;
8137 ire = ire_ftable_lookup_v4(v4addr, 0, 0,
8138 IRE_LOCAL|IRE_LOOPBACK, NULL, zoneid,
8139 NULL, MATCH_IRE_TYPE | MATCH_IRE_ZONEONLY, 0, ipst, NULL);
8140 break;
8141 }
8142 default:
8143 return (EAFNOSUPPORT);
8144 }
8145 if (ire != NULL) {
8146 sia->sa_res = 1;
8147 ire_refrele(ire);
8148 } else {
8149 sia->sa_res = 0;
8150 }
8151 return (0);
8152 }
8153
8154 /*
 * Check if this is an address assigned on-link, i.e., a neighbor,
 * and make sure it's reachable from the current zone.
8157 * Returns true for my addresses as well.
8158 * Translates mapped addresses to v4 addresses and then
8159 * treats them as such, returning true if the v4 address
8160 * associated with this mapped address is configured.
8161 * Note: Applications will have to be careful what they do
8162 * with the response; use of mapped addresses limits
8163 * what can be done with the socket, especially with
8164 * respect to socket options and ioctls - neither IPv4
8165 * options nor IPv6 sticky options/ancillary data options
8166 * may be used.
8167 */
8168 /* ARGSUSED */
8169 int
8170 ip_sioctl_tonlink(ipif_t *dummy_ipif, sin_t *dummy_sin, queue_t *q, mblk_t *mp,
    ip_ioctl_cmd_t *ipip, void *dummy_ifreq)
8172 {
8173 struct sioc_addrreq *sia;
8174 sin_t *sin;
8175 mblk_t *mp1;
8176 ire_t *ire = NULL;
8177 zoneid_t zoneid;
8178 ip_stack_t *ipst;
8179
8180 ip1dbg(("ip_sioctl_tonlink"));
8181
8182 ASSERT(q->q_next == NULL); /* this ioctl not allowed if ip is module */
8183 zoneid = Q_TO_CONN(q)->conn_zoneid;
8184 ipst = CONNQ_TO_IPST(q);
8185
8186 /* Existence verified in ip_wput_nondata */
8187 mp1 = mp->b_cont->b_cont;
8188 sia = (struct sioc_addrreq *)mp1->b_rptr;
8189 sin = (sin_t *)&sia->sa_addr;
8190
8191 /*
8192 * We check for IRE_ONLINK and exclude IRE_BROADCAST|IRE_MULTICAST
	 * to make sure we only look at on-link unicast addresses.
8194 */
8195 switch (sin->sin_family) {
8196 case AF_INET6: {
8197 sin6_t *sin6 = (sin6_t *)sin;
8198
8199 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
8200 ipaddr_t v4_addr;
8201
8202 IN6_V4MAPPED_TO_IPADDR(&sin6->sin6_addr,
8203 v4_addr);
8204 if (!CLASSD(v4_addr)) {
8205 ire = ire_ftable_lookup_v4(v4_addr, 0, 0, 0,
8206 NULL, zoneid, NULL, MATCH_IRE_DSTONLY,
8207 0, ipst, NULL);
8208 }
8209 } else {
8210 in6_addr_t v6addr;
8211
8212 v6addr = sin6->sin6_addr;
8213 if (!IN6_IS_ADDR_MULTICAST(&v6addr)) {
8214 ire = ire_ftable_lookup_v6(&v6addr, 0, 0, 0,
8215 NULL, zoneid, NULL, MATCH_IRE_DSTONLY, 0,
8216 ipst, NULL);
8217 }
8218 }
8219 break;
8220 }
8221 case AF_INET: {
8222 ipaddr_t v4addr;
8223
8224 v4addr = sin->sin_addr.s_addr;
8225 if (!CLASSD(v4addr)) {
8226 ire = ire_ftable_lookup_v4(v4addr, 0, 0, 0, NULL,
8227 zoneid, NULL, MATCH_IRE_DSTONLY, 0, ipst, NULL);
8228 }
8229 break;
8230 }
8231 default:
8232 return (EAFNOSUPPORT);
8233 }
8234 sia->sa_res = 0;
8235 if (ire != NULL) {
8236 ASSERT(!(ire->ire_type & IRE_MULTICAST));
8237
8238 if ((ire->ire_type & IRE_ONLINK) &&
8239 !(ire->ire_type & IRE_BROADCAST))
8240 sia->sa_res = 1;
8241 ire_refrele(ire);
8242 }
8243 return (0);
8244 }
8245
8246 /*
 * TBD: implement when kernel maintains a list of site prefixes.
8248 */
8249 /* ARGSUSED */
8250 int
8251 ip_sioctl_tmysite(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
8252 ip_ioctl_cmd_t *ipip, void *ifreq)
8253 {
8254 return (ENXIO);
8255 }
8256
8257 /* ARP IOCTLs. */
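/*
 * Illustrative sketch of a SIOCGXARP caller (an assumption about typical
 * use, not code from this file): the protocol address goes down in
 * xarp_pa and the link-layer answer comes back in xarp_ha:
 *
 *	struct xarpreq xar;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&xar.xarp_pa;
 *
 *	bzero(&xar, sizeof (xar));
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = addr;
 *	xar.xarp_ha.sdl_family = AF_LINK;
 *	if (ioctl(s, SIOCGXARP, &xar) >= 0)
 *		(the hardware address is at LLADDR(&xar.xarp_ha))
 */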
8258 /* ARGSUSED */
8259 int
8260 ip_sioctl_arp(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
8261 ip_ioctl_cmd_t *ipip, void *dummy_ifreq)
8262 {
8263 int err;
8264 ipaddr_t ipaddr;
8265 struct iocblk *iocp;
8266 conn_t *connp;
8267 struct arpreq *ar;
8268 struct xarpreq *xar;
8269 int arp_flags, flags, alength;
8270 uchar_t *lladdr;
8271 ip_stack_t *ipst;
8272 ill_t *ill = ipif->ipif_ill;
8273 ill_t *proxy_ill = NULL;
8274 ipmp_arpent_t *entp = NULL;
8275 boolean_t proxyarp = B_FALSE;
8276 boolean_t if_arp_ioctl = B_FALSE;
8277 ncec_t *ncec = NULL;
8278 nce_t *nce;
8279
8280 ASSERT(!(q->q_flag & QREADR) && q->q_next == NULL);
8281 connp = Q_TO_CONN(q);
8282 ipst = connp->conn_netstack->netstack_ip;
8283 iocp = (struct iocblk *)mp->b_rptr;
8284
8285 if (ipip->ipi_cmd_type == XARP_CMD) {
8286 /* We have a chain - M_IOCTL-->MI_COPY_MBLK-->XARPREQ_MBLK */
8287 xar = (struct xarpreq *)mp->b_cont->b_cont->b_rptr;
8288 ar = NULL;
8289
8290 arp_flags = xar->xarp_flags;
8291 lladdr = (uchar_t *)LLADDR(&xar->xarp_ha);
8292 if_arp_ioctl = (xar->xarp_ha.sdl_nlen != 0);
8293 /*
		 * Validate the user's link-layer address length input,
		 * and the name and address length limits.
8296 */
8297 alength = ill->ill_phys_addr_length;
8298 if (ipip->ipi_cmd == SIOCSXARP) {
8299 if (alength != xar->xarp_ha.sdl_alen ||
8300 (alength + xar->xarp_ha.sdl_nlen >
8301 sizeof (xar->xarp_ha.sdl_data)))
8302 return (EINVAL);
8303 }
8304 } else {
8305 /* We have a chain - M_IOCTL-->MI_COPY_MBLK-->ARPREQ_MBLK */
8306 ar = (struct arpreq *)mp->b_cont->b_cont->b_rptr;
8307 xar = NULL;
8308
8309 arp_flags = ar->arp_flags;
8310 lladdr = (uchar_t *)ar->arp_ha.sa_data;
8311 /*
8312 * Theoretically, the sa_family could tell us what link
8313 * layer type this operation is trying to deal with. By
8314 * common usage AF_UNSPEC means ethernet. We'll assume
8315 * any attempt to use the SIOC?ARP ioctls is for ethernet,
8316 * for now. Our new SIOC*XARP ioctls can be used more
8317 * generally.
8318 *
		 * If the underlying media happens to have a non-6-byte
		 * address, the arp module will fail set/get, but the del
8321 * operation will succeed.
8322 */
8323 alength = 6;
8324 if ((ipip->ipi_cmd != SIOCDARP) &&
8325 (alength != ill->ill_phys_addr_length)) {
8326 return (EINVAL);
8327 }
8328 }
8329
8330 /* Translate ATF* flags to NCE* flags */
8331 flags = 0;
8332 if (arp_flags & ATF_AUTHORITY)
8333 flags |= NCE_F_AUTHORITY;
8334 if (arp_flags & ATF_PERM)
8335 flags |= NCE_F_NONUD; /* not subject to aging */
8336 if (arp_flags & ATF_PUBL)
8337 flags |= NCE_F_PUBLISH;
8338
8339 /*
8340 * IPMP ARP special handling:
8341 *
8342 * 1. Since ARP mappings must appear consistent across the group,
8343 * prohibit changing ARP mappings on the underlying interfaces.
8344 *
8345 * 2. Since ARP mappings for IPMP data addresses are maintained by
8346 * IP itself, prohibit changing them.
8347 *
8348 * 3. For proxy ARP, use a functioning hardware address in the group,
8349 * provided one exists. If one doesn't, just add the entry as-is;
8350 * ipmp_illgrp_refresh_arpent() will refresh it if things change.
8351 */
8352 if (IS_UNDER_IPMP(ill)) {
8353 if (ipip->ipi_cmd != SIOCGARP && ipip->ipi_cmd != SIOCGXARP)
8354 return (EPERM);
8355 }
8356 if (IS_IPMP(ill)) {
8357 ipmp_illgrp_t *illg = ill->ill_grp;
8358
8359 switch (ipip->ipi_cmd) {
8360 case SIOCSARP:
8361 case SIOCSXARP:
8362 proxy_ill = ipmp_illgrp_find_ill(illg, lladdr, alength);
8363 if (proxy_ill != NULL) {
8364 proxyarp = B_TRUE;
8365 if (!ipmp_ill_is_active(proxy_ill))
8366 proxy_ill = ipmp_illgrp_next_ill(illg);
8367 if (proxy_ill != NULL)
8368 lladdr = proxy_ill->ill_phys_addr;
8369 }
8370 /* FALLTHRU */
8371 }
8372 }
8373
8374 ipaddr = sin->sin_addr.s_addr;
8375 /*
	 * Don't match across the illgrp, per cases (1) and (2) above.
8377 * XXX use IS_IPMP(ill) like ndp_sioc_update?
8378 */
8379 nce = nce_lookup_v4(ill, &ipaddr);
8380 if (nce != NULL)
8381 ncec = nce->nce_common;
8382
8383 switch (iocp->ioc_cmd) {
8384 case SIOCDARP:
8385 case SIOCDXARP: {
8386 /*
8387 * Delete the NCE if any.
8388 */
8389 if (ncec == NULL) {
8390 iocp->ioc_error = ENXIO;
8391 break;
8392 }
8393 /* Don't allow changes to arp mappings of local addresses. */
8394 if (NCE_MYADDR(ncec)) {
8395 nce_refrele(nce);
8396 return (ENOTSUP);
8397 }
8398 iocp->ioc_error = 0;
8399
8400 /*
8401 * Delete the nce_common which has ncec_ill set to ipmp_ill.
8402 * This will delete all the nce entries on the under_ills.
8403 */
8404 ncec_delete(ncec);
8405 /*
8406 * Once the NCE has been deleted, then the ire_dep* consistency
8407 * mechanism will find any IRE which depended on the now
8408 * condemned NCE (as part of sending packets).
8409 * That mechanism handles redirects by deleting redirects
8410 * that refer to UNREACHABLE nces.
8411 */
8412 break;
8413 }
8414 case SIOCGARP:
8415 case SIOCGXARP:
8416 if (ncec != NULL) {
8417 lladdr = ncec->ncec_lladdr;
8418 flags = ncec->ncec_flags;
8419 iocp->ioc_error = 0;
8420 ip_sioctl_garp_reply(mp, ncec->ncec_ill, lladdr, flags);
8421 } else {
8422 iocp->ioc_error = ENXIO;
8423 }
8424 break;
8425 case SIOCSARP:
8426 case SIOCSXARP:
8427 /* Don't allow changes to arp mappings of local addresses. */
8428 if (ncec != NULL && NCE_MYADDR(ncec)) {
8429 nce_refrele(nce);
8430 return (ENOTSUP);
8431 }
8432
8433 /* static arp entries will undergo NUD if ATF_PERM is not set */
8434 flags |= NCE_F_STATIC;
8435 if (!if_arp_ioctl) {
8436 ip_nce_lookup_and_update(&ipaddr, NULL, ipst,
8437 lladdr, alength, flags);
8438 } else {
8439 ipif_t *ipif = ipif_get_next_ipif(NULL, ill);
8440 if (ipif != NULL) {
8441 ip_nce_lookup_and_update(&ipaddr, ipif, ipst,
8442 lladdr, alength, flags);
8443 ipif_refrele(ipif);
8444 }
8445 }
8446 if (nce != NULL) {
8447 nce_refrele(nce);
8448 nce = NULL;
8449 }
8450 /*
8451 * NCE_F_STATIC entries will be added in state ND_REACHABLE
8452 * by nce_add_common()
8453 */
8454 err = nce_lookup_then_add_v4(ill, lladdr,
8455 ill->ill_phys_addr_length, &ipaddr, flags, ND_UNCHANGED,
8456 &nce);
8457 if (err == EEXIST) {
8458 ncec = nce->nce_common;
8459 mutex_enter(&ncec->ncec_lock);
8460 ncec->ncec_state = ND_REACHABLE;
8461 ncec->ncec_flags = flags;
8462 nce_update(ncec, ND_UNCHANGED, lladdr);
8463 mutex_exit(&ncec->ncec_lock);
8464 err = 0;
8465 }
8466 if (nce != NULL) {
8467 nce_refrele(nce);
8468 nce = NULL;
8469 }
8470 if (IS_IPMP(ill) && err == 0) {
8471 entp = ipmp_illgrp_create_arpent(ill->ill_grp,
8472 proxyarp, ipaddr, lladdr, ill->ill_phys_addr_length,
8473 flags);
8474 if (entp == NULL || (proxyarp && proxy_ill == NULL)) {
8475 iocp->ioc_error = (entp == NULL ? ENOMEM : 0);
8476 break;
8477 }
8478 }
8479 iocp->ioc_error = err;
8480 }
8481
8482 if (nce != NULL) {
8483 nce_refrele(nce);
8484 }
8485
8486 /*
8487 * If we created an IPMP ARP entry, mark that we've notified ARP.
8488 */
8489 if (entp != NULL)
8490 ipmp_illgrp_mark_arpent(ill->ill_grp, entp);
8491
8492 return (iocp->ioc_error);
8493 }
8494
8495 /*
8496 * Parse an [x]arpreq structure coming down SIOC[GSD][X]ARP ioctls, identify
8497 * the associated sin and refhold and return the associated ipif via `ci'.
 * the associated sin, and refhold and return the associated ipif via `ci'.
8499 int
8500 ip_extract_arpreq(queue_t *q, mblk_t *mp, const ip_ioctl_cmd_t *ipip,
8501 cmd_info_t *ci)
8502 {
8503 mblk_t *mp1;
8504 sin_t *sin;
8505 conn_t *connp;
8506 ipif_t *ipif;
8507 ire_t *ire = NULL;
8508 ill_t *ill = NULL;
8509 boolean_t exists;
8510 ip_stack_t *ipst;
8511 struct arpreq *ar;
8512 struct xarpreq *xar;
8513 struct sockaddr_dl *sdl;
8514
8515 /* ioctl comes down on a conn */
8516 ASSERT(!(q->q_flag & QREADR) && q->q_next == NULL);
8517 connp = Q_TO_CONN(q);
8518 if (connp->conn_family == AF_INET6)
8519 return (ENXIO);
8520
8521 ipst = connp->conn_netstack->netstack_ip;
8522
8523 /* Verified in ip_wput_nondata */
8524 mp1 = mp->b_cont->b_cont;
8525
8526 if (ipip->ipi_cmd_type == XARP_CMD) {
8527 ASSERT(MBLKL(mp1) >= sizeof (struct xarpreq));
8528 xar = (struct xarpreq *)mp1->b_rptr;
8529 sin = (sin_t *)&xar->xarp_pa;
8530 sdl = &xar->xarp_ha;
8531
8532 if (sdl->sdl_family != AF_LINK || sin->sin_family != AF_INET)
8533 return (ENXIO);
8534 if (sdl->sdl_nlen >= LIFNAMSIZ)
8535 return (EINVAL);
8536 } else {
8537 ASSERT(ipip->ipi_cmd_type == ARP_CMD);
8538 ASSERT(MBLKL(mp1) >= sizeof (struct arpreq));
8539 ar = (struct arpreq *)mp1->b_rptr;
8540 sin = (sin_t *)&ar->arp_pa;
8541 }
8542
8543 if (ipip->ipi_cmd_type == XARP_CMD && sdl->sdl_nlen != 0) {
8544 ipif = ipif_lookup_on_name(sdl->sdl_data, sdl->sdl_nlen,
8545 B_FALSE, &exists, B_FALSE, ALL_ZONES, ipst);
8546 if (ipif == NULL)
8547 return (ENXIO);
8548 if (ipif->ipif_id != 0) {
8549 ipif_refrele(ipif);
8550 return (ENXIO);
8551 }
8552 } else {
8553 /*
8554 * Either an SIOC[DGS]ARP or an SIOC[DGS]XARP with an sdl_nlen
8555 * of 0: use the IP address to find the ipif. If the IP
8556 * address is an IPMP test address, ire_ftable_lookup() will
8557 * find the wrong ill, so we first do an ipif_lookup_addr().
8558 */
8559 ipif = ipif_lookup_addr(sin->sin_addr.s_addr, NULL, ALL_ZONES,
8560 ipst);
8561 if (ipif == NULL) {
8562 ire = ire_ftable_lookup_v4(sin->sin_addr.s_addr,
8563 0, 0, IRE_IF_RESOLVER, NULL, ALL_ZONES,
8564 NULL, MATCH_IRE_TYPE, 0, ipst, NULL);
8565 if (ire == NULL || ((ill = ire->ire_ill) == NULL)) {
8566 if (ire != NULL)
8567 ire_refrele(ire);
8568 return (ENXIO);
8569 }
8570 ASSERT(ire != NULL && ill != NULL);
8571 ipif = ill->ill_ipif;
8572 ipif_refhold(ipif);
8573 ire_refrele(ire);
8574 }
8575 }
8576
8577 if (ipif->ipif_ill->ill_net_type != IRE_IF_RESOLVER) {
8578 ipif_refrele(ipif);
8579 return (ENXIO);
8580 }
8581
8582 ci->ci_sin = sin;
8583 ci->ci_ipif = ipif;
8584 return (0);
8585 }
8586
8587 /*
8588 * Link or unlink the illgrp on IPMP meta-interface `ill' depending on the
8589 * value of `ioccmd'. While an illgrp is linked to an ipmp_grp_t, it is
8590 * accessible from that ipmp_grp_t, which means SIOCSLIFGROUPNAME can look it
8591 * up and thus an ill can join that illgrp.
8592 *
8593 * We use I_PLINK/I_PUNLINK to do the link/unlink operations rather than
8594 * open()/close() primarily because close() is not allowed to fail or block
8595 * forever. On the other hand, I_PUNLINK *can* fail, and there's no reason
8596 * why anyone should ever need to I_PUNLINK an in-use IPMP stream. To ensure
 * symmetric behavior (e.g., doing an I_PLINK after an I_PUNLINK undoes the
8598 * I_PUNLINK) we defer linking to I_PLINK. Separately, we also fail attempts
8599 * to I_LINK since I_UNLINK is optional and we'd end up in an inconsistent
8600 * state if I_UNLINK didn't occur.
8601 *
8602 * Note that for each plumb/unplumb operation, we may end up here more than
8603 * once because of the way ifconfig works. However, it's OK to link the same
8604 * illgrp more than once, or unlink an illgrp that's already unlinked.
8605 */
8606 static int
8607 ip_sioctl_plink_ipmp(ill_t *ill, int ioccmd)
8608 {
8609 int err;
8610 ip_stack_t *ipst = ill->ill_ipst;
8611
8612 ASSERT(IS_IPMP(ill));
8613 ASSERT(IAM_WRITER_ILL(ill));
8614
8615 switch (ioccmd) {
8616 case I_LINK:
8617 return (ENOTSUP);
8618
8619 case I_PLINK:
8620 rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
8621 ipmp_illgrp_link_grp(ill->ill_grp, ill->ill_phyint->phyint_grp);
8622 rw_exit(&ipst->ips_ipmp_lock);
8623 break;
8624
8625 case I_PUNLINK:
8626 /*
8627 * Require all UP ipifs be brought down prior to unlinking the
8628 * illgrp so any associated IREs (and other state) is torched.
8629 */
8630 if (ill->ill_ipif_up_count + ill->ill_ipif_dup_count > 0)
8631 return (EBUSY);
8632
8633 /*
8634 * NOTE: We hold ipmp_lock across the unlink to prevent a race
8635 * with an SIOCSLIFGROUPNAME request from an ill trying to
8636 * join this group. Specifically: ills trying to join grab
8637 * ipmp_lock and bump a "pending join" counter checked by
8638 * ipmp_illgrp_unlink_grp(). During the unlink no new pending
8639 * joins can occur (since we have ipmp_lock). Once we drop
8640 * ipmp_lock, subsequent SIOCSLIFGROUPNAME requests will not
8641 * find the illgrp (since we unlinked it) and will return
8642 * EAFNOSUPPORT. This will then take them back through the
8643 * IPMP meta-interface plumbing logic in ifconfig, and thus
8644 * back through I_PLINK above.
8645 */
8646 rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
8647 err = ipmp_illgrp_unlink_grp(ill->ill_grp);
8648 rw_exit(&ipst->ips_ipmp_lock);
8649 return (err);
8650 default:
8651 break;
8652 }
8653 return (0);
8654 }
8655
8656 /*
8657 * Do I_PLINK/I_LINK or I_PUNLINK/I_UNLINK with consistency checks and also
8658 * atomically set/clear the muxids. Also complete the ioctl by acking or
8659 * naking it. Note that the code is structured such that the link type,
8660 * whether it's persistent or not, is treated equally. ifconfig(1M) and
8661 * its clones use the persistent link, while pppd(1M) and perhaps many
 * other daemons may use a non-persistent link. When combined with some
8663 * ill_t states, linking and unlinking lower streams may be used as
8664 * indicators of dynamic re-plumbing events [see PSARC/1999/348].
8665 */
8666 /* ARGSUSED */
8667 void
8668 ip_sioctl_plink(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy_arg)
8669 {
8670 mblk_t *mp1;
8671 struct linkblk *li;
8672 int ioccmd = ((struct iocblk *)mp->b_rptr)->ioc_cmd;
8673 int err = 0;
8674
8675 ASSERT(ioccmd == I_PLINK || ioccmd == I_PUNLINK ||
8676 ioccmd == I_LINK || ioccmd == I_UNLINK);
8677
8678 mp1 = mp->b_cont; /* This is the linkblk info */
8679 li = (struct linkblk *)mp1->b_rptr;
8680
8681 err = ip_sioctl_plink_ipmod(ipsq, q, mp, ioccmd, li);
8682 if (err == EINPROGRESS)
8683 return;
8684 if (err == 0)
8685 miocack(q, mp, 0, 0);
8686 else
8687 miocnak(q, mp, 0, err);
8688
8689 /* Conn was refheld in ip_sioctl_copyin_setup */
8690 if (CONN_Q(q)) {
8691 CONN_DEC_IOCTLREF(Q_TO_CONN(q));
8692 CONN_OPER_PENDING_DONE(Q_TO_CONN(q));
8693 }
8694 }
8695
8696 /*
8697 * Process I_{P}LINK and I_{P}UNLINK requests named by `ioccmd' and pointed to
 * by `mp' and `li' for the IP module stream (if li->l_qbot is in fact an IP
8699 * module stream).
8700 * Returns zero on success, EINPROGRESS if the operation is still pending, or
8701 * an error code on failure.
8702 */
8703 static int
8704 ip_sioctl_plink_ipmod(ipsq_t *ipsq, queue_t *q, mblk_t *mp, int ioccmd,
8705 struct linkblk *li)
8706 {
8707 int err = 0;
8708 ill_t *ill;
8709 queue_t *ipwq, *dwq;
8710 const char *name;
8711 struct qinit *qinfo;
8712 boolean_t islink = (ioccmd == I_PLINK || ioccmd == I_LINK);
8713 boolean_t entered_ipsq = B_FALSE;
8714 boolean_t is_ip = B_FALSE;
8715 arl_t *arl;
8716
8717 /*
	 * Walk the lower stream to verify it's the IP or ARP module stream.
	 * Each module is identified by its name, wput function,
8720 * and non-NULL q_next. STREAMS ensures that the lower stream
8721 * (li->l_qbot) will not vanish until this ioctl completes.
8722 */
8723 for (ipwq = li->l_qbot; ipwq != NULL; ipwq = ipwq->q_next) {
8724 qinfo = ipwq->q_qinfo;
8725 name = qinfo->qi_minfo->mi_idname;
8726 if (name != NULL && strcmp(name, ip_mod_info.mi_idname) == 0 &&
8727 qinfo->qi_putp != ip_lwput && ipwq->q_next != NULL) {
8728 is_ip = B_TRUE;
8729 break;
8730 }
8731 if (name != NULL && strcmp(name, arp_mod_info.mi_idname) == 0 &&
8732 qinfo->qi_putp != ip_lwput && ipwq->q_next != NULL) {
8733 break;
8734 }
8735 }
8736
8737 /*
8738 * If this isn't an IP module stream, bail.
8739 */
8740 if (ipwq == NULL)
8741 return (0);
8742
8743 if (!is_ip) {
8744 arl = (arl_t *)ipwq->q_ptr;
8745 ill = arl_to_ill(arl);
8746 if (ill == NULL)
8747 return (0);
8748 } else {
8749 ill = ipwq->q_ptr;
8750 }
8751 ASSERT(ill != NULL);
8752
8753 if (ipsq == NULL) {
8754 ipsq = ipsq_try_enter(NULL, ill, q, mp, ip_sioctl_plink,
8755 NEW_OP, B_FALSE);
8756 if (ipsq == NULL) {
8757 if (!is_ip)
8758 ill_refrele(ill);
8759 return (EINPROGRESS);
8760 }
8761 entered_ipsq = B_TRUE;
8762 }
8763 ASSERT(IAM_WRITER_ILL(ill));
8764 mutex_enter(&ill->ill_lock);
8765 if (!is_ip) {
8766 if (islink && ill->ill_muxid == 0) {
8767 /*
8768 * Plumbing has to be done with IP plumbed first, arp
8769 * second, but here we have arp being plumbed first.
8770 */
8771 mutex_exit(&ill->ill_lock);
8772 if (entered_ipsq)
8773 ipsq_exit(ipsq);
8774 ill_refrele(ill);
8775 return (EINVAL);
8776 }
8777 }
8778 mutex_exit(&ill->ill_lock);
8779 if (!is_ip) {
8780 arl->arl_muxid = islink ? li->l_index : 0;
8781 ill_refrele(ill);
8782 goto done;
8783 }
8784
8785 if (IS_IPMP(ill) && (err = ip_sioctl_plink_ipmp(ill, ioccmd)) != 0)
8786 goto done;
8787
8788 /*
8789 * As part of I_{P}LINKing, stash the number of downstream modules and
8790 * the read queue of the module immediately below IP in the ill.
8791 * These are used during the capability negotiation below.
8792 */
8793 ill->ill_lmod_rq = NULL;
8794 ill->ill_lmod_cnt = 0;
8795 if (islink && ((dwq = ipwq->q_next) != NULL)) {
8796 ill->ill_lmod_rq = RD(dwq);
8797 for (; dwq != NULL; dwq = dwq->q_next)
8798 ill->ill_lmod_cnt++;
8799 }
8800
8801 ill->ill_muxid = islink ? li->l_index : 0;
8802
8803 /*
8804 * Mark the ipsq busy until the capability operations initiated below
8805 * complete. The PLINK/UNLINK ioctl itself completes when our caller
8806 * returns, but the capability operation may complete asynchronously
8807 * much later.
8808 */
8809 ipsq_current_start(ipsq, ill->ill_ipif, ioccmd);
8810 /*
8811 * If there's at least one up ipif on this ill, then we're bound to
8812 * the underlying driver via DLPI. In that case, renegotiate
8813 * capabilities to account for any possible change in modules
8814 * interposed between IP and the driver.
8815 */
8816 if (ill->ill_ipif_up_count > 0) {
8817 if (islink)
8818 ill_capability_probe(ill);
8819 else
8820 ill_capability_reset(ill, B_FALSE);
8821 }
8822 ipsq_current_finish(ipsq);
8823 done:
8824 if (entered_ipsq)
8825 ipsq_exit(ipsq);
8826
8827 return (err);
8828 }
8829
8830 /*
8831 * Search the ioctl command in the ioctl tables and return a pointer
 * Search for the ioctl command in the ioctl tables and return a pointer
8833 * static and fully populated at compile time.
8834 */
8835 ip_ioctl_cmd_t *
8836 ip_sioctl_lookup(int ioc_cmd)
8837 {
8838 int index;
8839 ip_ioctl_cmd_t *ipip;
8840 ip_ioctl_cmd_t *ipip_end;
8841
8842 if (ioc_cmd == IPI_DONTCARE)
8843 return (NULL);
8844
8845 /*
8846 * Do a 2 step search. First search the indexed table
8847 * based on the least significant byte of the ioctl cmd.
8848 * If we don't find a match, then search the misc table
8849 * serially.
8850 */
8851 index = ioc_cmd & 0xFF;
8852 if (index < ip_ndx_ioctl_count) {
8853 ipip = &ip_ndx_ioctl_table[index];
8854 if (ipip->ipi_cmd == ioc_cmd) {
8855 /* Found a match in the ndx table */
8856 return (ipip);
8857 }
8858 }
8859
8860 /* Search the misc table */
8861 ipip_end = &ip_misc_ioctl_table[ip_misc_ioctl_count];
8862 for (ipip = ip_misc_ioctl_table; ipip < ipip_end; ipip++) {
8863 if (ipip->ipi_cmd == ioc_cmd)
8864 /* Found a match in the misc table */
8865 return (ipip);
8866 }
8867
8868 return (NULL);
8869 }
8870
8871 /*
 * Helper function for ip_sioctl_getsetprop() that sanity checks the request.
8873 */
8874 static boolean_t
8875 getset_ioctl_checks(mblk_t *mp)
8876 {
8877 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
8878 mblk_t *mp1 = mp->b_cont;
8879 mod_ioc_prop_t *pioc;
8880 uint_t flags;
8881 uint_t pioc_size;
8882
8883 /* do sanity checks on various arguments */
8884 if (mp1 == NULL || iocp->ioc_count == 0 ||
8885 iocp->ioc_count == TRANSPARENT) {
8886 return (B_FALSE);
8887 }
8888 if (msgdsize(mp1) < iocp->ioc_count) {
8889 if (!pullupmsg(mp1, iocp->ioc_count))
8890 return (B_FALSE);
8891 }
8892
8893 pioc = (mod_ioc_prop_t *)mp1->b_rptr;
8894
8895 /* sanity checks on mpr_valsize */
8896 pioc_size = sizeof (mod_ioc_prop_t);
8897 if (pioc->mpr_valsize != 0)
8898 pioc_size += pioc->mpr_valsize - 1;
8899
8900 if (iocp->ioc_count != pioc_size)
8901 return (B_FALSE);
8902
8903 flags = pioc->mpr_flags;
8904 if (iocp->ioc_cmd == SIOCSETPROP) {
8905 /*
		 * One can either reset the value to its default value,
		 * change the current value, or append/remove the value
		 * from a multi-valued property.
8909 */
8910 if ((flags & MOD_PROP_DEFAULT) != MOD_PROP_DEFAULT &&
8911 flags != MOD_PROP_ACTIVE &&
8912 flags != (MOD_PROP_ACTIVE|MOD_PROP_APPEND) &&
8913 flags != (MOD_PROP_ACTIVE|MOD_PROP_REMOVE))
8914 return (B_FALSE);
8915 } else {
8916 ASSERT(iocp->ioc_cmd == SIOCGETPROP);
8917
8918 /*
8919 * One can retrieve only one kind of property information
8920 * at a time.
8921 */
8922 if ((flags & MOD_PROP_ACTIVE) != MOD_PROP_ACTIVE &&
8923 (flags & MOD_PROP_DEFAULT) != MOD_PROP_DEFAULT &&
8924 (flags & MOD_PROP_POSSIBLE) != MOD_PROP_POSSIBLE &&
8925 (flags & MOD_PROP_PERM) != MOD_PROP_PERM)
8926 return (B_FALSE);
8927 }
8928
8929 return (B_TRUE);
8930 }
8931
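/*
 * Illustrative sizing sketch (an assumption about the caller's side, not
 * code from this file): since mpr_val is evidently a one-byte array at the
 * end of mod_ioc_prop_t, the ioc_count that getset_ioctl_checks() expects
 * is the structure plus the value bytes, minus the byte already counted
 * inside the structure:
 *
 *	len = sizeof (mod_ioc_prop_t);
 *	if (valsize != 0)
 *		len += valsize - 1;
 */
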
8932 /*
 * Process the SIOC{SET|GET}PROP ioctls.
8934 */
8935 /* ARGSUSED */
8936 static void
8937 ip_sioctl_getsetprop(queue_t *q, mblk_t *mp)
8938 {
8939 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
8940 mblk_t *mp1 = mp->b_cont;
8941 mod_ioc_prop_t *pioc;
8942 mod_prop_info_t *ptbl = NULL, *pinfo = NULL;
8943 ip_stack_t *ipst;
8944 netstack_t *stack;
8945 cred_t *cr;
8946 boolean_t set;
8947 int err;
8948
8949 ASSERT(q->q_next == NULL);
8950 ASSERT(CONN_Q(q));
8951
8952 if (!getset_ioctl_checks(mp)) {
8953 miocnak(q, mp, 0, EINVAL);
8954 return;
8955 }
8956 ipst = CONNQ_TO_IPST(q);
8957 stack = ipst->ips_netstack;
8958 pioc = (mod_ioc_prop_t *)mp1->b_rptr;
8959
8960 switch (pioc->mpr_proto) {
8961 case MOD_PROTO_IP:
8962 case MOD_PROTO_IPV4:
8963 case MOD_PROTO_IPV6:
8964 ptbl = ipst->ips_propinfo_tbl;
8965 break;
8966 case MOD_PROTO_RAWIP:
8967 ptbl = stack->netstack_icmp->is_propinfo_tbl;
8968 break;
8969 case MOD_PROTO_TCP:
8970 ptbl = stack->netstack_tcp->tcps_propinfo_tbl;
8971 break;
8972 case MOD_PROTO_UDP:
8973 ptbl = stack->netstack_udp->us_propinfo_tbl;
8974 break;
8975 case MOD_PROTO_SCTP:
8976 ptbl = stack->netstack_sctp->sctps_propinfo_tbl;
8977 break;
8978 default:
8979 miocnak(q, mp, 0, EINVAL);
8980 return;
8981 }
8982
8983 pinfo = mod_prop_lookup(ptbl, pioc->mpr_name, pioc->mpr_proto);
8984 if (pinfo == NULL) {
8985 miocnak(q, mp, 0, ENOENT);
8986 return;
8987 }
8988
8989 set = (iocp->ioc_cmd == SIOCSETPROP) ? B_TRUE : B_FALSE;
8990 if (set && pinfo->mpi_setf != NULL) {
8991 cr = msg_getcred(mp, NULL);
8992 if (cr == NULL)
8993 cr = iocp->ioc_cr;
8994 err = pinfo->mpi_setf(stack, cr, pinfo, pioc->mpr_ifname,
8995 pioc->mpr_val, pioc->mpr_flags);
8996 } else if (!set && pinfo->mpi_getf != NULL) {
8997 err = pinfo->mpi_getf(stack, pinfo, pioc->mpr_ifname,
8998 pioc->mpr_val, pioc->mpr_valsize, pioc->mpr_flags);
8999 } else {
9000 err = EPERM;
9001 }
9002
9003 if (err != 0) {
9004 miocnak(q, mp, 0, err);
9005 } else {
9006 if (set)
9007 miocack(q, mp, 0, 0);
9008 else /* For get, we need to return back the data */
9009 miocack(q, mp, iocp->ioc_count, 0);
9010 }
9011 }
9012
9013 /*
 * Process the legacy ND_GET, ND_SET ioctls just for {ip|ip6}_forwarding,
 * as several routing daemons have unfortunately used these 'unpublished'
 * but well-known ioctls.
9017 */
9018 /* ARGSUSED */
9019 static void
9020 ip_process_legacy_nddprop(queue_t *q, mblk_t *mp)
9021 {
9022 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
9023 mblk_t *mp1 = mp->b_cont;
9024 char *pname, *pval, *buf;
9025 uint_t bufsize, proto;
9026 mod_prop_info_t *pinfo = NULL;
9027 ip_stack_t *ipst;
9028 int err = 0;
9029
9030 ASSERT(CONN_Q(q));
9031 ipst = CONNQ_TO_IPST(q);
9032
9033 if (iocp->ioc_count == 0 || mp1 == NULL) {
9034 miocnak(q, mp, 0, EINVAL);
9035 return;
9036 }
9037
9038 mp1->b_datap->db_lim[-1] = '\0'; /* Force null termination */
9039 pval = buf = pname = (char *)mp1->b_rptr;
9040 bufsize = MBLKL(mp1);
9041
9042 if (strcmp(pname, "ip_forwarding") == 0) {
9043 pname = "forwarding";
9044 proto = MOD_PROTO_IPV4;
9045 } else if (strcmp(pname, "ip6_forwarding") == 0) {
9046 pname = "forwarding";
9047 proto = MOD_PROTO_IPV6;
9048 } else {
9049 miocnak(q, mp, 0, EINVAL);
9050 return;
9051 }
9052
9053 pinfo = mod_prop_lookup(ipst->ips_propinfo_tbl, pname, proto);
9054
9055 switch (iocp->ioc_cmd) {
9056 case ND_GET:
9057 if ((err = pinfo->mpi_getf(ipst->ips_netstack, pinfo, NULL, buf,
9058 bufsize, 0)) == 0) {
9059 miocack(q, mp, iocp->ioc_count, 0);
9060 return;
9061 }
9062 break;
9063 case ND_SET:
		/*
		 * The buffer holds the property name and value in the
		 * following format:
		 * <property name>'\0'<property value>'\0'; extract them.
		 */
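		/*
		 * e.g. an ndd(1M)-style client (an illustrative sketch, not
		 * code from this file) would build the buffer this way on a
		 * descriptor for /dev/ip:
		 *
		 *	char buf[] = "ip_forwarding\0" "1";
		 *	struct strioctl si;
		 *
		 *	si.ic_cmd = ND_SET;
		 *	si.ic_timout = 0;
		 *	si.ic_len = sizeof (buf);	(both strings, both NULs)
		 *	si.ic_dp = buf;
		 *	(void) ioctl(fd, I_STR, &si);
		 */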
9069 while (*pval++)
9070 noop;
9071
9072 if (!*pval || pval >= (char *)mp1->b_wptr) {
9073 err = EINVAL;
9074 } else if ((err = pinfo->mpi_setf(ipst->ips_netstack, NULL,
9075 pinfo, NULL, pval, 0)) == 0) {
9076 miocack(q, mp, 0, 0);
9077 return;
9078 }
9079 break;
9080 default:
9081 err = EINVAL;
9082 break;
9083 }
9084 miocnak(q, mp, 0, err);
9085 }
9086
9087 /*
9088 * Wrapper function for resuming deferred ioctl processing
9089 * Used for SIOCGDSTINFO, SIOCGIP6ADDRPOLICY, SIOCGMSFILTER,
9090 * SIOCSMSFILTER, SIOCGIPMSFILTER, and SIOCSIPMSFILTER currently.
9091 */
9092 /* ARGSUSED */
9093 void
9094 ip_sioctl_copyin_resume(ipsq_t *dummy_ipsq, queue_t *q, mblk_t *mp,
9095 void *dummy_arg)
9096 {
9097 ip_sioctl_copyin_setup(q, mp);
9098 }
9099
9100 /*
9101 * ip_sioctl_copyin_setup is called by ip_wput_nondata with any M_IOCTL message
9102 * that arrives. Most of the IOCTLs are "socket" IOCTLs which we handle
9103 * in either I_STR or TRANSPARENT form, using the mi_copy facility.
 * We establish here the size of the block to be copied in. mi_copyin
 * arranges for this to happen, and processing continues in ip_wput_nondata
 * with an M_IOCDATA message.
9107 */
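/*
 * For illustration, a hedged sketch of the two arrival forms (assumed
 * typical callers, not code from this file).  A socket consumer issues
 * the TRANSPARENT form:
 *
 *	struct lifreq lifr;
 *
 *	(void) strlcpy(lifr.lifr_name, "lo0", sizeof (lifr.lifr_name));
 *	(void) ioctl(s, SIOCGLIFFLAGS, &lifr);
 *
 * while a STREAMS consumer wraps the same request in I_STR:
 *
 *	struct strioctl si;
 *
 *	si.ic_cmd = SIOCGLIFFLAGS;
 *	si.ic_timout = 0;
 *	si.ic_len = sizeof (lifr);
 *	si.ic_dp = (char *)&lifr;
 *	(void) ioctl(fd, I_STR, &si);
 *
 * mi_copyin() hides the difference from the rest of the ioctl path.
 */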
9108 void
9109 ip_sioctl_copyin_setup(queue_t *q, mblk_t *mp)
9110 {
9111 int copyin_size;
9112 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
9113 ip_ioctl_cmd_t *ipip;
9114 cred_t *cr;
9115 ip_stack_t *ipst;
9116
9117 if (CONN_Q(q))
9118 ipst = CONNQ_TO_IPST(q);
9119 else
9120 ipst = ILLQ_TO_IPST(q);
9121
9122 ipip = ip_sioctl_lookup(iocp->ioc_cmd);
9123 if (ipip == NULL) {
9124 /*
9125 * The ioctl is not one we understand or own.
		 * Pass it along to be processed downstream,
9127 * if this is a module instance of IP, else nak
9128 * the ioctl.
9129 */
9130 if (q->q_next == NULL) {
9131 goto nak;
9132 } else {
9133 putnext(q, mp);
9134 return;
9135 }
9136 }
9137
9138 /*
9139 * If this is deferred, then we will do all the checks when we
9140 * come back.
9141 */
9142 if ((iocp->ioc_cmd == SIOCGDSTINFO ||
9143 iocp->ioc_cmd == SIOCGIP6ADDRPOLICY) && !ip6_asp_can_lookup(ipst)) {
9144 ip6_asp_pending_op(q, mp, ip_sioctl_copyin_resume);
9145 return;
9146 }
9147
9148 /*
9149 * Only allow a very small subset of IP ioctls on this stream if
9150 * IP is a module and not a driver. Allowing ioctls to be processed
9151 * in this case may cause assert failures or data corruption.
	 * Typically G[L]IFFLAGS and SLIFNAME/IF_UNITSEL are the only
9153 * ioctls allowed on an IP module stream, after which this stream
9154 * normally becomes a multiplexor (at which time the stream head
9155 * will fail all ioctls).
9156 */
9157 if ((q->q_next != NULL) && !(ipip->ipi_flags & IPI_MODOK)) {
9158 goto nak;
9159 }
9160
9161 /* Make sure we have ioctl data to process. */
9162 if (mp->b_cont == NULL && !(ipip->ipi_flags & IPI_NULL_BCONT))
9163 goto nak;
9164
9165 /*
9166 * Prefer dblk credential over ioctl credential; some synthesized
9167 * ioctls have kcred set because there's no way to crhold()
	 * a credential in some contexts. (ioc_cr is not crfree()d by
9169 * the framework; the caller of ioctl needs to hold the reference
9170 * for the duration of the call).
9171 */
9172 cr = msg_getcred(mp, NULL);
9173 if (cr == NULL)
9174 cr = iocp->ioc_cr;
9175
9176 /* Make sure normal users don't send down privileged ioctls */
9177 if ((ipip->ipi_flags & IPI_PRIV) &&
9178 (cr != NULL) && secpolicy_ip_config(cr, B_TRUE) != 0) {
		/*
		 * The earlier check was quiet (B_TRUE); repeat it with
		 * auditing enabled (B_FALSE) so the failure is logged.
		 */
9180 miocnak(q, mp, 0, secpolicy_ip_config(cr, B_FALSE));
9181 return;
9182 }
9183
9184 /*
9185 * The ioctl command tables can only encode fixed length
9186 * ioctl data. If the length is variable, the table will
9187 * encode the length as zero. Such special cases are handled
9188 * below in the switch.
9189 */
9190 if (ipip->ipi_copyin_size != 0) {
9191 mi_copyin(q, mp, NULL, ipip->ipi_copyin_size);
9192 return;
9193 }
9194
9195 switch (iocp->ioc_cmd) {
9196 case O_SIOCGIFCONF:
9197 case SIOCGIFCONF:
9198 /*
9199 * This IOCTL is hilarious. See comments in
9200 * ip_sioctl_get_ifconf for the story.
9201 */
9202 if (iocp->ioc_count == TRANSPARENT)
9203 copyin_size = SIZEOF_STRUCT(ifconf,
9204 iocp->ioc_flag);
9205 else
9206 copyin_size = iocp->ioc_count;
9207 mi_copyin(q, mp, NULL, copyin_size);
9208 return;
9209
9210 case O_SIOCGLIFCONF:
9211 case SIOCGLIFCONF:
9212 copyin_size = SIZEOF_STRUCT(lifconf, iocp->ioc_flag);
9213 mi_copyin(q, mp, NULL, copyin_size);
9214 return;
9215
9216 case SIOCGLIFSRCOF:
9217 copyin_size = SIZEOF_STRUCT(lifsrcof, iocp->ioc_flag);
9218 mi_copyin(q, mp, NULL, copyin_size);
9219 return;
9220
9221 case SIOCGIP6ADDRPOLICY:
9222 ip_sioctl_ip6addrpolicy(q, mp);
9223 ip6_asp_table_refrele(ipst);
9224 return;
9225
9226 case SIOCSIP6ADDRPOLICY:
9227 ip_sioctl_ip6addrpolicy(q, mp);
9228 return;
9229
9230 case SIOCGDSTINFO:
9231 ip_sioctl_dstinfo(q, mp);
9232 ip6_asp_table_refrele(ipst);
9233 return;
9234
9235 case ND_SET:
9236 case ND_GET:
9237 ip_process_legacy_nddprop(q, mp);
9238 return;
9239
9240 case SIOCSETPROP:
9241 case SIOCGETPROP:
9242 ip_sioctl_getsetprop(q, mp);
9243 return;
9244
9245 case I_PLINK:
9246 case I_PUNLINK:
9247 case I_LINK:
9248 case I_UNLINK:
		/*
		 * We treat the non-persistent link case the same as the
		 * persistent link case, in terms of plumbing/unplumbing as
		 * well as the dynamic re-plumbing events indicator. See the
		 * comments in ip_sioctl_plink() for more.
		 *
		 * The request can be enqueued in the 'ipsq' while waiting
		 * to become exclusive, so bump up the conn ref.
		 */
9258 if (CONN_Q(q)) {
9259 CONN_INC_REF(Q_TO_CONN(q));
9260 CONN_INC_IOCTLREF(Q_TO_CONN(q))
9261 }
9262 ip_sioctl_plink(NULL, q, mp, NULL);
9263 return;
9264
9265 case IP_IOCTL:
9266 ip_wput_ioctl(q, mp);
9267 return;
9268
9269 case SIOCILB:
9270 /* The ioctl length varies depending on the ILB command. */
9271 copyin_size = iocp->ioc_count;
9272 if (copyin_size < sizeof (ilb_cmd_t))
9273 goto nak;
9274 mi_copyin(q, mp, NULL, copyin_size);
9275 return;
9276
9277 default:
9278 cmn_err(CE_WARN, "Unknown ioctl %d/0x%x slipped through.",
9279 iocp->ioc_cmd, iocp->ioc_cmd);
9280 /* FALLTHRU */
9281 }
9282 nak:
9283 if (mp->b_cont != NULL) {
9284 freemsg(mp->b_cont);
9285 mp->b_cont = NULL;
9286 }
9287 iocp->ioc_error = EINVAL;
9288 mp->b_datap->db_type = M_IOCNAK;
9289 iocp->ioc_count = 0;
9290 qreply(q, mp);
9291 }
9292
9293 static void
9294 ip_sioctl_garp_reply(mblk_t *mp, ill_t *ill, void *hwaddr, int flags)
9295 {
9296 struct arpreq *ar;
9297 struct xarpreq *xar;
9298 mblk_t *tmp;
9299 struct iocblk *iocp;
	boolean_t x_arp_ioctl = B_FALSE;
9301 int *flagsp;
9302 char *storage = NULL;
9303
9304 ASSERT(ill != NULL);
9305
9306 iocp = (struct iocblk *)mp->b_rptr;
9307 ASSERT(iocp->ioc_cmd == SIOCGXARP || iocp->ioc_cmd == SIOCGARP);
9308
9309 tmp = (mp->b_cont)->b_cont; /* xarpreq/arpreq */
9310 if ((iocp->ioc_cmd == SIOCGXARP) ||
9311 (iocp->ioc_cmd == SIOCSXARP)) {
9312 x_arp_ioctl = B_TRUE;
9313 xar = (struct xarpreq *)tmp->b_rptr;
9314 flagsp = &xar->xarp_flags;
9315 storage = xar->xarp_ha.sdl_data;
9316 } else {
9317 ar = (struct arpreq *)tmp->b_rptr;
9318 flagsp = &ar->arp_flags;
9319 storage = ar->arp_ha.sa_data;
9320 }
9321
	/*
	 * For an SIOCGXARP, fill in the sockaddr_dl and make sure the
	 * reply will fit in the caller's buffer.
	 */
9325 if (x_arp_ioctl) {
9326 storage += ill_xarp_info(&xar->xarp_ha, ill);
9327 if ((ill->ill_phys_addr_length + ill->ill_name_length) >
9328 sizeof (xar->xarp_ha.sdl_data)) {
9329 iocp->ioc_error = EINVAL;
9330 return;
9331 }
9332 }
9333 *flagsp = ATF_INUSE;
	/*
	 * If /sbin/arp told us we are the authority using the "permanent"
	 * flag, or if this is one of my own addresses, report "permanent"
	 * in the /sbin/arp output.
	 */
9339 if ((flags & NCE_F_MYADDR) || (flags & NCE_F_AUTHORITY))
9340 *flagsp |= ATF_AUTHORITY;
9341 if (flags & NCE_F_NONUD)
9342 *flagsp |= ATF_PERM; /* not subject to aging */
9343 if (flags & NCE_F_PUBLISH)
9344 *flagsp |= ATF_PUBL;
9345 if (hwaddr != NULL) {
9346 *flagsp |= ATF_COM;
9347 bcopy((char *)hwaddr, storage, ill->ill_phys_addr_length);
9348 }
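	/*
	 * Net effect, for example (illustrative): a published, non-aging
	 * (NCE_F_NONUD) entry for one of our own addresses with a known
	 * hardware address reports ATF_INUSE | ATF_AUTHORITY | ATF_PERM |
	 * ATF_PUBL | ATF_COM to userland.
	 */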
9349 }
9350
/*
 * Create a new logical interface. If ipif_id is zero (i.e. not a logical
 * interface) create the next available logical interface for this
 * physical interface.
 * If ipif is NULL (i.e. the lookup didn't find one) attempt to create an
 * ipif with the specified name.
 *
 * If the address family is not AF_UNSPEC then set the address as well.
 *
 * If ip_sioctl_addr returns EINPROGRESS then the ioctl (the copyout)
 * is completed when the DL_BIND_ACK arrives in ip_rput_dlpi_writer.
 *
 * Executed as a writer on the ill, so no lock is needed to traverse the
 * ipif chain or examine the phyint flags.
 */
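/*
 * Illustrative userland sketch (not part of this file; assumes an
 * existing physical interface "hme0" and an AF_INET socket):
 *
 *	struct lifreq lifr;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	bzero(&lifr, sizeof (lifr));
 *	(void) strlcpy(lifr.lifr_name, "hme0", sizeof (lifr.lifr_name));
 *	lifr.lifr_addr.ss_family = AF_UNSPEC;
 *	(void) ioctl(s, SIOCLIFADDIF, (caddr_t)&lifr);
 *
 * On success lifr.lifr_name reads back as, e.g., "hme0:1", matching the
 * name written back by the sprintf() below.
 */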
9367 /* ARGSUSED */
9368 int
9369 ip_sioctl_addif(ipif_t *dummy_ipif, sin_t *dummy_sin, queue_t *q, mblk_t *mp,
9370 ip_ioctl_cmd_t *dummy_ipip, void *dummy_ifreq)
9371 {
9372 mblk_t *mp1;
9373 struct lifreq *lifr;
9374 boolean_t isv6;
9375 boolean_t exists;
9376 char *name;
9377 char *endp;
9378 char *cp;
9379 int namelen;
9380 ipif_t *ipif;
9381 long id;
9382 ipsq_t *ipsq;
9383 ill_t *ill;
9384 sin_t *sin;
9385 int err = 0;
9386 boolean_t found_sep = B_FALSE;
9387 conn_t *connp;
9388 zoneid_t zoneid;
9389 ip_stack_t *ipst = CONNQ_TO_IPST(q);
9390
9391 ASSERT(q->q_next == NULL);
9392 ip1dbg(("ip_sioctl_addif\n"));
9393 /* Existence of mp1 has been checked in ip_wput_nondata */
9394 mp1 = mp->b_cont->b_cont;
9395 /*
9396 * Null terminate the string to protect against buffer
9397 * overrun. String was generated by user code and may not
9398 * be trusted.
9399 */
9400 lifr = (struct lifreq *)mp1->b_rptr;
9401 lifr->lifr_name[LIFNAMSIZ - 1] = '\0';
9402 name = lifr->lifr_name;
9403 ASSERT(CONN_Q(q));
9404 connp = Q_TO_CONN(q);
9405 isv6 = (connp->conn_family == AF_INET6);
9406 zoneid = connp->conn_zoneid;
9407 namelen = mi_strlen(name);
9408 if (namelen == 0)
9409 return (EINVAL);
9410
9411 exists = B_FALSE;
9412 if ((namelen + 1 == sizeof (ipif_loopback_name)) &&
9413 (mi_strcmp(name, ipif_loopback_name) == 0)) {
		/*
		 * Allow creating lo0 using SIOCLIFADDIF. There can't be any
		 * other writer thread, so it is safe to have
		 * ipif_lookup_on_name() do the lookup-and-create below.
		 */
9419 ipif = ipif_lookup_on_name(lifr->lifr_name, namelen, B_TRUE,
9420 &exists, isv6, zoneid, ipst);
9421 /* Prevent any further action */
9422 if (ipif == NULL) {
9423 return (ENOBUFS);
9424 } else if (!exists) {
9425 /* We created the ipif now and as writer */
9426 ipif_refrele(ipif);
9427 return (0);
9428 } else {
9429 ill = ipif->ipif_ill;
9430 ill_refhold(ill);
9431 ipif_refrele(ipif);
9432 }
9433 } else {
9434 /* Look for a colon in the name. */
9435 endp = &name[namelen];
9436 for (cp = endp; --cp > name; ) {
9437 if (*cp == IPIF_SEPARATOR_CHAR) {
9438 found_sep = B_TRUE;
				/*
				 * Reject any non-decimal aliases for plumbing
				 * of logical interfaces. Aliases with leading
				 * zeroes are also rejected as they introduce
				 * ambiguity in the naming of the interfaces.
				 * Comparing with "0" takes care of all such
				 * cases. For example, "bge0:2" is accepted
				 * while "bge0:02" is rejected.
				 */
9447 if ((strncmp("0", cp+1, 1)) == 0)
9448 return (EINVAL);
9449
9450 if (ddi_strtol(cp+1, &endp, 10, &id) != 0 ||
9451 id <= 0 || *endp != '\0') {
9452 return (EINVAL);
9453 }
9454 *cp = '\0';
9455 break;
9456 }
9457 }
9458 ill = ill_lookup_on_name(name, B_FALSE, isv6, NULL, ipst);
9459 if (found_sep)
9460 *cp = IPIF_SEPARATOR_CHAR;
9461 if (ill == NULL)
9462 return (ENXIO);
9463 }
9464
9465 ipsq = ipsq_try_enter(NULL, ill, q, mp, ip_process_ioctl, NEW_OP,
9466 B_TRUE);
9467
	/*
	 * Release the refhold due to the lookup, now that we are either
	 * exclusive or just returning.
	 */
9472 ill_refrele(ill);
9473
9474 if (ipsq == NULL)
9475 return (EINPROGRESS);
9476
9477 /* We are now exclusive on the IPSQ */
9478 ASSERT(IAM_WRITER_ILL(ill));
9479
9480 if (found_sep) {
9481 /* Now see if there is an IPIF with this unit number. */
9482 for (ipif = ill->ill_ipif; ipif != NULL;
9483 ipif = ipif->ipif_next) {
9484 if (ipif->ipif_id == id) {
9485 err = EEXIST;
9486 goto done;
9487 }
9488 }
9489 }
9490
9491 /*
9492 * We use IRE_LOCAL for lo0:1 etc. for "receive only" use
9493 * of lo0. Plumbing for lo0:0 happens in ipif_lookup_on_name()
9494 * instead.
9495 */
9496 if ((ipif = ipif_allocate(ill, found_sep ? id : -1, IRE_LOCAL,
9497 B_TRUE, B_TRUE, &err)) == NULL) {
9498 goto done;
9499 }
9500
9501 /* Return created name with ioctl */
9502 (void) sprintf(lifr->lifr_name, "%s%c%d", ill->ill_name,
9503 IPIF_SEPARATOR_CHAR, ipif->ipif_id);
9504 ip1dbg(("created %s\n", lifr->lifr_name));
9505
9506 /* Set address */
9507 sin = (sin_t *)&lifr->lifr_addr;
9508 if (sin->sin_family != AF_UNSPEC) {
9509 err = ip_sioctl_addr(ipif, sin, q, mp,
9510 &ip_ndx_ioctl_table[SIOCLIFADDR_NDX], lifr);
9511 }
9512
9513 done:
9514 ipsq_exit(ipsq);
9515 return (err);
9516 }
9517
/*
 * Remove an existing logical interface. If ipif_id is zero (i.e. not a logical
 * interface) delete it based on the IP address (on this physical interface).
 * Otherwise delete it based on the ipif_id.
 * There is also special handling to allow a removeif of lo0.
 */
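/*
 * Illustrative examples (not part of this file): "ifconfig hme0:1
 * unplumb" arrives as an SIOCLIFREMOVEIF with lifr_name "hme0:1",
 * while unplumbing all of lo0 arrives with lifr_name "lo0" and an
 * all-zeroes AF_UNSPEC lifr_addr, triggering the whole-ill teardown
 * below.
 */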
9524 /* ARGSUSED */
9525 int
9526 ip_sioctl_removeif(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
9527 ip_ioctl_cmd_t *ipip, void *dummy_if_req)
9528 {
9529 conn_t *connp;
9530 ill_t *ill = ipif->ipif_ill;
9531 boolean_t success;
9532 ip_stack_t *ipst;
9533
9534 ipst = CONNQ_TO_IPST(q);
9535
9536 ASSERT(q->q_next == NULL);
9537 ip1dbg(("ip_sioctl_remove_if(%s:%u %p)\n",
9538 ill->ill_name, ipif->ipif_id, (void *)ipif));
9539 ASSERT(IAM_WRITER_IPIF(ipif));
9540
9541 connp = Q_TO_CONN(q);
9542 /*
9543 * Special case for unplumbing lo0 (the loopback physical interface).
9544 * If unplumbing lo0, the incoming address structure has been
9545 * initialized to all zeros. When unplumbing lo0, all its logical
9546 * interfaces must be removed too.
9547 *
	 * Note that this function may be called to remove a specific
	 * loopback logical interface (e.g., lo0:1). But in that case
9550 * ipif->ipif_id != 0 so that the code path for that case is the
9551 * same as any other interface (meaning it skips the code directly
9552 * below).
9553 */
9554 if (ipif->ipif_id == 0 && ill->ill_net_type == IRE_LOOPBACK) {
9555 if (sin->sin_family == AF_UNSPEC &&
9556 (IN6_IS_ADDR_UNSPECIFIED(&((sin6_t *)sin)->sin6_addr))) {
			/*
			 * Mark it condemned. No new references will be made
			 * to the ill.
			 */
9560 mutex_enter(&ill->ill_lock);
9561 ill->ill_state_flags |= ILL_CONDEMNED;
9562 for (ipif = ill->ill_ipif; ipif != NULL;
9563 ipif = ipif->ipif_next) {
9564 ipif->ipif_state_flags |= IPIF_CONDEMNED;
9565 }
9566 mutex_exit(&ill->ill_lock);
9567
9568 ipif = ill->ill_ipif;
9569 /* unplumb the loopback interface */
9570 ill_delete(ill);
9571 mutex_enter(&connp->conn_lock);
9572 mutex_enter(&ill->ill_lock);
9573
			/* Are any references to this ill active? */
9575 if (ill_is_freeable(ill)) {
9576 mutex_exit(&ill->ill_lock);
9577 mutex_exit(&connp->conn_lock);
9578 ill_delete_tail(ill);
9579 mi_free(ill);
9580 return (0);
9581 }
9582 success = ipsq_pending_mp_add(connp, ipif,
9583 CONNP_TO_WQ(connp), mp, ILL_FREE);
9584 mutex_exit(&connp->conn_lock);
9585 mutex_exit(&ill->ill_lock);
9586 if (success)
9587 return (EINPROGRESS);
9588 else
9589 return (EINTR);
9590 }
9591 }
9592
9593 if (ipif->ipif_id == 0) {
9594 ipsq_t *ipsq;
9595
9596 /* Find based on address */
9597 if (ipif->ipif_isv6) {
9598 sin6_t *sin6;
9599
9600 if (sin->sin_family != AF_INET6)
9601 return (EAFNOSUPPORT);
9602
9603 sin6 = (sin6_t *)sin;
9604 /* We are a writer, so we should be able to lookup */
9605 ipif = ipif_lookup_addr_exact_v6(&sin6->sin6_addr, ill,
9606 ipst);
9607 } else {
9608 if (sin->sin_family != AF_INET)
9609 return (EAFNOSUPPORT);
9610
9611 /* We are a writer, so we should be able to lookup */
9612 ipif = ipif_lookup_addr_exact(sin->sin_addr.s_addr, ill,
9613 ipst);
9614 }
9615 if (ipif == NULL) {
9616 return (EADDRNOTAVAIL);
9617 }
9618
9619 /*
9620 * It is possible for a user to send an SIOCLIFREMOVEIF with
9621 * lifr_name of the physical interface but with an ip address
9622 * lifr_addr of a logical interface plumbed over it.
9623 * So update ipx_current_ipif now that ipif points to the
9624 * correct one.
9625 */
9626 ipsq = ipif->ipif_ill->ill_phyint->phyint_ipsq;
9627 ipsq->ipsq_xop->ipx_current_ipif = ipif;
9628
9629 /* This is a writer */
9630 ipif_refrele(ipif);
9631 }
9632
	/*
	 * Cannot delete instance zero since it is tied to the ill.
	 */
9636 if (ipif->ipif_id == 0)
9637 return (EBUSY);
9638
9639 mutex_enter(&ill->ill_lock);
9640 ipif->ipif_state_flags |= IPIF_CONDEMNED;
9641 mutex_exit(&ill->ill_lock);
9642
9643 ipif_free(ipif);
9644
9645 mutex_enter(&connp->conn_lock);
9646 mutex_enter(&ill->ill_lock);
9647
	/* Are any references to this ipif active? */
9649 if (ipif_is_freeable(ipif)) {
9650 mutex_exit(&ill->ill_lock);
9651 mutex_exit(&connp->conn_lock);
9652 ipif_non_duplicate(ipif);
9653 (void) ipif_down_tail(ipif);
9654 ipif_free_tail(ipif); /* frees ipif */
9655 return (0);
9656 }
9657 success = ipsq_pending_mp_add(connp, ipif, CONNP_TO_WQ(connp), mp,
9658 IPIF_FREE);
9659 mutex_exit(&ill->ill_lock);
9660 mutex_exit(&connp->conn_lock);
9661 if (success)
9662 return (EINPROGRESS);
9663 else
9664 return (EINTR);
9665 }
9666
/*
 * Restart the removeif ioctl. The refcnt has gone down to 0.
 * The ipif is already condemned, so it can't be found via lookups.
 */
9671 /* ARGSUSED */
9672 int
9673 ip_sioctl_removeif_restart(ipif_t *ipif, sin_t *dummy_sin, queue_t *q,
9674 mblk_t *mp, ip_ioctl_cmd_t *ipip, void *dummy_if_req)
9675 {
9676 ill_t *ill = ipif->ipif_ill;
9677
9678 ASSERT(IAM_WRITER_IPIF(ipif));
9679 ASSERT(ipif->ipif_state_flags & IPIF_CONDEMNED);
9680
9681 ip1dbg(("ip_sioctl_removeif_restart(%s:%u %p)\n",
9682 ill->ill_name, ipif->ipif_id, (void *)ipif));
9683
9684 if (ipif->ipif_id == 0 && ill->ill_net_type == IRE_LOOPBACK) {
9685 ASSERT(ill->ill_state_flags & ILL_CONDEMNED);
9686 ill_delete_tail(ill);
9687 mi_free(ill);
9688 return (0);
9689 }
9690
9691 ipif_non_duplicate(ipif);
9692 (void) ipif_down_tail(ipif);
9693 ipif_free_tail(ipif);
9694
9695 return (0);
9696 }
9697
9698 /*
9699 * Set the local interface address using the given prefix and ill_token.
9700 */
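/*
 * For example (illustrative values): with an ill_token of
 * ::8:20ff:fe12:3456 and a caller-supplied prefix of 2001:db8:1:2::/64,
 * the OR loop below produces the address 2001:db8:1:2:8:20ff:fe12:3456.
 */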
9701 /* ARGSUSED */
9702 int
9703 ip_sioctl_prefix(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
9704 ip_ioctl_cmd_t *dummy_ipip, void *dummy_ifreq)
9705 {
9706 int err;
9707 in6_addr_t v6addr;
9708 sin6_t *sin6;
9709 ill_t *ill;
9710 int i;
9711
9712 ip1dbg(("ip_sioctl_prefix(%s:%u %p)\n",
9713 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
9714
9715 ASSERT(IAM_WRITER_IPIF(ipif));
9716
9717 if (!ipif->ipif_isv6)
9718 return (EINVAL);
9719
9720 if (sin->sin_family != AF_INET6)
9721 return (EAFNOSUPPORT);
9722
9723 sin6 = (sin6_t *)sin;
9724 v6addr = sin6->sin6_addr;
9725 ill = ipif->ipif_ill;
9726
9727 if (IN6_IS_ADDR_UNSPECIFIED(&v6addr) ||
9728 IN6_IS_ADDR_UNSPECIFIED(&ill->ill_token))
9729 return (EADDRNOTAVAIL);
9730
9731 for (i = 0; i < 4; i++)
9732 sin6->sin6_addr.s6_addr32[i] |= ill->ill_token.s6_addr32[i];
9733
9734 err = ip_sioctl_addr(ipif, sin, q, mp,
9735 &ip_ndx_ioctl_table[SIOCLIFADDR_NDX], dummy_ifreq);
9736 return (err);
9737 }
9738
9739 /*
9740 * Restart entry point to restart the address set operation after the
9741 * refcounts have dropped to zero.
9742 */
9743 /* ARGSUSED */
9744 int
9745 ip_sioctl_prefix_restart(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
9746 ip_ioctl_cmd_t *ipip, void *ifreq)
9747 {
9748 ip1dbg(("ip_sioctl_prefix_restart(%s:%u %p)\n",
9749 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
9750 return (ip_sioctl_addr_restart(ipif, sin, q, mp, ipip, ifreq));
9751 }
9752
9753 /*
9754 * Set the local interface address.
9755 * Allow an address of all zero when the interface is down.
9756 */
9757 /* ARGSUSED */
9758 int
9759 ip_sioctl_addr(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
9760 ip_ioctl_cmd_t *dummy_ipip, void *dummy_ifreq)
9761 {
9762 int err = 0;
9763 in6_addr_t v6addr;
9764 boolean_t need_up = B_FALSE;
9765 ill_t *ill;
9766
9767 ip1dbg(("ip_sioctl_addr(%s:%u %p)\n",
9768 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
9769
9770 ASSERT(IAM_WRITER_IPIF(ipif));
9771
9772 ill = ipif->ipif_ill;
9773 if (ipif->ipif_isv6) {
9774 sin6_t *sin6;
9775 phyint_t *phyi;
9776
9777 if (sin->sin_family != AF_INET6)
9778 return (EAFNOSUPPORT);
9779
9780 sin6 = (sin6_t *)sin;
9781 v6addr = sin6->sin6_addr;
9782 phyi = ill->ill_phyint;
9783
		/*
		 * Enforce that true multicast interfaces have a link-local
		 * address for logical unit 0.
		 *
		 * However, for those ipifs for which a link-local address
		 * was not created by default, also allow setting :: as the
		 * address. This arises when we delete an address on the
		 * ipif with logical unit 0 and want to set :: in its place.
		 */
9793 if (ipif->ipif_id == 0 &&
9794 (ill->ill_flags & ILLF_MULTICAST) &&
9795 !(ipif->ipif_flags & (IPIF_POINTOPOINT)) &&
9796 !(phyi->phyint_flags & (PHYI_LOOPBACK)) &&
9797 !IN6_IS_ADDR_LINKLOCAL(&v6addr)) {
9798
			/*
			 * If the default link-local was not created by the
			 * kernel for this ill, allow setting :: as the
			 * address on ipif:0.
			 */
9803 if (ill->ill_flags & ILLF_NOLINKLOCAL) {
9804 if (!IN6_IS_ADDR_UNSPECIFIED(&v6addr))
9805 return (EADDRNOTAVAIL);
9806 } else {
9807 return (EADDRNOTAVAIL);
9808 }
9809 }
9810
		/*
		 * Up interfaces shouldn't have the unspecified address
		 * unless they also have the IPIF_NOLOCAL flag set and
		 * have a subnet assigned.
		 */
9816 if ((ipif->ipif_flags & IPIF_UP) &&
9817 IN6_IS_ADDR_UNSPECIFIED(&v6addr) &&
9818 (!(ipif->ipif_flags & IPIF_NOLOCAL) ||
9819 IN6_IS_ADDR_UNSPECIFIED(&ipif->ipif_v6subnet))) {
9820 return (EADDRNOTAVAIL);
9821 }
9822
9823 if (!ip_local_addr_ok_v6(&v6addr, &ipif->ipif_v6net_mask))
9824 return (EADDRNOTAVAIL);
9825 } else {
9826 ipaddr_t addr;
9827
9828 if (sin->sin_family != AF_INET)
9829 return (EAFNOSUPPORT);
9830
9831 addr = sin->sin_addr.s_addr;
9832
9833 /* Allow INADDR_ANY as the local address. */
9834 if (addr != INADDR_ANY &&
9835 !ip_addr_ok_v4(addr, ipif->ipif_net_mask))
9836 return (EADDRNOTAVAIL);
9837
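		/*
		 * Note (illustrative): the v4 address is stored internally
		 * in IPv4-mapped IPv6 form, e.g. 192.168.1.1 becomes
		 * ::ffff:192.168.1.1.
		 */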
9838 IN6_IPADDR_TO_V4MAPPED(addr, &v6addr);
9839 }
9840 /* verify that the address being configured is permitted by mac */
9841 if (!ill_ipcheck_addr(ill, &v6addr)) {
9842 return (EPERM);
9843 }
9844 /*
9845 * Even if there is no change we redo things just to rerun
9846 * ipif_set_default.
9847 */
9848 if (ipif->ipif_flags & IPIF_UP) {
		/*
		 * Setting a new local address: make sure we have net and
		 * subnet bcast ire's for the old address if we need them.
		 *
		 * Since the interface is already marked up, we call
		 * ipif_down which will take care of ditching any IREs that
		 * have been set up based on the old interface address.
		 */
9860 err = ipif_logical_down(ipif, q, mp);
9861 if (err == EINPROGRESS)
9862 return (err);
9863 (void) ipif_down_tail(ipif);
		need_up = B_TRUE;
9865 }
9866
9867 err = ip_sioctl_addr_tail(ipif, sin, q, mp, need_up);
9868 return (err);
9869 }
9870
9871 int
9872 ip_sioctl_addr_tail(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
9873 boolean_t need_up)
9874 {
9875 in6_addr_t v6addr;
9876 in6_addr_t ov6addr;
9877 ipaddr_t addr;
9878 sin6_t *sin6;
9879 int sinlen;
9880 int err = 0;
9881 ill_t *ill = ipif->ipif_ill;
9882 boolean_t need_dl_down;
9883 boolean_t need_arp_down;
9884 struct iocblk *iocp;
9885
9886 iocp = (mp != NULL) ? (struct iocblk *)mp->b_rptr : NULL;
9887
9888 ip1dbg(("ip_sioctl_addr_tail(%s:%u %p)\n",
9889 ill->ill_name, ipif->ipif_id, (void *)ipif));
9890 ASSERT(IAM_WRITER_IPIF(ipif));
9891
9892 /* Must cancel any pending timer before taking the ill_lock */
9893 if (ipif->ipif_recovery_id != 0)
9894 (void) untimeout(ipif->ipif_recovery_id);
9895 ipif->ipif_recovery_id = 0;
9896
9897 if (ipif->ipif_isv6) {
9898 sin6 = (sin6_t *)sin;
9899 v6addr = sin6->sin6_addr;
9900 sinlen = sizeof (struct sockaddr_in6);
9901 } else {
9902 addr = sin->sin_addr.s_addr;
9903 IN6_IPADDR_TO_V4MAPPED(addr, &v6addr);
9904 sinlen = sizeof (struct sockaddr_in);
9905 }
9906 mutex_enter(&ill->ill_lock);
9907 ov6addr = ipif->ipif_v6lcl_addr;
9908 ipif->ipif_v6lcl_addr = v6addr;
9909 sctp_update_ipif_addr(ipif, ov6addr);
9910 ipif->ipif_addr_ready = 0;
9911
9912 ip_rts_newaddrmsg(RTM_CHGADDR, 0, ipif, RTSQ_DEFAULT);
9913
9914 /*
9915 * If the interface was previously marked as a duplicate, then since
9916 * we've now got a "new" address, it should no longer be considered a
9917 * duplicate -- even if the "new" address is the same as the old one.
9918 * Note that if all ipifs are down, we may have a pending ARP down
9919 * event to handle. This is because we want to recover from duplicates
9920 * and thus delay tearing down ARP until the duplicates have been
9921 * removed or disabled.
9922 */
9923 need_dl_down = need_arp_down = B_FALSE;
9924 if (ipif->ipif_flags & IPIF_DUPLICATE) {
9925 need_arp_down = !need_up;
9926 ipif->ipif_flags &= ~IPIF_DUPLICATE;
9927 if (--ill->ill_ipif_dup_count == 0 && !need_up &&
9928 ill->ill_ipif_up_count == 0 && ill->ill_dl_up) {
9929 need_dl_down = B_TRUE;
9930 }
9931 }
9932
9933 ipif_set_default(ipif);
9934
9935 /*
9936 * If we've just manually set the IPv6 link-local address (0th ipif),
9937 * tag the ill so that future updates to the interface ID don't result
9938 * in this address getting automatically reconfigured from under the
9939 * administrator.
9940 */
9941 if (ipif->ipif_isv6 && ipif->ipif_id == 0) {
9942 if (iocp == NULL || (iocp->ioc_cmd == SIOCSLIFADDR &&
9943 !IN6_IS_ADDR_UNSPECIFIED(&v6addr)))
9944 ill->ill_manual_linklocal = 1;
9945 }
9946
9947 /*
9948 * When publishing an interface address change event, we only notify
9949 * the event listeners of the new address. It is assumed that if they
9950 * actively care about the addresses assigned that they will have
9951 * already discovered the previous address assigned (if there was one.)
9952 *
9953 * Don't attach nic event message for SIOCLIFADDIF ioctl.
9954 */
9955 if (iocp != NULL && iocp->ioc_cmd != SIOCLIFADDIF) {
9956 ill_nic_event_dispatch(ill, MAP_IPIF_ID(ipif->ipif_id),
9957 NE_ADDRESS_CHANGE, sin, sinlen);
9958 }
9959
9960 mutex_exit(&ill->ill_lock);
9961
9962 if (need_up) {
9963 /*
9964 * Now bring the interface back up. If this
9965 * is the only IPIF for the ILL, ipif_up
9966 * will have to re-bind to the device, so
9967 * we may get back EINPROGRESS, in which
9968 * case, this IOCTL will get completed in
9969 * ip_rput_dlpi when we see the DL_BIND_ACK.
9970 */
9971 err = ipif_up(ipif, q, mp);
9972 } else {
9973 /* Perhaps ilgs should use this ill */
9974 update_conn_ill(NULL, ill->ill_ipst);
9975 }
9976
9977 if (need_dl_down)
9978 ill_dl_down(ill);
9979
9980 if (need_arp_down && !ill->ill_isv6)
9981 (void) ipif_arp_down(ipif);
9982
9983 /*
9984 * The default multicast interface might have changed (for
9985 * instance if the IPv6 scope of the address changed)
9986 */
9987 ire_increment_multicast_generation(ill->ill_ipst, ill->ill_isv6);
9988
9989 return (err);
9990 }
9991
9992 /*
9993 * Restart entry point to restart the address set operation after the
9994 * refcounts have dropped to zero.
9995 */
9996 /* ARGSUSED */
9997 int
9998 ip_sioctl_addr_restart(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
9999 ip_ioctl_cmd_t *ipip, void *ifreq)
10000 {
10001 ip1dbg(("ip_sioctl_addr_restart(%s:%u %p)\n",
10002 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
10003 ASSERT(IAM_WRITER_IPIF(ipif));
10004 (void) ipif_down_tail(ipif);
10005 return (ip_sioctl_addr_tail(ipif, sin, q, mp, B_TRUE));
10006 }
10007
10008 /* ARGSUSED */
10009 int
10010 ip_sioctl_get_addr(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
10011 ip_ioctl_cmd_t *ipip, void *if_req)
10012 {
10013 sin6_t *sin6 = (struct sockaddr_in6 *)sin;
10014 struct lifreq *lifr = (struct lifreq *)if_req;
10015
10016 ip1dbg(("ip_sioctl_get_addr(%s:%u %p)\n",
10017 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
10018 /*
10019 * The net mask and address can't change since we have a
10020 * reference to the ipif. So no lock is necessary.
10021 */
10022 if (ipif->ipif_isv6) {
10023 *sin6 = sin6_null;
10024 sin6->sin6_family = AF_INET6;
10025 sin6->sin6_addr = ipif->ipif_v6lcl_addr;
10026 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
10027 sin6->sin6_scope_id =
10028 ipif->ipif_ill->ill_phyint->phyint_ifindex;
10029 }
10030 ASSERT(ipip->ipi_cmd_type == LIF_CMD);
10031 lifr->lifr_addrlen =
10032 ip_mask_to_plen_v6(&ipif->ipif_v6net_mask);
10033 } else {
10034 *sin = sin_null;
10035 sin->sin_family = AF_INET;
10036 sin->sin_addr.s_addr = ipif->ipif_lcl_addr;
10037 if (ipip->ipi_cmd_type == LIF_CMD) {
10038 lifr->lifr_addrlen =
10039 ip_mask_to_plen(ipif->ipif_net_mask);
10040 }
10041 }
10042 return (0);
10043 }
10044
10045 /*
10046 * Set the destination address for a pt-pt interface.
10047 */
10048 /* ARGSUSED */
10049 int
10050 ip_sioctl_dstaddr(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
10051 ip_ioctl_cmd_t *ipip, void *if_req)
10052 {
10053 int err = 0;
10054 in6_addr_t v6addr;
10055 boolean_t need_up = B_FALSE;
10056
10057 ip1dbg(("ip_sioctl_dstaddr(%s:%u %p)\n",
10058 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
10059 ASSERT(IAM_WRITER_IPIF(ipif));
10060
10061 if (ipif->ipif_isv6) {
10062 sin6_t *sin6;
10063
10064 if (sin->sin_family != AF_INET6)
10065 return (EAFNOSUPPORT);
10066
10067 sin6 = (sin6_t *)sin;
10068 v6addr = sin6->sin6_addr;
10069
10070 if (!ip_remote_addr_ok_v6(&v6addr, &ipif->ipif_v6net_mask))
10071 return (EADDRNOTAVAIL);
10072 } else {
10073 ipaddr_t addr;
10074
10075 if (sin->sin_family != AF_INET)
10076 return (EAFNOSUPPORT);
10077
10078 addr = sin->sin_addr.s_addr;
10079 if (addr != INADDR_ANY &&
10080 !ip_addr_ok_v4(addr, ipif->ipif_net_mask)) {
10081 return (EADDRNOTAVAIL);
10082 }
10083
10084 IN6_IPADDR_TO_V4MAPPED(addr, &v6addr);
10085 }
10086
10087 if (IN6_ARE_ADDR_EQUAL(&ipif->ipif_v6pp_dst_addr, &v6addr))
10088 return (0); /* No change */
10089
10090 if (ipif->ipif_flags & IPIF_UP) {
10091 /*
10092 * If the interface is already marked up,
10093 * we call ipif_down which will take care
10094 * of ditching any IREs that have been set
10095 * up based on the old pp dst address.
10096 */
10097 err = ipif_logical_down(ipif, q, mp);
10098 if (err == EINPROGRESS)
10099 return (err);
10100 (void) ipif_down_tail(ipif);
10101 need_up = B_TRUE;
10102 }
	/*
	 * This could return EINPROGRESS; if so, the ioctl will complete
	 * in ip_rput_dlpi_writer.
	 */
10107 err = ip_sioctl_dstaddr_tail(ipif, sin, q, mp, need_up);
10108 return (err);
10109 }
10110
10111 static int
10112 ip_sioctl_dstaddr_tail(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
10113 boolean_t need_up)
10114 {
10115 in6_addr_t v6addr;
10116 ill_t *ill = ipif->ipif_ill;
10117 int err = 0;
10118 boolean_t need_dl_down;
10119 boolean_t need_arp_down;
10120
10121 ip1dbg(("ip_sioctl_dstaddr_tail(%s:%u %p)\n", ill->ill_name,
10122 ipif->ipif_id, (void *)ipif));
10123
10124 /* Must cancel any pending timer before taking the ill_lock */
10125 if (ipif->ipif_recovery_id != 0)
10126 (void) untimeout(ipif->ipif_recovery_id);
10127 ipif->ipif_recovery_id = 0;
10128
10129 if (ipif->ipif_isv6) {
10130 sin6_t *sin6;
10131
10132 sin6 = (sin6_t *)sin;
10133 v6addr = sin6->sin6_addr;
10134 } else {
10135 ipaddr_t addr;
10136
10137 addr = sin->sin_addr.s_addr;
10138 IN6_IPADDR_TO_V4MAPPED(addr, &v6addr);
10139 }
10140 mutex_enter(&ill->ill_lock);
10141 /* Set point to point destination address. */
10142 if ((ipif->ipif_flags & IPIF_POINTOPOINT) == 0) {
10143 /*
10144 * Allow this as a means of creating logical
10145 * pt-pt interfaces on top of e.g. an Ethernet.
10146 * XXX Undocumented HACK for testing.
10147 * pt-pt interfaces are created with NUD disabled.
10148 */
10149 ipif->ipif_flags |= IPIF_POINTOPOINT;
10150 ipif->ipif_flags &= ~IPIF_BROADCAST;
10151 if (ipif->ipif_isv6)
10152 ill->ill_flags |= ILLF_NONUD;
10153 }
10154
10155 /*
10156 * If the interface was previously marked as a duplicate, then since
10157 * we've now got a "new" address, it should no longer be considered a
10158 * duplicate -- even if the "new" address is the same as the old one.
10159 * Note that if all ipifs are down, we may have a pending ARP down
10160 * event to handle.
10161 */
10162 need_dl_down = need_arp_down = B_FALSE;
10163 if (ipif->ipif_flags & IPIF_DUPLICATE) {
10164 need_arp_down = !need_up;
10165 ipif->ipif_flags &= ~IPIF_DUPLICATE;
10166 if (--ill->ill_ipif_dup_count == 0 && !need_up &&
10167 ill->ill_ipif_up_count == 0 && ill->ill_dl_up) {
10168 need_dl_down = B_TRUE;
10169 }
10170 }
10171
10172 /*
10173 * If we've just manually set the IPv6 destination link-local address
10174 * (0th ipif), tag the ill so that future updates to the destination
10175 * interface ID (as can happen with interfaces over IP tunnels) don't
10176 * result in this address getting automatically reconfigured from
10177 * under the administrator.
10178 */
10179 if (ipif->ipif_isv6 && ipif->ipif_id == 0)
10180 ill->ill_manual_dst_linklocal = 1;
10181
10182 /* Set the new address. */
10183 ipif->ipif_v6pp_dst_addr = v6addr;
10184 /* Make sure subnet tracks pp_dst */
10185 ipif->ipif_v6subnet = ipif->ipif_v6pp_dst_addr;
10186 mutex_exit(&ill->ill_lock);
10187
10188 if (need_up) {
10189 /*
10190 * Now bring the interface back up. If this
10191 * is the only IPIF for the ILL, ipif_up
10192 * will have to re-bind to the device, so
10193 * we may get back EINPROGRESS, in which
10194 * case, this IOCTL will get completed in
10195 * ip_rput_dlpi when we see the DL_BIND_ACK.
10196 */
10197 err = ipif_up(ipif, q, mp);
10198 }
10199
10200 if (need_dl_down)
10201 ill_dl_down(ill);
10202 if (need_arp_down && !ipif->ipif_isv6)
10203 (void) ipif_arp_down(ipif);
10204
10205 return (err);
10206 }
10207
10208 /*
10209 * Restart entry point to restart the dstaddress set operation after the
10210 * refcounts have dropped to zero.
10211 */
10212 /* ARGSUSED */
10213 int
10214 ip_sioctl_dstaddr_restart(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
10215 ip_ioctl_cmd_t *ipip, void *ifreq)
10216 {
10217 ip1dbg(("ip_sioctl_dstaddr_restart(%s:%u %p)\n",
10218 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
10219 (void) ipif_down_tail(ipif);
10220 return (ip_sioctl_dstaddr_tail(ipif, sin, q, mp, B_TRUE));
10221 }
10222
10223 /* ARGSUSED */
10224 int
10225 ip_sioctl_get_dstaddr(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
10226 ip_ioctl_cmd_t *ipip, void *if_req)
10227 {
10228 sin6_t *sin6 = (struct sockaddr_in6 *)sin;
10229
10230 ip1dbg(("ip_sioctl_get_dstaddr(%s:%u %p)\n",
10231 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
10232 /*
10233 * Get point to point destination address. The addresses can't
10234 * change since we hold a reference to the ipif.
10235 */
10236 if ((ipif->ipif_flags & IPIF_POINTOPOINT) == 0)
10237 return (EADDRNOTAVAIL);
10238
10239 if (ipif->ipif_isv6) {
10240 ASSERT(ipip->ipi_cmd_type == LIF_CMD);
10241 *sin6 = sin6_null;
10242 sin6->sin6_family = AF_INET6;
10243 sin6->sin6_addr = ipif->ipif_v6pp_dst_addr;
10244 } else {
10245 *sin = sin_null;
10246 sin->sin_family = AF_INET;
10247 sin->sin_addr.s_addr = ipif->ipif_pp_dst_addr;
10248 }
10249 return (0);
10250 }
10251
/*
 * Check which flags will change when the given flags are set, and
 * silently ignore flags which userland is not allowed to control.
 * (Because these flags may change between SIOCGLIFFLAGS and
 * SIOCSLIFFLAGS, and that's outside of userland's control,
 * we need to silently ignore them rather than fail.)
 */
10259 static void
10260 ip_sioctl_flags_onoff(ipif_t *ipif, uint64_t flags, uint64_t *onp,
10261 uint64_t *offp)
10262 {
10263 ill_t *ill = ipif->ipif_ill;
10264 phyint_t *phyi = ill->ill_phyint;
10265 uint64_t cantchange_flags, intf_flags;
10266 uint64_t turn_on, turn_off;
10267
10268 intf_flags = ipif->ipif_flags | ill->ill_flags | phyi->phyint_flags;
10269 cantchange_flags = IFF_CANTCHANGE;
10270 if (IS_IPMP(ill))
10271 cantchange_flags |= IFF_IPMP_CANTCHANGE;
10272 turn_on = (flags ^ intf_flags) & ~cantchange_flags;
10273 turn_off = intf_flags & turn_on;
10274 turn_on ^= turn_off;
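	/*
	 * Worked example (illustrative): if intf_flags has only IFF_UP
	 * set and the caller passes IFF_UP | IFF_PRIVATE, the XOR above
	 * yields IFF_PRIVATE, turn_off ends up 0, and turn_on ends up
	 * IFF_PRIVATE. Passing 0 instead yields turn_on == 0 and
	 * turn_off == IFF_UP.
	 */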
10275 *onp = turn_on;
10276 *offp = turn_off;
10277 }
10278
/*
 * Set interface flags. Many flags require special handling (e.g.,
 * bringing the interface down); see below for details.
 *
 * NOTE : We really don't enforce that ipif_id zero should be used
 *	  for setting any flags other than IFF_LOGINT_FLAGS. This
 *	  is because applications generally do a SIOCGLIFFLAGS, OR
 *	  in the new flags (which affect the logical interface), and
 *	  then do a SIOCSLIFFLAGS. Thus, "flags" below could contain
 *	  bits other than IFF_LOGINT_FLAGS. One could check whether
 *	  "turn_on" - the flags that will be turned on - is correct
 *	  with respect to ipif_id 0, but for backward compatibility
 *	  reasons this is not done.
 */
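/*
 * Illustrative userland pattern (not part of this file; "s" is an
 * AF_INET socket and lifr a struct lifreq naming the interface):
 *
 *	(void) ioctl(s, SIOCGLIFFLAGS, (caddr_t)&lifr);
 *	lifr.lifr_flags |= IFF_PRIVATE;
 *	(void) ioctl(s, SIOCSLIFFLAGS, (caddr_t)&lifr);
 *
 * This read-modify-write is why "flags" below can carry ill and phyint
 * bits as well as bits for this logical interface.
 */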
10292 /* ARGSUSED */
10293 int
10294 ip_sioctl_flags(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
10295 ip_ioctl_cmd_t *ipip, void *if_req)
10296 {
10297 uint64_t turn_on;
10298 uint64_t turn_off;
10299 int err = 0;
10300 phyint_t *phyi;
10301 ill_t *ill;
10302 conn_t *connp;
10303 uint64_t intf_flags;
10304 boolean_t phyint_flags_modified = B_FALSE;
10305 uint64_t flags;
10306 struct ifreq *ifr;
10307 struct lifreq *lifr;
10308 boolean_t set_linklocal = B_FALSE;
10309
10310 ip1dbg(("ip_sioctl_flags(%s:%u %p)\n",
10311 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
10312
10313 ASSERT(IAM_WRITER_IPIF(ipif));
10314
10315 ill = ipif->ipif_ill;
10316 phyi = ill->ill_phyint;
10317
10318 if (ipip->ipi_cmd_type == IF_CMD) {
10319 ifr = (struct ifreq *)if_req;
10320 flags = (uint64_t)(ifr->ifr_flags & 0x0000ffff);
10321 } else {
10322 lifr = (struct lifreq *)if_req;
10323 flags = lifr->lifr_flags;
10324 }
10325
10326 intf_flags = ipif->ipif_flags | ill->ill_flags | phyi->phyint_flags;
10327
10328 /*
10329 * Have the flags been set correctly until now?
10330 */
10331 ASSERT((phyi->phyint_flags & ~(IFF_PHYINT_FLAGS)) == 0);
10332 ASSERT((ill->ill_flags & ~(IFF_PHYINTINST_FLAGS)) == 0);
10333 ASSERT((ipif->ipif_flags & ~(IFF_LOGINT_FLAGS)) == 0);
	/*
	 * Compare the new flags to the old, and partition
	 * into those coming on and those going off.
	 * For the 16-bit command, keep the bits above the low 16 unchanged.
	 */
10339 if (ipip->ipi_cmd == SIOCSIFFLAGS)
10340 flags |= intf_flags & ~0xFFFF;
10341
10342 /*
10343 * Explicitly fail attempts to change flags that are always invalid on
10344 * an IPMP meta-interface.
10345 */
10346 if (IS_IPMP(ill) && ((flags ^ intf_flags) & IFF_IPMP_INVALID))
10347 return (EINVAL);
10348
10349 ip_sioctl_flags_onoff(ipif, flags, &turn_on, &turn_off);
10350 if ((turn_on|turn_off) == 0)
10351 return (0); /* No change */
10352
10353 /*
10354 * All test addresses must be IFF_DEPRECATED (to ensure source address
10355 * selection avoids them) -- so force IFF_DEPRECATED on, and do not
10356 * allow it to be turned off.
10357 */
10358 if ((turn_off & (IFF_DEPRECATED|IFF_NOFAILOVER)) == IFF_DEPRECATED &&
10359 (turn_on|intf_flags) & IFF_NOFAILOVER)
10360 return (EINVAL);
10361
10362 if ((connp = Q_TO_CONN(q)) == NULL)
10363 return (EINVAL);
10364
	/*
	 * Only the VRRP control socket is allowed to change IFF_UP
	 * (and, below, IFF_NOACCEPT) when IFF_VRRP is set.
	 */
10369 if ((intf_flags & IFF_VRRP) && ((turn_off | turn_on) & IFF_UP)) {
10370 if (!connp->conn_isvrrp)
10371 return (EINVAL);
10372 }
10373
	/*
	 * The IFF_NOACCEPT flag can only be set on an IFF_VRRP IP address
	 * by the VRRP control socket.
	 */
10378 if ((turn_off | turn_on) & IFF_NOACCEPT) {
10379 if (!connp->conn_isvrrp || !(intf_flags & IFF_VRRP))
10380 return (EINVAL);
10381 }
10382
10383 if (turn_on & IFF_NOFAILOVER) {
10384 turn_on |= IFF_DEPRECATED;
10385 flags |= IFF_DEPRECATED;
10386 }
10387
10388 /*
10389 * On underlying interfaces, only allow applications to manage test
10390 * addresses -- otherwise, they may get confused when the address
10391 * moves as part of being brought up. Likewise, prevent an
10392 * application-managed test address from being converted to a data
10393 * address. To prevent migration of administratively up addresses in
10394 * the kernel, we don't allow them to be converted either.
10395 */
10396 if (IS_UNDER_IPMP(ill)) {
10397 const uint64_t appflags = IFF_DHCPRUNNING | IFF_ADDRCONF;
10398
10399 if ((turn_on & appflags) && !(flags & IFF_NOFAILOVER))
10400 return (EINVAL);
10401
10402 if ((turn_off & IFF_NOFAILOVER) &&
10403 (flags & (appflags | IFF_UP | IFF_DUPLICATE)))
10404 return (EINVAL);
10405 }
10406
10407 /*
10408 * Only allow IFF_TEMPORARY flag to be set on
10409 * IPv6 interfaces.
10410 */
10411 if ((turn_on & IFF_TEMPORARY) && !(ipif->ipif_isv6))
10412 return (EINVAL);
10413
10414 /*
10415 * cannot turn off IFF_NOXMIT on VNI interfaces.
10416 */
10417 if ((turn_off & IFF_NOXMIT) && IS_VNI(ipif->ipif_ill))
10418 return (EINVAL);
10419
10420 /*
10421 * Don't allow the IFF_ROUTER flag to be turned on on loopback
10422 * interfaces. It makes no sense in that context.
10423 */
10424 if ((turn_on & IFF_ROUTER) && (phyi->phyint_flags & PHYI_LOOPBACK))
10425 return (EINVAL);
10426
10427 /*
10428 * For IPv6 ipif_id 0, don't allow the interface to be up without
10429 * a link local address if IFF_NOLOCAL or IFF_ANYCAST are not set.
10430 * If the link local address isn't set, and can be set, it will get
10431 * set later on in this function.
10432 */
10433 if (ipif->ipif_id == 0 && ipif->ipif_isv6 &&
10434 (flags & IFF_UP) && !(flags & (IFF_NOLOCAL|IFF_ANYCAST)) &&
10435 IN6_IS_ADDR_UNSPECIFIED(&ipif->ipif_v6lcl_addr)) {
10436 if (ipif_cant_setlinklocal(ipif))
10437 return (EINVAL);
10438 set_linklocal = B_TRUE;
10439 }
10440
10441 /*
10442 * If we modify physical interface flags, we'll potentially need to
10443 * send up two routing socket messages for the changes (one for the
10444 * IPv4 ill, and another for the IPv6 ill). Note that here.
10445 */
10446 if ((turn_on|turn_off) & IFF_PHYINT_FLAGS)
10447 phyint_flags_modified = B_TRUE;
10448
10449 /*
10450 * All functioning PHYI_STANDBY interfaces start life PHYI_INACTIVE
10451 * (otherwise, we'd immediately use them, defeating standby). Also,
10452 * since PHYI_INACTIVE has a separate meaning when PHYI_STANDBY is not
10453 * set, don't allow PHYI_STANDBY to be set if PHYI_INACTIVE is already
10454 * set, and clear PHYI_INACTIVE if PHYI_STANDBY is being cleared. We
10455 * also don't allow PHYI_STANDBY if VNI is enabled since its semantics
10456 * will not be honored.
10457 */
10458 if (turn_on & PHYI_STANDBY) {
10459 /*
10460 * No need to grab ill_g_usesrc_lock here; see the
10461 * synchronization notes in ip.c.
10462 */
10463 if (ill->ill_usesrc_grp_next != NULL ||
10464 intf_flags & PHYI_INACTIVE)
10465 return (EINVAL);
10466 if (!(flags & PHYI_FAILED)) {
10467 flags |= PHYI_INACTIVE;
10468 turn_on |= PHYI_INACTIVE;
10469 }
10470 }
10471
10472 if (turn_off & PHYI_STANDBY) {
10473 flags &= ~PHYI_INACTIVE;
10474 turn_off |= PHYI_INACTIVE;
10475 }
10476
10477 /*
10478 * PHYI_FAILED and PHYI_INACTIVE are mutually exclusive; fail if both
10479 * would end up on.
10480 */
10481 if ((flags & (PHYI_FAILED | PHYI_INACTIVE)) ==
10482 (PHYI_FAILED | PHYI_INACTIVE))
10483 return (EINVAL);
10484
10485 /*
10486 * If ILLF_ROUTER changes, we need to change the ip forwarding
10487 * status of the interface.
10488 */
10489 if ((turn_on | turn_off) & ILLF_ROUTER) {
10490 err = ill_forward_set(ill, ((turn_on & ILLF_ROUTER) != 0));
10491 if (err != 0)
10492 return (err);
10493 }
10494
10495 /*
10496 * If the interface is not UP and we are not going to
10497 * bring it UP, record the flags and return. When the
10498 * interface comes UP later, the right actions will be
10499 * taken.
10500 */
10501 if (!(ipif->ipif_flags & IPIF_UP) &&
10502 !(turn_on & IPIF_UP)) {
10503 /* Record new flags in their respective places. */
10504 mutex_enter(&ill->ill_lock);
10505 mutex_enter(&ill->ill_phyint->phyint_lock);
10506 ipif->ipif_flags |= (turn_on & IFF_LOGINT_FLAGS);
10507 ipif->ipif_flags &= (~turn_off & IFF_LOGINT_FLAGS);
10508 ill->ill_flags |= (turn_on & IFF_PHYINTINST_FLAGS);
10509 ill->ill_flags &= (~turn_off & IFF_PHYINTINST_FLAGS);
10510 phyi->phyint_flags |= (turn_on & IFF_PHYINT_FLAGS);
10511 phyi->phyint_flags &= (~turn_off & IFF_PHYINT_FLAGS);
10512 mutex_exit(&ill->ill_lock);
10513 mutex_exit(&ill->ill_phyint->phyint_lock);
10514
10515 /*
10516 * PHYI_FAILED, PHYI_INACTIVE, and PHYI_OFFLINE are all the
10517 * same to the kernel: if any of them has been set by
10518 * userland, the interface cannot be used for data traffic.
10519 */
10520 if ((turn_on|turn_off) &
10521 (PHYI_FAILED | PHYI_INACTIVE | PHYI_OFFLINE)) {
10522 ASSERT(!IS_IPMP(ill));
10523 /*
10524 * It's possible the ill is part of an "anonymous"
10525 * IPMP group rather than a real group. In that case,
10526 * there are no other interfaces in the group and thus
10527 * no need to call ipmp_phyint_refresh_active().
10528 */
10529 if (IS_UNDER_IPMP(ill))
10530 ipmp_phyint_refresh_active(phyi);
10531 }
10532
10533 if (phyint_flags_modified) {
10534 if (phyi->phyint_illv4 != NULL) {
10535 ip_rts_ifmsg(phyi->phyint_illv4->
10536 ill_ipif, RTSQ_DEFAULT);
10537 }
10538 if (phyi->phyint_illv6 != NULL) {
10539 ip_rts_ifmsg(phyi->phyint_illv6->
10540 ill_ipif, RTSQ_DEFAULT);
10541 }
10542 }
10543 /* The default multicast interface might have changed */
10544 ire_increment_multicast_generation(ill->ill_ipst,
10545 ill->ill_isv6);
10546
10547 return (0);
	} else if (set_linklocal) {
		mutex_enter(&ill->ill_lock);
		ipif->ipif_state_flags |= IPIF_SET_LINKLOCAL;
		mutex_exit(&ill->ill_lock);
	}
10554
10555 /*
10556 * Disallow IPv6 interfaces coming up that have the unspecified address,
10557 * or point-to-point interfaces with an unspecified destination. We do
10558 * allow the address to be unspecified for IPIF_NOLOCAL interfaces that
10559 * have a subnet assigned, which is how in.ndpd currently manages its
10560 * onlink prefix list when no addresses are configured with those
10561 * prefixes.
10562 */
10563 if (ipif->ipif_isv6 &&
10564 ((IN6_IS_ADDR_UNSPECIFIED(&ipif->ipif_v6lcl_addr) &&
	    ((!(ipif->ipif_flags & IPIF_NOLOCAL) &&
	    !(turn_on & IPIF_NOLOCAL)) ||
10566 IN6_IS_ADDR_UNSPECIFIED(&ipif->ipif_v6subnet))) ||
10567 ((ipif->ipif_flags & IPIF_POINTOPOINT) &&
10568 IN6_IS_ADDR_UNSPECIFIED(&ipif->ipif_v6pp_dst_addr)))) {
10569 return (EINVAL);
10570 }
10571
10572 /*
10573 * Prevent IPv4 point-to-point interfaces with a 0.0.0.0 destination
10574 * from being brought up.
10575 */
10576 if (!ipif->ipif_isv6 &&
10577 ((ipif->ipif_flags & IPIF_POINTOPOINT) &&
10578 ipif->ipif_pp_dst_addr == INADDR_ANY)) {
10579 return (EINVAL);
10580 }
10581
10582 /*
10583 * If we are going to change one or more of the flags that are
10584 * IPIF_UP, IPIF_DEPRECATED, IPIF_NOXMIT, IPIF_NOLOCAL, ILLF_NOARP,
10585 * ILLF_NONUD, IPIF_PRIVATE, IPIF_ANYCAST, IPIF_PREFERRED, and
10586 * IPIF_NOFAILOVER, we will take special action. This is
	 * done by bringing the ipif down, changing the flags and bringing
10588 * it back up again. For IPIF_NOFAILOVER, the act of bringing it
10589 * back up will trigger the address to be moved.
10590 *
10591 * If we are going to change IFF_NOACCEPT, we need to bring
10592 * all the ipifs down then bring them up again. The act of
10593 * bringing all the ipifs back up will trigger the local
10594 * ires being recreated with "no_accept" set/cleared.
10595 *
10596 * Note that ILLF_NOACCEPT is always set separately from the
10597 * other flags.
10598 */
10599 if ((turn_on|turn_off) &
10600 (IPIF_UP|IPIF_DEPRECATED|IPIF_NOXMIT|IPIF_NOLOCAL|ILLF_NOARP|
10601 ILLF_NONUD|IPIF_PRIVATE|IPIF_ANYCAST|IPIF_PREFERRED|
10602 IPIF_NOFAILOVER)) {
10603 /*
10604 * ipif_down() will ire_delete bcast ire's for the subnet,
10605 * while the ire_identical_ref tracks the case of IRE_BROADCAST
10606 * entries shared between multiple ipifs on the same subnet.
10607 */
10608 if (((ipif->ipif_flags | turn_on) & IPIF_UP) &&
10609 !(turn_off & IPIF_UP)) {
10610 if (ipif->ipif_flags & IPIF_UP)
10611 ill->ill_logical_down = 1;
10612 turn_on &= ~IPIF_UP;
10613 }
10614 err = ipif_down(ipif, q, mp);
		ip1dbg(("ipif_down returned %d\n", err));
10616 if (err == EINPROGRESS)
10617 return (err);
10618 (void) ipif_down_tail(ipif);
10619 } else if ((turn_on|turn_off) & ILLF_NOACCEPT) {
10620 /*
10621 * If we can quiesce the ill, then continue. If not, then
10622 * ip_sioctl_flags_tail() will be called from
10623 * ipif_ill_refrele_tail().
10624 */
10625 ill_down_ipifs(ill, B_TRUE);
10626
10627 mutex_enter(&connp->conn_lock);
10628 mutex_enter(&ill->ill_lock);
10629 if (!ill_is_quiescent(ill)) {
10630 boolean_t success;
10631
10632 success = ipsq_pending_mp_add(connp, ill->ill_ipif,
10633 q, mp, ILL_DOWN);
10634 mutex_exit(&ill->ill_lock);
10635 mutex_exit(&connp->conn_lock);
10636 return (success ? EINPROGRESS : EINTR);
10637 }
10638 mutex_exit(&ill->ill_lock);
10639 mutex_exit(&connp->conn_lock);
10640 }
10641 return (ip_sioctl_flags_tail(ipif, flags, q, mp));
10642 }
10643
10644 static int
10645 ip_sioctl_flags_tail(ipif_t *ipif, uint64_t flags, queue_t *q, mblk_t *mp)
10646 {
10647 ill_t *ill;
10648 phyint_t *phyi;
10649 uint64_t turn_on, turn_off;
10650 boolean_t phyint_flags_modified = B_FALSE;
10651 int err = 0;
10652 boolean_t set_linklocal = B_FALSE;
10653
10654 ip1dbg(("ip_sioctl_flags_tail(%s:%u)\n",
10655 ipif->ipif_ill->ill_name, ipif->ipif_id));
10656
10657 ASSERT(IAM_WRITER_IPIF(ipif));
10658
10659 ill = ipif->ipif_ill;
10660 phyi = ill->ill_phyint;
10661
10662 ip_sioctl_flags_onoff(ipif, flags, &turn_on, &turn_off);
10663
10664 /*
10665 * IFF_UP is handled separately.
10666 */
10667 turn_on &= ~IFF_UP;
10668 turn_off &= ~IFF_UP;
10669
10670 if ((turn_on|turn_off) & IFF_PHYINT_FLAGS)
10671 phyint_flags_modified = B_TRUE;
10672
10673 /*
10674 * Now we change the flags. Track current value of
10675 * other flags in their respective places.
10676 */
10677 mutex_enter(&ill->ill_lock);
10678 mutex_enter(&phyi->phyint_lock);
10679 ipif->ipif_flags |= (turn_on & IFF_LOGINT_FLAGS);
10680 ipif->ipif_flags &= (~turn_off & IFF_LOGINT_FLAGS);
10681 ill->ill_flags |= (turn_on & IFF_PHYINTINST_FLAGS);
10682 ill->ill_flags &= (~turn_off & IFF_PHYINTINST_FLAGS);
10683 phyi->phyint_flags |= (turn_on & IFF_PHYINT_FLAGS);
10684 phyi->phyint_flags &= (~turn_off & IFF_PHYINT_FLAGS);
10685 if (ipif->ipif_state_flags & IPIF_SET_LINKLOCAL) {
10686 set_linklocal = B_TRUE;
10687 ipif->ipif_state_flags &= ~IPIF_SET_LINKLOCAL;
10688 }
10689
10690 mutex_exit(&ill->ill_lock);
10691 mutex_exit(&phyi->phyint_lock);
10692
10693 if (set_linklocal)
10694 (void) ipif_setlinklocal(ipif);
10695
10696 /*
10697 * PHYI_FAILED, PHYI_INACTIVE, and PHYI_OFFLINE are all the same to
10698 * the kernel: if any of them has been set by userland, the interface
10699 * cannot be used for data traffic.
10700 */
10701 if ((turn_on|turn_off) & (PHYI_FAILED | PHYI_INACTIVE | PHYI_OFFLINE)) {
10702 ASSERT(!IS_IPMP(ill));
10703 /*
10704 * It's possible the ill is part of an "anonymous" IPMP group
10705 * rather than a real group. In that case, there are no other
10706 * interfaces in the group and thus no need for us to call
10707 * ipmp_phyint_refresh_active().
10708 */
10709 if (IS_UNDER_IPMP(ill))
10710 ipmp_phyint_refresh_active(phyi);
10711 }
10712
10713 if ((turn_on|turn_off) & ILLF_NOACCEPT) {
		/*
		 * If the ILLF_NOACCEPT flag is changed, bring up all the
		 * ipifs that were brought down.
		 *
		 * The routing socket messages are sent as a result of
		 * ill_up_ipifs(), which also updates SCTP's IPIF list.
		 */
10722 err = ill_up_ipifs(ill, q, mp);
10723 } else if ((flags & IFF_UP) && !(ipif->ipif_flags & IPIF_UP)) {
		/*
		 * XXX ipif_up really does not know whether a phyint flag
		 * was modified or not, so it sends up information in
		 * only one routing socket message. As we don't bring up
		 * the interface and also set PHYI_ flags simultaneously,
		 * it should be okay.
		 */
10731 err = ipif_up(ipif, q, mp);
10732 } else {
10733 /*
10734 * Make sure routing socket sees all changes to the flags.
10735 * ipif_up_done* handles this when we use ipif_up.
10736 */
10737 if (phyint_flags_modified) {
10738 if (phyi->phyint_illv4 != NULL) {
10739 ip_rts_ifmsg(phyi->phyint_illv4->
10740 ill_ipif, RTSQ_DEFAULT);
10741 }
10742 if (phyi->phyint_illv6 != NULL) {
10743 ip_rts_ifmsg(phyi->phyint_illv6->
10744 ill_ipif, RTSQ_DEFAULT);
10745 }
10746 } else {
10747 ip_rts_ifmsg(ipif, RTSQ_DEFAULT);
10748 }
		/*
		 * Update the flags in SCTP's IPIF list; ipif_up() does
		 * this itself in the need_up case.
		 */
10753 sctp_update_ipif(ipif, SCTP_IPIF_UPDATE);
10754 }
10755
10756 /* The default multicast interface might have changed */
10757 ire_increment_multicast_generation(ill->ill_ipst, ill->ill_isv6);
10758 return (err);
10759 }
10760
10761 /*
10762 * Restart the flags operation now that the refcounts have dropped to zero.
10763 */
10764 /* ARGSUSED */
10765 int
10766 ip_sioctl_flags_restart(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
10767 ip_ioctl_cmd_t *ipip, void *if_req)
10768 {
10769 uint64_t flags;
10770 struct ifreq *ifr = if_req;
10771 struct lifreq *lifr = if_req;
10772 uint64_t turn_on, turn_off;
10773
10774 ip1dbg(("ip_sioctl_flags_restart(%s:%u %p)\n",
10775 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
10776
10777 if (ipip->ipi_cmd_type == IF_CMD) {
10778 /* cast to uint16_t prevents unwanted sign extension */
10779 flags = (uint16_t)ifr->ifr_flags;
10780 } else {
10781 flags = lifr->lifr_flags;
10782 }
10783
10784 /*
10785 * If this function call is a result of the ILLF_NOACCEPT flag
10786 * change, do not call ipif_down_tail(). See ip_sioctl_flags().
10787 */
10788 ip_sioctl_flags_onoff(ipif, flags, &turn_on, &turn_off);
10789 if (!((turn_on|turn_off) & ILLF_NOACCEPT))
10790 (void) ipif_down_tail(ipif);
10791
10792 return (ip_sioctl_flags_tail(ipif, flags, q, mp));
10793 }
10794
10795 /*
10796 * Can operate on either a module or a driver queue.
10797 */
10798 /* ARGSUSED */
10799 int
10800 ip_sioctl_get_flags(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
10801 ip_ioctl_cmd_t *ipip, void *if_req)
10802 {
	/*
	 * Have the flags been set correctly until now?
	 */
10806 ill_t *ill = ipif->ipif_ill;
10807 phyint_t *phyi = ill->ill_phyint;
10808
10809 ip1dbg(("ip_sioctl_get_flags(%s:%u %p)\n",
10810 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
10811 ASSERT((phyi->phyint_flags & ~(IFF_PHYINT_FLAGS)) == 0);
10812 ASSERT((ill->ill_flags & ~(IFF_PHYINTINST_FLAGS)) == 0);
10813 ASSERT((ipif->ipif_flags & ~(IFF_LOGINT_FLAGS)) == 0);
10814
10815 /*
10816 * Need a lock since some flags can be set even when there are
10817 * references to the ipif.
10818 */
10819 mutex_enter(&ill->ill_lock);
10820 if (ipip->ipi_cmd_type == IF_CMD) {
10821 struct ifreq *ifr = (struct ifreq *)if_req;
10822
10823 /* Get interface flags (low 16 only). */
10824 ifr->ifr_flags = ((ipif->ipif_flags |
10825 ill->ill_flags | phyi->phyint_flags) & 0xffff);
10826 } else {
10827 struct lifreq *lifr = (struct lifreq *)if_req;
10828
10829 /* Get interface flags. */
10830 lifr->lifr_flags = ipif->ipif_flags |
10831 ill->ill_flags | phyi->phyint_flags;
10832 }
10833 mutex_exit(&ill->ill_lock);
10834 return (0);
10835 }
10836
/*
 * We allow the MTU to be set on an ILL, but not to differ across its
 * IPIFs, since we don't actually send packets on IPIFs.
 */
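/*
 * Illustrative example (not part of this file): "ifconfig bge0 mtu 1400"
 * arrives as an SIOCSLIFMTU with lifr_mtu == 1400; the legacy SIOCSIFMTU
 * form carries the value in ifr_metric instead, as read below.
 */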
10841 /* ARGSUSED */
10842 int
10843 ip_sioctl_mtu(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
10844 ip_ioctl_cmd_t *ipip, void *if_req)
10845 {
10846 int mtu;
10847 int ip_min_mtu;
10848 struct ifreq *ifr;
10849 struct lifreq *lifr;
10850 ill_t *ill;
10851
10852 ip1dbg(("ip_sioctl_mtu(%s:%u %p)\n", ipif->ipif_ill->ill_name,
10853 ipif->ipif_id, (void *)ipif));
10854 if (ipip->ipi_cmd_type == IF_CMD) {
10855 ifr = (struct ifreq *)if_req;
10856 mtu = ifr->ifr_metric;
10857 } else {
10858 lifr = (struct lifreq *)if_req;
10859 mtu = lifr->lifr_mtu;
10860 }
10861 /* Only allow for logical unit zero i.e. not on "bge0:17" */
10862 if (ipif->ipif_id != 0)
10863 return (EINVAL);
10864
10865 ill = ipif->ipif_ill;
10866 if (ipif->ipif_isv6)
10867 ip_min_mtu = IPV6_MIN_MTU;
10868 else
10869 ip_min_mtu = IP_MIN_MTU;
10870
10871 mutex_enter(&ill->ill_lock);
10872 if (mtu > ill->ill_max_frag || mtu < ip_min_mtu) {
10873 mutex_exit(&ill->ill_lock);
10874 return (EINVAL);
10875 }
10876 /* Avoid increasing ill_mc_mtu */
10877 if (ill->ill_mc_mtu > mtu)
10878 ill->ill_mc_mtu = mtu;
10879
10880 /*
10881 * The dce and fragmentation code can handle changes to ill_mtu
10882 * concurrent with sending/fragmenting packets.
10883 */
10884 ill->ill_mtu = mtu;
10885 ill->ill_flags |= ILLF_FIXEDMTU;
10886 mutex_exit(&ill->ill_lock);
10887
10888 /*
10889 * Make sure all dce_generation checks find out
10890 * that ill_mtu/ill_mc_mtu has changed.
10891 */
10892 dce_increment_all_generations(ill->ill_isv6, ill->ill_ipst);
10893
10894 /*
10895 * Refresh IPMP meta-interface MTU if necessary.
10896 */
10897 if (IS_UNDER_IPMP(ill))
10898 ipmp_illgrp_refresh_mtu(ill->ill_grp);
10899
10900 /* Update the MTU in SCTP's list */
10901 sctp_update_ipif(ipif, SCTP_IPIF_UPDATE);
10902 return (0);
10903 }
10904
10905 /* Get interface MTU. */
10906 /* ARGSUSED */
10907 int
10908 ip_sioctl_get_mtu(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
10909 ip_ioctl_cmd_t *ipip, void *if_req)
10910 {
10911 struct ifreq *ifr;
10912 struct lifreq *lifr;
10913
10914 ip1dbg(("ip_sioctl_get_mtu(%s:%u %p)\n",
10915 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
10916
10917 /*
10918 * We allow a get on any logical interface even though the set
10919 * can only be done on logical unit 0.
10920 */
10921 if (ipip->ipi_cmd_type == IF_CMD) {
10922 ifr = (struct ifreq *)if_req;
10923 ifr->ifr_metric = ipif->ipif_ill->ill_mtu;
10924 } else {
10925 lifr = (struct lifreq *)if_req;
10926 lifr->lifr_mtu = ipif->ipif_ill->ill_mtu;
10927 }
10928 return (0);
10929 }
10930
10931 /* Set interface broadcast address. */
10932 /* ARGSUSED2 */
10933 int
10934 ip_sioctl_brdaddr(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
10935 ip_ioctl_cmd_t *ipip, void *if_req)
10936 {
10937 ipaddr_t addr;
10938 ire_t *ire;
10939 ill_t *ill = ipif->ipif_ill;
10940 ip_stack_t *ipst = ill->ill_ipst;
10941
10942 ip1dbg(("ip_sioctl_brdaddr(%s:%u)\n", ill->ill_name,
10943 ipif->ipif_id));
10944
10945 ASSERT(IAM_WRITER_IPIF(ipif));
10946 if (!(ipif->ipif_flags & IPIF_BROADCAST))
10947 return (EADDRNOTAVAIL);
10948
10949 ASSERT(!(ipif->ipif_isv6)); /* No IPv6 broadcast */
10950
10951 if (sin->sin_family != AF_INET)
10952 return (EAFNOSUPPORT);
10953
10954 addr = sin->sin_addr.s_addr;
10955
10956 if (ipif->ipif_flags & IPIF_UP) {
10957 /*
10958 * If we are already up, make sure the new
10959 * broadcast address makes sense. If it does,
10960 * there should be an IRE for it already.
10961 */
10962 ire = ire_ftable_lookup_v4(addr, 0, 0, IRE_BROADCAST,
10963 ill, ipif->ipif_zoneid, NULL,
10964 (MATCH_IRE_ILL | MATCH_IRE_TYPE), 0, ipst, NULL);
10965 if (ire == NULL) {
10966 return (EINVAL);
10967 } else {
10968 ire_refrele(ire);
10969 }
10970 }
10971 /*
10972 * Changing the broadcast addr for this ipif. Since the IRE_BROADCAST
10973 * needs to already exist we never need to change the set of
10974 * IRE_BROADCASTs when we are UP.
10975 */
10976 if (addr != ipif->ipif_brd_addr)
10977 IN6_IPADDR_TO_V4MAPPED(addr, &ipif->ipif_v6brd_addr);
10978
10979 return (0);
10980 }
10981
10982 /* Get interface broadcast address. */
10983 /* ARGSUSED */
10984 int
10985 ip_sioctl_get_brdaddr(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
10986 ip_ioctl_cmd_t *ipip, void *if_req)
10987 {
10988 ip1dbg(("ip_sioctl_get_brdaddr(%s:%u %p)\n",
10989 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
10990 if (!(ipif->ipif_flags & IPIF_BROADCAST))
10991 return (EADDRNOTAVAIL);
10992
10993 /* IPIF_BROADCAST not possible with IPv6 */
10994 ASSERT(!ipif->ipif_isv6);
10995 *sin = sin_null;
10996 sin->sin_family = AF_INET;
10997 sin->sin_addr.s_addr = ipif->ipif_brd_addr;
10998 return (0);
10999 }
11000
11001 /*
11002 * This routine is called to handle the SIOCS*IFNETMASK IOCTL.
11003 */
11004 /* ARGSUSED */
11005 int
11006 ip_sioctl_netmask(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
11007 ip_ioctl_cmd_t *ipip, void *if_req)
11008 {
11009 int err = 0;
11010 in6_addr_t v6mask;
11011
11012 ip1dbg(("ip_sioctl_netmask(%s:%u %p)\n",
11013 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11014
11015 ASSERT(IAM_WRITER_IPIF(ipif));
11016
11017 if (ipif->ipif_isv6) {
11018 sin6_t *sin6;
11019
11020 if (sin->sin_family != AF_INET6)
11021 return (EAFNOSUPPORT);
11022
11023 sin6 = (sin6_t *)sin;
11024 v6mask = sin6->sin6_addr;
11025 } else {
11026 ipaddr_t mask;
11027
11028 if (sin->sin_family != AF_INET)
11029 return (EAFNOSUPPORT);
11030
11031 mask = sin->sin_addr.s_addr;
11032 if (!ip_contiguous_mask(ntohl(mask)))
11033 return (ENOTSUP);
11034 V4MASK_TO_V6(mask, v6mask);
11035 }
11036
11037 /*
11038 * No big deal if the interface isn't already up, or the mask
11039 * isn't really changing, or this is pt-pt.
11040 */
11041 if (!(ipif->ipif_flags & IPIF_UP) ||
11042 IN6_ARE_ADDR_EQUAL(&v6mask, &ipif->ipif_v6net_mask) ||
11043 (ipif->ipif_flags & IPIF_POINTOPOINT)) {
11044 ipif->ipif_v6net_mask = v6mask;
11045 if ((ipif->ipif_flags & IPIF_POINTOPOINT) == 0) {
11046 V6_MASK_COPY(ipif->ipif_v6lcl_addr,
11047 ipif->ipif_v6net_mask,
11048 ipif->ipif_v6subnet);
11049 }
11050 return (0);
11051 }
11052 /*
	 * Make sure we have valid net and subnet broadcast IREs for the
	 * old netmask, if they are needed by other logical interfaces.
11055 */
11056 err = ipif_logical_down(ipif, q, mp);
11057 if (err == EINPROGRESS)
11058 return (err);
11059 (void) ipif_down_tail(ipif);
11060 err = ip_sioctl_netmask_tail(ipif, sin, q, mp);
11061 return (err);
11062 }
11063
11064 static int
11065 ip_sioctl_netmask_tail(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp)
11066 {
11067 in6_addr_t v6mask;
11068 int err = 0;
11069
11070 ip1dbg(("ip_sioctl_netmask_tail(%s:%u %p)\n",
11071 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11072
11073 if (ipif->ipif_isv6) {
11074 sin6_t *sin6;
11075
11076 sin6 = (sin6_t *)sin;
11077 v6mask = sin6->sin6_addr;
11078 } else {
11079 ipaddr_t mask;
11080
11081 mask = sin->sin_addr.s_addr;
11082 V4MASK_TO_V6(mask, v6mask);
11083 }
11084
11085 ipif->ipif_v6net_mask = v6mask;
11086 if ((ipif->ipif_flags & IPIF_POINTOPOINT) == 0) {
11087 V6_MASK_COPY(ipif->ipif_v6lcl_addr, ipif->ipif_v6net_mask,
11088 ipif->ipif_v6subnet);
11089 }
11090 err = ipif_up(ipif, q, mp);
11091
11092 if (err == 0 || err == EINPROGRESS) {
11093 /*
		 * The interface must be DL_BOUND if this packet has to
		 * go out on the wire.  Since we only go through a logical
		 * down and remain bound to the driver during an internal
		 * down/up, that condition is satisfied.
11098 */
11099 if (!ipif->ipif_isv6 && ipif->ipif_ill->ill_wq != NULL) {
11100 /* Potentially broadcast an address mask reply. */
11101 ipif_mask_reply(ipif);
11102 }
11103 }
11104 return (err);
11105 }
11106
11107 /* ARGSUSED */
11108 int
11109 ip_sioctl_netmask_restart(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
11110 ip_ioctl_cmd_t *ipip, void *if_req)
11111 {
11112 ip1dbg(("ip_sioctl_netmask_restart(%s:%u %p)\n",
11113 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11114 (void) ipif_down_tail(ipif);
11115 return (ip_sioctl_netmask_tail(ipif, sin, q, mp));
11116 }
11117
11118 /* Get interface net mask. */
11119 /* ARGSUSED */
11120 int
11121 ip_sioctl_get_netmask(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
11122 ip_ioctl_cmd_t *ipip, void *if_req)
11123 {
11124 struct lifreq *lifr = (struct lifreq *)if_req;
11125 struct sockaddr_in6 *sin6 = (sin6_t *)sin;
11126
11127 ip1dbg(("ip_sioctl_get_netmask(%s:%u %p)\n",
11128 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11129
11130 /*
	 * The netmask can't change since we have a reference to the ipif.
11132 */
11133 if (ipif->ipif_isv6) {
11134 ASSERT(ipip->ipi_cmd_type == LIF_CMD);
11135 *sin6 = sin6_null;
11136 sin6->sin6_family = AF_INET6;
11137 sin6->sin6_addr = ipif->ipif_v6net_mask;
11138 lifr->lifr_addrlen =
11139 ip_mask_to_plen_v6(&ipif->ipif_v6net_mask);
11140 } else {
11141 *sin = sin_null;
11142 sin->sin_family = AF_INET;
11143 sin->sin_addr.s_addr = ipif->ipif_net_mask;
11144 if (ipip->ipi_cmd_type == LIF_CMD) {
11145 lifr->lifr_addrlen =
11146 ip_mask_to_plen(ipif->ipif_net_mask);
11147 }
11148 }
11149 return (0);
11150 }
11151
11152 /* ARGSUSED */
11153 int
11154 ip_sioctl_metric(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
11155 ip_ioctl_cmd_t *ipip, void *if_req)
11156 {
11157 ip1dbg(("ip_sioctl_metric(%s:%u %p)\n",
11158 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11159
11160 /*
	 * Since no applications should ever be setting metrics on underlying
	 * interfaces, we explicitly fail such requests to smoke 'em out.
11163 */
11164 if (IS_UNDER_IPMP(ipif->ipif_ill))
11165 return (EINVAL);
11166
11167 /*
	 * Set the interface metric.  We don't use this for anything
	 * ourselves, but we keep track of it in case it is important
	 * to routing applications or the like.
11171 */
11172 if (ipip->ipi_cmd_type == IF_CMD) {
11173 struct ifreq *ifr;
11174
11175 ifr = (struct ifreq *)if_req;
11176 ipif->ipif_ill->ill_metric = ifr->ifr_metric;
11177 } else {
11178 struct lifreq *lifr;
11179
11180 lifr = (struct lifreq *)if_req;
11181 ipif->ipif_ill->ill_metric = lifr->lifr_metric;
11182 }
11183 return (0);
11184 }
11185
11186 /* ARGSUSED */
11187 int
11188 ip_sioctl_get_metric(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
11189 ip_ioctl_cmd_t *ipip, void *if_req)
11190 {
11191 /* Get interface metric. */
11192 ip1dbg(("ip_sioctl_get_metric(%s:%u %p)\n",
11193 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11194
11195 if (ipip->ipi_cmd_type == IF_CMD) {
11196 struct ifreq *ifr;
11197
11198 ifr = (struct ifreq *)if_req;
11199 ifr->ifr_metric = ipif->ipif_ill->ill_metric;
11200 } else {
11201 struct lifreq *lifr;
11202
11203 lifr = (struct lifreq *)if_req;
11204 lifr->lifr_metric = ipif->ipif_ill->ill_metric;
11205 }
11206
11207 return (0);
11208 }
11209
11210 /* ARGSUSED */
11211 int
11212 ip_sioctl_muxid(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
11213 ip_ioctl_cmd_t *ipip, void *if_req)
11214 {
11215 int arp_muxid;
11216
11217 ip1dbg(("ip_sioctl_muxid(%s:%u %p)\n",
11218 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11219 /*
11220 * Set the muxid returned from I_PLINK.
11221 */
11222 if (ipip->ipi_cmd_type == IF_CMD) {
11223 struct ifreq *ifr = (struct ifreq *)if_req;
11224
11225 ipif->ipif_ill->ill_muxid = ifr->ifr_ip_muxid;
11226 arp_muxid = ifr->ifr_arp_muxid;
11227 } else {
11228 struct lifreq *lifr = (struct lifreq *)if_req;
11229
11230 ipif->ipif_ill->ill_muxid = lifr->lifr_ip_muxid;
11231 arp_muxid = lifr->lifr_arp_muxid;
11232 }
11233 arl_set_muxid(ipif->ipif_ill, arp_muxid);
11234 return (0);
11235 }
11236
11237 /* ARGSUSED */
11238 int
11239 ip_sioctl_get_muxid(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
11240 ip_ioctl_cmd_t *ipip, void *if_req)
11241 {
11242 int arp_muxid = 0;
11243
11244 ip1dbg(("ip_sioctl_get_muxid(%s:%u %p)\n",
11245 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11246 /*
11247 * Get the muxid saved in ill for I_PUNLINK.
11248 */
11249 arp_muxid = arl_get_muxid(ipif->ipif_ill);
11250 if (ipip->ipi_cmd_type == IF_CMD) {
11251 struct ifreq *ifr = (struct ifreq *)if_req;
11252
11253 ifr->ifr_ip_muxid = ipif->ipif_ill->ill_muxid;
11254 ifr->ifr_arp_muxid = arp_muxid;
11255 } else {
11256 struct lifreq *lifr = (struct lifreq *)if_req;
11257
11258 lifr->lifr_ip_muxid = ipif->ipif_ill->ill_muxid;
11259 lifr->lifr_arp_muxid = arp_muxid;
11260 }
11261 return (0);
11262 }
11263
11264 /*
11265 * Set the subnet prefix. Does not modify the broadcast address.
11266 */
11267 /* ARGSUSED */
11268 int
11269 ip_sioctl_subnet(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
11270 ip_ioctl_cmd_t *ipip, void *if_req)
11271 {
11272 int err = 0;
11273 in6_addr_t v6addr;
11274 in6_addr_t v6mask;
11275 boolean_t need_up = B_FALSE;
11276 int addrlen;
11277
11278 ip1dbg(("ip_sioctl_subnet(%s:%u %p)\n",
11279 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11280
11281 ASSERT(IAM_WRITER_IPIF(ipif));
11282 addrlen = ((struct lifreq *)if_req)->lifr_addrlen;
11283
11284 if (ipif->ipif_isv6) {
11285 sin6_t *sin6;
11286
11287 if (sin->sin_family != AF_INET6)
11288 return (EAFNOSUPPORT);
11289
11290 sin6 = (sin6_t *)sin;
11291 v6addr = sin6->sin6_addr;
11292 if (!ip_remote_addr_ok_v6(&v6addr, &ipv6_all_ones))
11293 return (EADDRNOTAVAIL);
11294 } else {
11295 ipaddr_t addr;
11296
11297 if (sin->sin_family != AF_INET)
11298 return (EAFNOSUPPORT);
11299
11300 addr = sin->sin_addr.s_addr;
11301 if (!ip_addr_ok_v4(addr, 0xFFFFFFFF))
11302 return (EADDRNOTAVAIL);
11303 IN6_IPADDR_TO_V4MAPPED(addr, &v6addr);
11304 /* Add 96 bits */
11305 addrlen += IPV6_ABITS - IP_ABITS;
11306 }
11307
11308 if (ip_plen_to_mask_v6(addrlen, &v6mask) == NULL)
11309 return (EINVAL);
11310
	/* Check if any bits in the address are set past the mask */
11312 if (!V6_MASK_EQ(v6addr, v6mask, v6addr))
11313 return (EINVAL);
11314
11315 if (IN6_ARE_ADDR_EQUAL(&ipif->ipif_v6subnet, &v6addr) &&
11316 IN6_ARE_ADDR_EQUAL(&ipif->ipif_v6net_mask, &v6mask))
11317 return (0); /* No change */
11318
11319 if (ipif->ipif_flags & IPIF_UP) {
11320 /*
11321 * If the interface is already marked up,
11322 * we call ipif_down which will take care
11323 * of ditching any IREs that have been set
11324 * up based on the old interface address.
11325 */
11326 err = ipif_logical_down(ipif, q, mp);
11327 if (err == EINPROGRESS)
11328 return (err);
11329 (void) ipif_down_tail(ipif);
11330 need_up = B_TRUE;
11331 }
11332
11333 err = ip_sioctl_subnet_tail(ipif, v6addr, v6mask, q, mp, need_up);
11334 return (err);
11335 }
11336
11337 static int
11338 ip_sioctl_subnet_tail(ipif_t *ipif, in6_addr_t v6addr, in6_addr_t v6mask,
11339 queue_t *q, mblk_t *mp, boolean_t need_up)
11340 {
11341 ill_t *ill = ipif->ipif_ill;
11342 int err = 0;
11343
11344 ip1dbg(("ip_sioctl_subnet_tail(%s:%u %p)\n",
11345 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11346
11347 /* Set the new address. */
11348 mutex_enter(&ill->ill_lock);
11349 ipif->ipif_v6net_mask = v6mask;
11350 if ((ipif->ipif_flags & IPIF_POINTOPOINT) == 0) {
11351 V6_MASK_COPY(v6addr, ipif->ipif_v6net_mask,
11352 ipif->ipif_v6subnet);
11353 }
11354 mutex_exit(&ill->ill_lock);
11355
11356 if (need_up) {
11357 /*
11358 * Now bring the interface back up. If this
11359 * is the only IPIF for the ILL, ipif_up
11360 * will have to re-bind to the device, so
11361 * we may get back EINPROGRESS, in which
11362 * case, this IOCTL will get completed in
11363 * ip_rput_dlpi when we see the DL_BIND_ACK.
11364 */
11365 err = ipif_up(ipif, q, mp);
11366 if (err == EINPROGRESS)
11367 return (err);
11368 }
11369 return (err);
11370 }
11371
11372 /* ARGSUSED */
11373 int
11374 ip_sioctl_subnet_restart(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
11375 ip_ioctl_cmd_t *ipip, void *if_req)
11376 {
11377 int addrlen;
11378 in6_addr_t v6addr;
11379 in6_addr_t v6mask;
11380 struct lifreq *lifr = (struct lifreq *)if_req;
11381
11382 ip1dbg(("ip_sioctl_subnet_restart(%s:%u %p)\n",
11383 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11384 (void) ipif_down_tail(ipif);
11385
11386 addrlen = lifr->lifr_addrlen;
11387 if (ipif->ipif_isv6) {
11388 sin6_t *sin6;
11389
11390 sin6 = (sin6_t *)sin;
11391 v6addr = sin6->sin6_addr;
11392 } else {
11393 ipaddr_t addr;
11394
11395 addr = sin->sin_addr.s_addr;
11396 IN6_IPADDR_TO_V4MAPPED(addr, &v6addr);
11397 addrlen += IPV6_ABITS - IP_ABITS;
11398 }
11399 (void) ip_plen_to_mask_v6(addrlen, &v6mask);
11400
11401 return (ip_sioctl_subnet_tail(ipif, v6addr, v6mask, q, mp, B_TRUE));
11402 }
11403
11404 /* ARGSUSED */
11405 int
11406 ip_sioctl_get_subnet(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
11407 ip_ioctl_cmd_t *ipip, void *if_req)
11408 {
11409 struct lifreq *lifr = (struct lifreq *)if_req;
11410 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sin;
11411
11412 ip1dbg(("ip_sioctl_get_subnet(%s:%u %p)\n",
11413 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11414 ASSERT(ipip->ipi_cmd_type == LIF_CMD);
11415
11416 if (ipif->ipif_isv6) {
11417 *sin6 = sin6_null;
11418 sin6->sin6_family = AF_INET6;
11419 sin6->sin6_addr = ipif->ipif_v6subnet;
11420 lifr->lifr_addrlen =
11421 ip_mask_to_plen_v6(&ipif->ipif_v6net_mask);
11422 } else {
11423 *sin = sin_null;
11424 sin->sin_family = AF_INET;
11425 sin->sin_addr.s_addr = ipif->ipif_subnet;
11426 lifr->lifr_addrlen = ip_mask_to_plen(ipif->ipif_net_mask);
11427 }
11428 return (0);
11429 }
11430
11431 /*
11432 * Set the IPv6 address token.
11433 */
11434 /* ARGSUSED */
11435 int
11436 ip_sioctl_token(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
11437 ip_ioctl_cmd_t *ipi, void *if_req)
11438 {
11439 ill_t *ill = ipif->ipif_ill;
11440 int err;
11441 in6_addr_t v6addr;
11442 in6_addr_t v6mask;
11443 boolean_t need_up = B_FALSE;
11444 int i;
11445 sin6_t *sin6 = (sin6_t *)sin;
11446 struct lifreq *lifr = (struct lifreq *)if_req;
11447 int addrlen;
11448
11449 ip1dbg(("ip_sioctl_token(%s:%u %p)\n",
11450 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11451 ASSERT(IAM_WRITER_IPIF(ipif));
11452
11453 addrlen = lifr->lifr_addrlen;
11454 /* Only allow for logical unit zero i.e. not on "le0:17" */
11455 if (ipif->ipif_id != 0)
11456 return (EINVAL);
11457
11458 if (!ipif->ipif_isv6)
11459 return (EINVAL);
11460
11461 if (addrlen > IPV6_ABITS)
11462 return (EINVAL);
11463
11464 v6addr = sin6->sin6_addr;
11465
11466 /*
	 * The token occupies the low-order bits of the address, so its
	 * length is measured from the end.  To get the proper mask,
	 * compute the mask of the bits not in the token (i.e., the
	 * prefix) and then invert it with xor.
11470 */
11471 if (ip_plen_to_mask_v6(IPV6_ABITS - addrlen, &v6mask) == NULL)
11472 return (EINVAL);
11473 for (i = 0; i < 4; i++) {
11474 v6mask.s6_addr32[i] ^= (uint32_t)0xffffffff;
11475 }
11476
11477 if (V6_MASK_EQ(v6addr, v6mask, ill->ill_token) &&
11478 ill->ill_token_length == addrlen)
11479 return (0); /* No change */
11480
11481 if (ipif->ipif_flags & IPIF_UP) {
11482 err = ipif_logical_down(ipif, q, mp);
11483 if (err == EINPROGRESS)
11484 return (err);
11485 (void) ipif_down_tail(ipif);
11486 need_up = B_TRUE;
11487 }
11488 err = ip_sioctl_token_tail(ipif, sin6, addrlen, q, mp, need_up);
11489 return (err);
11490 }
11491
11492 static int
11493 ip_sioctl_token_tail(ipif_t *ipif, sin6_t *sin6, int addrlen, queue_t *q,
11494 mblk_t *mp, boolean_t need_up)
11495 {
11496 in6_addr_t v6addr;
11497 in6_addr_t v6mask;
11498 ill_t *ill = ipif->ipif_ill;
11499 int i;
11500 int err = 0;
11501
11502 ip1dbg(("ip_sioctl_token_tail(%s:%u %p)\n",
11503 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11504 v6addr = sin6->sin6_addr;
11505 /*
	 * The token occupies the low-order bits of the address, so its
	 * length is measured from the end.  To get the proper mask,
	 * compute the mask of the bits not in the token (i.e., the
	 * prefix) and then invert it with xor.
11509 */
11510 (void) ip_plen_to_mask_v6(IPV6_ABITS - addrlen, &v6mask);
11511 for (i = 0; i < 4; i++)
11512 v6mask.s6_addr32[i] ^= (uint32_t)0xffffffff;
11513
11514 mutex_enter(&ill->ill_lock);
11515 V6_MASK_COPY(v6addr, v6mask, ill->ill_token);
11516 ill->ill_token_length = addrlen;
11517 ill->ill_manual_token = 1;
11518
11519 /* Reconfigure the link-local address based on this new token */
11520 ipif_setlinklocal(ill->ill_ipif);
11521
11522 mutex_exit(&ill->ill_lock);
11523
11524 if (need_up) {
11525 /*
11526 * Now bring the interface back up. If this
11527 * is the only IPIF for the ILL, ipif_up
11528 * will have to re-bind to the device, so
11529 * we may get back EINPROGRESS, in which
11530 * case, this IOCTL will get completed in
11531 * ip_rput_dlpi when we see the DL_BIND_ACK.
11532 */
11533 err = ipif_up(ipif, q, mp);
11534 if (err == EINPROGRESS)
11535 return (err);
11536 }
11537 return (err);
11538 }
11539
11540 /* ARGSUSED */
11541 int
11542 ip_sioctl_get_token(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
11543 ip_ioctl_cmd_t *ipi, void *if_req)
11544 {
11545 ill_t *ill;
11546 sin6_t *sin6 = (sin6_t *)sin;
11547 struct lifreq *lifr = (struct lifreq *)if_req;
11548
11549 ip1dbg(("ip_sioctl_get_token(%s:%u %p)\n",
11550 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11551 if (ipif->ipif_id != 0)
11552 return (EINVAL);
11553
11554 ill = ipif->ipif_ill;
11555 if (!ill->ill_isv6)
11556 return (ENXIO);
11557
11558 *sin6 = sin6_null;
11559 sin6->sin6_family = AF_INET6;
11560 ASSERT(!IN6_IS_ADDR_V4MAPPED(&ill->ill_token));
11561 sin6->sin6_addr = ill->ill_token;
11562 lifr->lifr_addrlen = ill->ill_token_length;
11563 return (0);
11564 }
11565
11566 /*
11567 * Set (hardware) link specific information that might override
11568 * what was acquired through the DL_INFO_ACK.
11569 */
11570 /* ARGSUSED */
11571 int
11572 ip_sioctl_lnkinfo(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
11573 ip_ioctl_cmd_t *ipi, void *if_req)
11574 {
11575 ill_t *ill = ipif->ipif_ill;
11576 int ip_min_mtu;
11577 struct lifreq *lifr = (struct lifreq *)if_req;
11578 lif_ifinfo_req_t *lir;
11579
11580 ip1dbg(("ip_sioctl_lnkinfo(%s:%u %p)\n",
11581 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11582 lir = &lifr->lifr_ifinfo;
11583 ASSERT(IAM_WRITER_IPIF(ipif));
11584
11585 /* Only allow for logical unit zero i.e. not on "bge0:17" */
11586 if (ipif->ipif_id != 0)
11587 return (EINVAL);
11588
11589 /* Set interface MTU. */
11590 if (ipif->ipif_isv6)
11591 ip_min_mtu = IPV6_MIN_MTU;
11592 else
11593 ip_min_mtu = IP_MIN_MTU;
11594
11595 /*
11596 * Verify values before we set anything. Allow zero to
11597 * mean unspecified.
11598 *
	 * XXX We should be able to set the user-defined lir_mtu to some value
	 * that is greater than ill_current_frag but less than ill_max_frag:
	 * the ill_max_frag value tells us the max MTU that can be handled by
	 * the datalink, whereas the ill_current_frag is dynamically computed
	 * for some link types, like tunnels, based on the tunnel PMTU.
	 * However, since there is currently no way of distinguishing between
	 * administratively fixed link MTU values (e.g., those set via
	 * /sbin/dladm) and dynamically discovered MTUs (e.g., those discovered
	 * for tunnels), we conservatively choose ill_current_frag as the
	 * upper bound.
11609 */
11610 if (lir->lir_maxmtu != 0 &&
11611 (lir->lir_maxmtu > ill->ill_current_frag ||
11612 lir->lir_maxmtu < ip_min_mtu))
11613 return (EINVAL);
11614 if (lir->lir_reachtime != 0 &&
11615 lir->lir_reachtime > ND_MAX_REACHTIME)
11616 return (EINVAL);
11617 if (lir->lir_reachretrans != 0 &&
11618 lir->lir_reachretrans > ND_MAX_REACHRETRANSTIME)
11619 return (EINVAL);
11620
11621 mutex_enter(&ill->ill_lock);
11622 /*
11623 * The dce and fragmentation code can handle changes to ill_mtu
11624 * concurrent with sending/fragmenting packets.
11625 */
11626 if (lir->lir_maxmtu != 0)
11627 ill->ill_user_mtu = lir->lir_maxmtu;
11628
11629 if (lir->lir_reachtime != 0)
11630 ill->ill_reachable_time = lir->lir_reachtime;
11631
11632 if (lir->lir_reachretrans != 0)
11633 ill->ill_reachable_retrans_time = lir->lir_reachretrans;
11634
11635 ill->ill_max_hops = lir->lir_maxhops;
11636 ill->ill_max_buf = ND_MAX_Q;
11637 if (!(ill->ill_flags & ILLF_FIXEDMTU) && ill->ill_user_mtu != 0) {
11638 /*
		 * ill_mtu is the actual interface MTU, obtained as the min
		 * of the user-configured mtu and the value announced by the
		 * driver (via DL_NOTE_SDU_SIZE/DL_INFO_ACK).  Note that since
		 * we have already required ill_user_mtu <= ill_current_frag
		 * by the time we get here, ill_mtu effectively gets assigned
		 * the ill_user_mtu value here.
11646 */
11647 ill->ill_mtu = MIN(ill->ill_current_frag, ill->ill_user_mtu);
11648 ill->ill_mc_mtu = MIN(ill->ill_mc_mtu, ill->ill_user_mtu);
11649 }
11650 mutex_exit(&ill->ill_lock);
11651
11652 /*
11653 * Make sure all dce_generation checks find out
11654 * that ill_mtu/ill_mc_mtu has changed.
11655 */
11656 if (!(ill->ill_flags & ILLF_FIXEDMTU) && (lir->lir_maxmtu != 0))
11657 dce_increment_all_generations(ill->ill_isv6, ill->ill_ipst);
11658
11659 /*
11660 * Refresh IPMP meta-interface MTU if necessary.
11661 */
11662 if (IS_UNDER_IPMP(ill))
11663 ipmp_illgrp_refresh_mtu(ill->ill_grp);
11664
11665 return (0);
11666 }
11667
11668 /* ARGSUSED */
11669 int
11670 ip_sioctl_get_lnkinfo(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
11671 ip_ioctl_cmd_t *ipi, void *if_req)
11672 {
11673 struct lif_ifinfo_req *lir;
11674 ill_t *ill = ipif->ipif_ill;
11675
11676 ip1dbg(("ip_sioctl_get_lnkinfo(%s:%u %p)\n",
11677 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
11678 if (ipif->ipif_id != 0)
11679 return (EINVAL);
11680
11681 lir = &((struct lifreq *)if_req)->lifr_ifinfo;
11682 lir->lir_maxhops = ill->ill_max_hops;
11683 lir->lir_reachtime = ill->ill_reachable_time;
11684 lir->lir_reachretrans = ill->ill_reachable_retrans_time;
11685 lir->lir_maxmtu = ill->ill_mtu;
11686
11687 return (0);
11688 }
11689
11690 /*
11691 * Return best guess as to the subnet mask for the specified address.
11692 * Based on the subnet masks for all the configured interfaces.
11693 *
 * We end up returning a zero mask for default, multicast, or
 * experimental addresses.
11696 */
11697 static ipaddr_t
11698 ip_subnet_mask(ipaddr_t addr, ipif_t **ipifp, ip_stack_t *ipst)
11699 {
11700 ipaddr_t net_mask;
11701 ill_t *ill;
11702 ipif_t *ipif;
11703 ill_walk_context_t ctx;
11704 ipif_t *fallback_ipif = NULL;
11705
11706 net_mask = ip_net_mask(addr);
11707 if (net_mask == 0) {
11708 *ipifp = NULL;
11709 return (0);
11710 }
11711
	/*
	 * Check to see if this is maybe a local subnet route.  Note that
	 * this function only applies to IPv4 interfaces.
	 */
11714 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
11715 ill = ILL_START_WALK_V4(&ctx, ipst);
11716 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
11717 mutex_enter(&ill->ill_lock);
11718 for (ipif = ill->ill_ipif; ipif != NULL;
11719 ipif = ipif->ipif_next) {
11720 if (IPIF_IS_CONDEMNED(ipif))
11721 continue;
11722 if (!(ipif->ipif_flags & IPIF_UP))
11723 continue;
11724 if ((ipif->ipif_subnet & net_mask) ==
11725 (addr & net_mask)) {
11726 /*
11727 * Don't trust pt-pt interfaces if there are
11728 * other interfaces.
11729 */
11730 if (ipif->ipif_flags & IPIF_POINTOPOINT) {
11731 if (fallback_ipif == NULL) {
11732 ipif_refhold_locked(ipif);
11733 fallback_ipif = ipif;
11734 }
11735 continue;
11736 }
11737
11738 /*
11739 * Fine. Just assume the same net mask as the
11740 * directly attached subnet interface is using.
11741 */
11742 ipif_refhold_locked(ipif);
11743 mutex_exit(&ill->ill_lock);
11744 rw_exit(&ipst->ips_ill_g_lock);
11745 if (fallback_ipif != NULL)
11746 ipif_refrele(fallback_ipif);
11747 *ipifp = ipif;
11748 return (ipif->ipif_net_mask);
11749 }
11750 }
11751 mutex_exit(&ill->ill_lock);
11752 }
11753 rw_exit(&ipst->ips_ill_g_lock);
11754
11755 *ipifp = fallback_ipif;
11756 return ((fallback_ipif != NULL) ?
11757 fallback_ipif->ipif_net_mask : net_mask);
11758 }
11759
11760 /*
11761 * ip_sioctl_copyin_setup calls ip_wput_ioctl to process the IP_IOCTL ioctl.
11762 */
11763 static void
11764 ip_wput_ioctl(queue_t *q, mblk_t *mp)
11765 {
11766 IOCP iocp;
11767 ipft_t *ipft;
11768 ipllc_t *ipllc;
11769 mblk_t *mp1;
11770 cred_t *cr;
11771 int error = 0;
11772 conn_t *connp;
11773
11774 ip1dbg(("ip_wput_ioctl"));
11775 iocp = (IOCP)mp->b_rptr;
11776 mp1 = mp->b_cont;
11777 if (mp1 == NULL) {
11778 iocp->ioc_error = EINVAL;
11779 mp->b_datap->db_type = M_IOCNAK;
11780 iocp->ioc_count = 0;
11781 qreply(q, mp);
11782 return;
11783 }
11784
11785 /*
11786 * These IOCTLs provide various control capabilities to
11787 * upstream agents such as ULPs and processes. There
11788 * are currently two such IOCTLs implemented. They
11789 * are used by TCP to provide update information for
11790 * existing IREs and to forcibly delete an IRE for a
11791 * host that is not responding, thereby forcing an
11792 * attempt at a new route.
11793 */
11794 iocp->ioc_error = EINVAL;
11795 if (!pullupmsg(mp1, sizeof (ipllc->ipllc_cmd)))
11796 goto done;
11797
11798 ipllc = (ipllc_t *)mp1->b_rptr;
11799 for (ipft = ip_ioctl_ftbl; ipft->ipft_pfi; ipft++) {
11800 if (ipllc->ipllc_cmd == ipft->ipft_cmd)
11801 break;
11802 }
11803 /*
	 * Prefer the credential from the mblk over the one in the ioctl;
	 * see ip_sioctl_copyin_setup().
11806 */
11807 cr = msg_getcred(mp, NULL);
11808 if (cr == NULL)
11809 cr = iocp->ioc_cr;
11810
11811 /*
	 * Refhold the conn in case the request gets queued up in some lookup.
11813 */
11814 ASSERT(CONN_Q(q));
11815 connp = Q_TO_CONN(q);
11816 CONN_INC_REF(connp);
11817 CONN_INC_IOCTLREF(connp);
11818 if (ipft->ipft_pfi &&
11819 ((mp1->b_wptr - mp1->b_rptr) >= ipft->ipft_min_size ||
11820 pullupmsg(mp1, ipft->ipft_min_size))) {
11821 error = (*ipft->ipft_pfi)(q,
11822 (ipft->ipft_flags & IPFT_F_SELF_REPLY) ? mp : mp1, cr);
11823 }
11824 if (ipft->ipft_flags & IPFT_F_SELF_REPLY) {
11825 /*
11826 * CONN_OPER_PENDING_DONE happens in the function called
11827 * through ipft_pfi above.
11828 */
11829 return;
11830 }
11831
11832 CONN_DEC_IOCTLREF(connp);
11833 CONN_OPER_PENDING_DONE(connp);
11834 if (ipft->ipft_flags & IPFT_F_NO_REPLY) {
11835 freemsg(mp);
11836 return;
11837 }
11838 iocp->ioc_error = error;
11839
11840 done:
11841 mp->b_datap->db_type = M_IOCACK;
11842 if (iocp->ioc_error)
11843 iocp->ioc_count = 0;
11844 qreply(q, mp);
11845 }
11846
11847 /*
11848 * Assign a unique id for the ipif. This is used by sctp_addr.c
11849 * Note: remove if sctp_addr.c is redone to not shadow ill/ipif data structures.
11850 */
11851 static void
11852 ipif_assign_seqid(ipif_t *ipif)
11853 {
11854 ip_stack_t *ipst = ipif->ipif_ill->ill_ipst;
11855
11856 ipif->ipif_seqid = atomic_inc_64_nv(&ipst->ips_ipif_g_seqid);
11857 }
11858
11859 /*
11860 * Clone the contents of `sipif' to `dipif'. Requires that both ipifs are
11861 * administratively down (i.e., no DAD), of the same type, and locked. Note
11862 * that the clone is complete -- including the seqid -- and the expectation is
11863 * that the caller will either free or overwrite `sipif' before it's unlocked.
11864 */
11865 static void
11866 ipif_clone(const ipif_t *sipif, ipif_t *dipif)
11867 {
11868 ASSERT(MUTEX_HELD(&sipif->ipif_ill->ill_lock));
11869 ASSERT(MUTEX_HELD(&dipif->ipif_ill->ill_lock));
11870 ASSERT(!(sipif->ipif_flags & (IPIF_UP|IPIF_DUPLICATE)));
11871 ASSERT(!(dipif->ipif_flags & (IPIF_UP|IPIF_DUPLICATE)));
11872 ASSERT(sipif->ipif_ire_type == dipif->ipif_ire_type);
11873
11874 dipif->ipif_flags = sipif->ipif_flags;
11875 dipif->ipif_zoneid = sipif->ipif_zoneid;
11876 dipif->ipif_v6subnet = sipif->ipif_v6subnet;
11877 dipif->ipif_v6lcl_addr = sipif->ipif_v6lcl_addr;
11878 dipif->ipif_v6net_mask = sipif->ipif_v6net_mask;
11879 dipif->ipif_v6brd_addr = sipif->ipif_v6brd_addr;
11880 dipif->ipif_v6pp_dst_addr = sipif->ipif_v6pp_dst_addr;
11881
11882 /*
11883 * As per the comment atop the function, we assume that these sipif
11884 * fields will be changed before sipif is unlocked.
11885 */
11886 dipif->ipif_seqid = sipif->ipif_seqid;
11887 dipif->ipif_state_flags = sipif->ipif_state_flags;
11888 }
11889
11890 /*
11891 * Transfer the contents of `sipif' to `dipif', and then free (if `virgipif'
11892 * is NULL) or overwrite `sipif' with `virgipif', which must be a virgin
11893 * (unreferenced) ipif. Also, if `sipif' is used by the current xop, then
11894 * transfer the xop to `dipif'. Requires that all ipifs are administratively
11895 * down (i.e., no DAD), of the same type, and unlocked.
11896 */
11897 static void
11898 ipif_transfer(ipif_t *sipif, ipif_t *dipif, ipif_t *virgipif)
11899 {
11900 ipsq_t *ipsq = sipif->ipif_ill->ill_phyint->phyint_ipsq;
11901 ipxop_t *ipx = ipsq->ipsq_xop;
11902
11903 ASSERT(sipif != dipif);
11904 ASSERT(sipif != virgipif);
11905
11906 /*
11907 * Grab all of the locks that protect the ipif in a defined order.
11908 */
11909 GRAB_ILL_LOCKS(sipif->ipif_ill, dipif->ipif_ill);
11910
11911 ipif_clone(sipif, dipif);
11912 if (virgipif != NULL) {
11913 ipif_clone(virgipif, sipif);
11914 mi_free(virgipif);
11915 }
11916
11917 RELEASE_ILL_LOCKS(sipif->ipif_ill, dipif->ipif_ill);
11918
11919 /*
11920 * Transfer ownership of the current xop, if necessary.
11921 */
11922 if (ipx->ipx_current_ipif == sipif) {
11923 ASSERT(ipx->ipx_pending_ipif == NULL);
11924 mutex_enter(&ipx->ipx_lock);
11925 ipx->ipx_current_ipif = dipif;
11926 mutex_exit(&ipx->ipx_lock);
11927 }
11928
11929 if (virgipif == NULL)
11930 mi_free(sipif);
11931 }
11932
11933 /*
 * Checks that:
 * - <ill_name>:<ipif_id> is at most LIFNAMSIZ - 1 characters long, and
 * - the logical interface id is within the allowed range
11937 */
11938 static int
11939 is_lifname_valid(ill_t *ill, unsigned int ipif_id)
11940 {
11941 if (snprintf(NULL, 0, "%s:%d", ill->ill_name, ipif_id) >= LIFNAMSIZ)
11942 return (ENAMETOOLONG);
11943
11944 if (ipif_id >= ill->ill_ipst->ips_ip_addrs_per_if)
11945 return (ERANGE);
11946 return (0);
11947 }
11948
11949 /*
11950 * Insert the ipif, so that the list of ipifs on the ill will be sorted
11951 * with respect to ipif_id. Note that an ipif with an ipif_id of -1 will
11952 * be inserted into the first space available in the list. The value of
11953 * ipif_id will then be set to the appropriate value for its position.
11954 */
11955 static int
11956 ipif_insert(ipif_t *ipif, boolean_t acquire_g_lock)
11957 {
11958 ill_t *ill;
11959 ipif_t *tipif;
11960 ipif_t **tipifp;
11961 int id, err;
11962 ip_stack_t *ipst;
11963
11964 ASSERT(ipif->ipif_ill->ill_net_type == IRE_LOOPBACK ||
11965 IAM_WRITER_IPIF(ipif));
11966
11967 ill = ipif->ipif_ill;
11968 ASSERT(ill != NULL);
11969 ipst = ill->ill_ipst;
11970
11971 /*
11972 * In the case of lo0:0 we already hold the ill_g_lock.
11973 * ill_lookup_on_name (acquires ill_g_lock) -> ipif_allocate ->
11974 * ipif_insert.
11975 */
11976 if (acquire_g_lock)
11977 rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
11978 mutex_enter(&ill->ill_lock);
11979 id = ipif->ipif_id;
11980 tipifp = &(ill->ill_ipif);
11981 if (id == -1) { /* need to find a real id */
11982 id = 0;
11983 while ((tipif = *tipifp) != NULL) {
11984 ASSERT(tipif->ipif_id >= id);
11985 if (tipif->ipif_id != id)
11986 break; /* non-consecutive id */
11987 id++;
11988 tipifp = &(tipif->ipif_next);
11989 }
11990 if ((err = is_lifname_valid(ill, id)) != 0) {
11991 mutex_exit(&ill->ill_lock);
11992 if (acquire_g_lock)
11993 rw_exit(&ipst->ips_ill_g_lock);
11994 return (err);
11995 }
11996 ipif->ipif_id = id; /* assign new id */
11997 } else if ((err = is_lifname_valid(ill, id)) == 0) {
11998 /* we have a real id; insert ipif in the right place */
11999 while ((tipif = *tipifp) != NULL) {
12000 ASSERT(tipif->ipif_id != id);
12001 if (tipif->ipif_id > id)
12002 break; /* found correct location */
12003 tipifp = &(tipif->ipif_next);
12004 }
12005 } else {
12006 mutex_exit(&ill->ill_lock);
12007 if (acquire_g_lock)
12008 rw_exit(&ipst->ips_ill_g_lock);
12009 return (err);
12010 }
12011
12012 ASSERT(tipifp != &(ill->ill_ipif) || id == 0);
12013
12014 ipif->ipif_next = tipif;
12015 *tipifp = ipif;
12016 mutex_exit(&ill->ill_lock);
12017 if (acquire_g_lock)
12018 rw_exit(&ipst->ips_ill_g_lock);
12019
12020 return (0);
12021 }
12022
12023 static void
12024 ipif_remove(ipif_t *ipif)
12025 {
12026 ipif_t **ipifp;
12027 ill_t *ill = ipif->ipif_ill;
12028
12029 ASSERT(RW_WRITE_HELD(&ill->ill_ipst->ips_ill_g_lock));
12030
12031 mutex_enter(&ill->ill_lock);
12032 ipifp = &ill->ill_ipif;
	for (; *ipifp != NULL; ipifp = &(*ipifp)->ipif_next) {
12034 if (*ipifp == ipif) {
12035 *ipifp = ipif->ipif_next;
12036 break;
12037 }
12038 }
12039 mutex_exit(&ill->ill_lock);
12040 }
12041
12042 /*
12043 * Allocate and initialize a new interface control structure. (Always
12044 * called as writer.)
12045 * When ipif_allocate() is called from ip_ll_subnet_defaults, the ill
12046 * is not part of the global linked list of ills. ipif_seqid is unique
12047 * in the system and to preserve the uniqueness, it is assigned only
12048 * when ill becomes part of the global list. At that point ill will
12049 * have a name. If it doesn't get assigned here, it will get assigned
12050 * in ipif_set_values() as part of SIOCSLIFNAME processing.
 * Additionally, if we come here from ip_ll_subnet_defaults, we don't set
12052 * the interface flags or any other information from the DL_INFO_ACK for
12053 * DL_STYLE2 drivers (initialize == B_FALSE), since we won't have them at
12054 * this point. The flags etc. will be set in ip_ll_subnet_defaults when the
12055 * second DL_INFO_ACK comes in from the driver.
12056 */
12057 static ipif_t *
12058 ipif_allocate(ill_t *ill, int id, uint_t ire_type, boolean_t initialize,
12059 boolean_t insert, int *errorp)
12060 {
12061 int err;
12062 ipif_t *ipif;
12063 ip_stack_t *ipst = ill->ill_ipst;
12064
12065 ip1dbg(("ipif_allocate(%s:%d ill %p)\n",
12066 ill->ill_name, id, (void *)ill));
12067 ASSERT(ire_type == IRE_LOOPBACK || IAM_WRITER_ILL(ill));
12068
12069 if (errorp != NULL)
12070 *errorp = 0;
12071
12072 if ((ipif = mi_alloc(sizeof (ipif_t), BPRI_MED)) == NULL) {
12073 if (errorp != NULL)
12074 *errorp = ENOMEM;
12075 return (NULL);
12076 }
12077 *ipif = ipif_zero; /* start clean */
12078
12079 ipif->ipif_ill = ill;
12080 ipif->ipif_id = id; /* could be -1 */
12081 /*
12082 * Inherit the zoneid from the ill; for the shared stack instance
12083 * this is always the global zone
12084 */
12085 ipif->ipif_zoneid = ill->ill_zoneid;
12086
12087 ipif->ipif_refcnt = 0;
12088
12089 if (insert) {
12090 if ((err = ipif_insert(ipif, ire_type != IRE_LOOPBACK)) != 0) {
12091 mi_free(ipif);
12092 if (errorp != NULL)
12093 *errorp = err;
12094 return (NULL);
12095 }
12096 /* -1 id should have been replaced by real id */
12097 id = ipif->ipif_id;
12098 ASSERT(id >= 0);
12099 }
12100
12101 if (ill->ill_name[0] != '\0')
12102 ipif_assign_seqid(ipif);
12103
12104 /*
12105 * If this is the zeroth ipif on the IPMP ill, create the illgrp
12106 * (which must not exist yet because the zeroth ipif is created once
 * per ill).  However, do not link it to the ipmp_grp_t until
12108 * I_PLINK is called; see ip_sioctl_plink_ipmp() for details.
12109 */
12110 if (id == 0 && IS_IPMP(ill)) {
12111 if (ipmp_illgrp_create(ill) == NULL) {
12112 if (insert) {
12113 rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
12114 ipif_remove(ipif);
12115 rw_exit(&ipst->ips_ill_g_lock);
12116 }
12117 mi_free(ipif);
12118 if (errorp != NULL)
12119 *errorp = ENOMEM;
12120 return (NULL);
12121 }
12122 }
12123
12124 /*
12125 * We grab ill_lock to protect the flag changes. The ipif is still
12126 * not up and can't be looked up until the ioctl completes and the
12127 * IPIF_CHANGING flag is cleared.
12128 */
12129 mutex_enter(&ill->ill_lock);
12130
12131 ipif->ipif_ire_type = ire_type;
12132
12133 if (ipif->ipif_isv6) {
12134 ill->ill_flags |= ILLF_IPV6;
12135 } else {
12136 ipaddr_t inaddr_any = INADDR_ANY;
12137
12138 ill->ill_flags |= ILLF_IPV4;
12139
12140 /* Keep the IN6_IS_ADDR_V4MAPPED assertions happy */
12141 IN6_IPADDR_TO_V4MAPPED(inaddr_any,
12142 &ipif->ipif_v6lcl_addr);
12143 IN6_IPADDR_TO_V4MAPPED(inaddr_any,
12144 &ipif->ipif_v6subnet);
12145 IN6_IPADDR_TO_V4MAPPED(inaddr_any,
12146 &ipif->ipif_v6net_mask);
12147 IN6_IPADDR_TO_V4MAPPED(inaddr_any,
12148 &ipif->ipif_v6brd_addr);
12149 IN6_IPADDR_TO_V4MAPPED(inaddr_any,
12150 &ipif->ipif_v6pp_dst_addr);
12151 }
12152
12153 /*
	 * Don't set the interface flags etc. now; we will do it in
	 * ip_ll_subnet_defaults.
12156 */
12157 if (!initialize)
12158 goto out;
12159
12160 /*
12161 * NOTE: The IPMP meta-interface is special-cased because it starts
12162 * with no underlying interfaces (and thus an unknown broadcast
12163 * address length), but all interfaces that can be placed into an IPMP
12164 * group are required to be broadcast-capable.
12165 */
12166 if (ill->ill_bcast_addr_length != 0 || IS_IPMP(ill)) {
12167 /*
12168 * Later detect lack of DLPI driver multicast capability by
12169 * catching DL_ENABMULTI_REQ errors in ip_rput_dlpi().
12170 */
12171 ill->ill_flags |= ILLF_MULTICAST;
12172 if (!ipif->ipif_isv6)
12173 ipif->ipif_flags |= IPIF_BROADCAST;
12174 } else {
12175 if (ill->ill_net_type != IRE_LOOPBACK) {
12176 if (ipif->ipif_isv6)
12177 /*
12178 * Note: xresolv interfaces will eventually need
12179 * NOARP set here as well, but that will require
12180 * those external resolvers to have some
12181 * knowledge of that flag and act appropriately.
12182 * Not to be changed at present.
12183 */
12184 ill->ill_flags |= ILLF_NONUD;
12185 else
12186 ill->ill_flags |= ILLF_NOARP;
12187 }
12188 if (ill->ill_phys_addr_length == 0) {
12189 if (IS_VNI(ill)) {
12190 ipif->ipif_flags |= IPIF_NOXMIT;
12191 } else {
12192 /* pt-pt supports multicast. */
12193 ill->ill_flags |= ILLF_MULTICAST;
12194 if (ill->ill_net_type != IRE_LOOPBACK)
12195 ipif->ipif_flags |= IPIF_POINTOPOINT;
12196 }
12197 }
12198 }
12199 out:
12200 mutex_exit(&ill->ill_lock);
12201 return (ipif);
12202 }
12203
12204 /*
12205 * Remove the neighbor cache entries associated with this logical
12206 * interface.
12207 */
12208 int
12209 ipif_arp_down(ipif_t *ipif)
12210 {
12211 ill_t *ill = ipif->ipif_ill;
12212 int err = 0;
12213
12214 ip1dbg(("ipif_arp_down(%s:%u)\n", ill->ill_name, ipif->ipif_id));
12215 ASSERT(IAM_WRITER_IPIF(ipif));
12216
12217 DTRACE_PROBE3(ipif__downup, char *, "ipif_arp_down",
12218 ill_t *, ill, ipif_t *, ipif);
12219 ipif_nce_down(ipif);
12220
12221 /*
12222 * If this is the last ipif that is going down and there are no
12223 * duplicate addresses we may yet attempt to re-probe, then we need to
12224 * clean up ARP completely.
12225 */
12226 if (ill->ill_ipif_up_count == 0 && ill->ill_ipif_dup_count == 0 &&
12227 !ill->ill_logical_down && ill->ill_net_type == IRE_IF_RESOLVER) {
12228 /*
12229 * If this was the last ipif on an IPMP interface, purge any
12230 * static ARP entries associated with it.
12231 */
12232 if (IS_IPMP(ill))
12233 ipmp_illgrp_refresh_arpent(ill->ill_grp);
12234
12235 /* UNBIND, DETACH */
12236 err = arp_ll_down(ill);
12237 }
12238
12239 return (err);
12240 }
12241
12242 /*
12243 * Get the resolver set up for a new IP address. (Always called as writer.)
12244 * Called both for IPv4 and IPv6 interfaces, though it only does some
 * basic DAD-related initialization for IPv6.  Honors ILLF_NOARP.
12246 *
12247 * The enumerated value res_act tunes the behavior:
12248 * * Res_act_initial: set up all the resolver structures for a new
12249 * IP address.
12250 * * Res_act_defend: tell ARP that it needs to send a single gratuitous
12251 * ARP message in defense of the address.
12252 * * Res_act_rebind: tell ARP to change the hardware address for an IP
12253 * address (and issue gratuitous ARPs). Used by ipmp_ill_bind_ipif().
12254 *
12255 * Returns zero on success, or an errno upon failure.
12256 */
12257 int
12258 ipif_resolver_up(ipif_t *ipif, enum ip_resolver_action res_act)
12259 {
12260 ill_t *ill = ipif->ipif_ill;
12261 int err;
12262 boolean_t was_dup;
12263
12264 ip1dbg(("ipif_resolver_up(%s:%u) flags 0x%x\n",
12265 ill->ill_name, ipif->ipif_id, (uint_t)ipif->ipif_flags));
12266 ASSERT(IAM_WRITER_IPIF(ipif));
12267
12268 was_dup = B_FALSE;
12269 if (res_act == Res_act_initial) {
12270 ipif->ipif_addr_ready = 0;
12271 /*
12272 * We're bringing an interface up here. There's no way that we
12273 * should need to shut down ARP now.
12274 */
12275 mutex_enter(&ill->ill_lock);
12276 if (ipif->ipif_flags & IPIF_DUPLICATE) {
12277 ipif->ipif_flags &= ~IPIF_DUPLICATE;
12278 ill->ill_ipif_dup_count--;
12279 was_dup = B_TRUE;
12280 }
12281 mutex_exit(&ill->ill_lock);
12282 }
12283 if (ipif->ipif_recovery_id != 0)
12284 (void) untimeout(ipif->ipif_recovery_id);
12285 ipif->ipif_recovery_id = 0;
12286 if (ill->ill_net_type != IRE_IF_RESOLVER) {
12287 ipif->ipif_addr_ready = 1;
12288 return (0);
12289 }
12290 /* NDP will set the ipif_addr_ready flag when it's ready */
12291 if (ill->ill_isv6)
12292 return (0);
12293
12294 err = ipif_arp_up(ipif, res_act, was_dup);
12295 return (err);
12296 }
12297
12298 /*
12299 * This routine restarts IPv4/IPv6 duplicate address detection (DAD)
12300 * when a link has just gone back up.
12301 */
12302 static void
12303 ipif_nce_start_dad(ipif_t *ipif)
12304 {
12305 ncec_t *ncec;
12306 ill_t *ill = ipif->ipif_ill;
12307 boolean_t isv6 = ill->ill_isv6;
12308
12309 if (isv6) {
12310 ncec = ncec_lookup_illgrp_v6(ipif->ipif_ill,
12311 &ipif->ipif_v6lcl_addr);
12312 } else {
12313 ipaddr_t v4addr;
12314
12315 if (ill->ill_net_type != IRE_IF_RESOLVER ||
12316 (ipif->ipif_flags & IPIF_UNNUMBERED) ||
12317 ipif->ipif_lcl_addr == INADDR_ANY) {
12318 /*
12319 * If we can't contact ARP for some reason,
12320 * that's not really a problem. Just send
12321 * out the routing socket notification that
12322 * DAD completion would have done, and continue.
12323 */
12324 ipif_mask_reply(ipif);
12325 ipif_up_notify(ipif);
12326 ipif->ipif_addr_ready = 1;
12327 return;
12328 }
12329
12330 IN6_V4MAPPED_TO_IPADDR(&ipif->ipif_v6lcl_addr, v4addr);
12331 ncec = ncec_lookup_illgrp_v4(ipif->ipif_ill, &v4addr);
12332 }
12333
12334 if (ncec == NULL) {
12335 ip1dbg(("couldn't find ncec for ipif %p leaving !ready\n",
12336 (void *)ipif));
12337 return;
12338 }
12339 if (!nce_restart_dad(ncec)) {
12340 /*
12341 * If we can't restart DAD for some reason, that's not really a
12342 * problem. Just send out the routing socket notification that
12343 * DAD completion would have done, and continue.
12344 */
12345 ipif_up_notify(ipif);
12346 ipif->ipif_addr_ready = 1;
12347 }
12348 ncec_refrele(ncec);
12349 }
12350
12351 /*
12352 * Restart duplicate address detection on all interfaces on the given ill.
12353 *
12354 * This is called when an interface transitions from down to up
12355 * (DL_NOTE_LINK_UP) or up to down (DL_NOTE_LINK_DOWN).
12356 *
12357 * Note that since the underlying physical link has transitioned, we must cause
12358 * at least one routing socket message to be sent here, either via DAD
12359 * completion or just by default on the first ipif. (If we don't do this, then
12360 * in.mpathd will see long delays when doing link-based failure recovery.)
12361 */
12362 void
12363 ill_restart_dad(ill_t *ill, boolean_t went_up)
12364 {
12365 ipif_t *ipif;
12366
12367 if (ill == NULL)
12368 return;
12369
12370 /*
12371 * If layer two doesn't support duplicate address detection, then just
12372 * send the routing socket message now and be done with it.
12373 */
12374 if (!ill->ill_isv6 && arp_no_defense) {
12375 ip_rts_ifmsg(ill->ill_ipif, RTSQ_DEFAULT);
12376 return;
12377 }
12378
12379 for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
		if (went_up) {
			if (ipif->ipif_flags & IPIF_UP) {
12383 ipif_nce_start_dad(ipif);
12384 } else if (ipif->ipif_flags & IPIF_DUPLICATE) {
12385 /*
12386 * kick off the bring-up process now.
12387 */
12388 ipif_do_recovery(ipif);
12389 } else {
12390 /*
12391 * Unfortunately, the first ipif is "special"
12392 * and represents the underlying ill in the
12393 * routing socket messages. Thus, when this
12394 * one ipif is down, we must still notify so
12395 * that the user knows the IFF_RUNNING status
12396 * change. (If the first ipif is up, then
12397 * we'll handle eventual routing socket
12398 * notification via DAD completion.)
12399 */
12400 if (ipif == ill->ill_ipif) {
12401 ip_rts_ifmsg(ill->ill_ipif,
12402 RTSQ_DEFAULT);
12403 }
12404 }
12405 } else {
12406 /*
12407 * After link down, we'll need to send a new routing
12408 * message when the link comes back, so clear
12409 * ipif_addr_ready.
12410 */
12411 ipif->ipif_addr_ready = 0;
12412 }
12413 }
12414
12415 /*
12416 * If we've torn down links, then notify the user right away.
12417 */
12418 if (!went_up)
12419 ip_rts_ifmsg(ill->ill_ipif, RTSQ_DEFAULT);
12420 }
12421
12422 static void
12423 ipsq_delete(ipsq_t *ipsq)
12424 {
12425 ipxop_t *ipx = ipsq->ipsq_xop;
12426
12427 ipsq->ipsq_ipst = NULL;
12428 ASSERT(ipsq->ipsq_phyint == NULL);
12429 ASSERT(ipsq->ipsq_xop != NULL);
12430 ASSERT(ipsq->ipsq_xopq_mphead == NULL && ipx->ipx_mphead == NULL);
12431 ASSERT(ipx->ipx_pending_mp == NULL);
12432 kmem_free(ipsq, sizeof (ipsq_t));
12433 }
12434
12435 static int
12436 ill_up_ipifs_on_ill(ill_t *ill, queue_t *q, mblk_t *mp)
12437 {
12438 int err = 0;
12439 ipif_t *ipif;
12440
12441 if (ill == NULL)
12442 return (0);
12443
12444 ASSERT(IAM_WRITER_ILL(ill));
12445 ill->ill_up_ipifs = B_TRUE;
12446 for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
12447 if (ipif->ipif_was_up) {
12448 if (!(ipif->ipif_flags & IPIF_UP))
12449 err = ipif_up(ipif, q, mp);
12450 ipif->ipif_was_up = B_FALSE;
12451 if (err != 0) {
12452 ASSERT(err == EINPROGRESS);
12453 return (err);
12454 }
12455 }
12456 }
12457 ill->ill_up_ipifs = B_FALSE;
12458 return (0);
12459 }
12460
12461 /*
12462 * This function is called to bring up all the ipifs that were up before
12463 * bringing the ill down via ill_down_ipifs().
12464 */
12465 int
12466 ill_up_ipifs(ill_t *ill, queue_t *q, mblk_t *mp)
12467 {
12468 int err;
12469
12470 ASSERT(IAM_WRITER_ILL(ill));
12471
12472 if (ill->ill_replumbing) {
12473 ill->ill_replumbing = 0;
12474 /*
12475 * Send down REPLUMB_DONE notification followed by the
12476 * BIND_REQ on the arp stream.
12477 */
12478 if (!ill->ill_isv6)
12479 arp_send_replumb_conf(ill);
12480 }
12481 err = ill_up_ipifs_on_ill(ill->ill_phyint->phyint_illv4, q, mp);
12482 if (err != 0)
12483 return (err);
12484
12485 return (ill_up_ipifs_on_ill(ill->ill_phyint->phyint_illv6, q, mp));
12486 }
12487
12488 /*
12489 * Bring down any IPIF_UP ipifs on ill. If "logical" is B_TRUE, we bring
12490 * down the ipifs without sending DL_UNBIND_REQ to the driver.
12491 */
12492 static void
12493 ill_down_ipifs(ill_t *ill, boolean_t logical)
12494 {
12495 ipif_t *ipif;
12496
12497 ASSERT(IAM_WRITER_ILL(ill));
12498
12499 for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
12500 /*
12501 * We go through the ipif_down logic even if the ipif
12502 * is already down, since routes can be added based
12503 * on down ipifs. Going through ipif_down once again
12504 * will delete any IREs created based on these routes.
12505 */
12506 if (ipif->ipif_flags & IPIF_UP)
12507 ipif->ipif_was_up = B_TRUE;
12508
12509 if (logical) {
12510 (void) ipif_logical_down(ipif, NULL, NULL);
12511 ipif_non_duplicate(ipif);
12512 (void) ipif_down_tail(ipif);
12513 } else {
12514 (void) ipif_down(ipif, NULL, NULL);
12515 }
12516 }
12517 }
12518
12519 /*
12520 * Redo source address selection. This makes IXAF_VERIFY_SOURCE take
12521 * a look again at valid source addresses.
12522 * This should be called each time after the set of source addresses has been
12523 * changed.
12524 */
12525 void
12526 ip_update_source_selection(ip_stack_t *ipst)
12527 {
12528 /* We skip past SRC_GENERATION_VERIFY */
12529 if (atomic_inc_32_nv(&ipst->ips_src_generation) ==
12530 SRC_GENERATION_VERIFY)
12531 atomic_inc_32(&ipst->ips_src_generation);
12532 }
12533
12534 /*
12535 * Finish the group join started in ip_sioctl_groupname().
12536 */
12537 /* ARGSUSED */
12538 static void
12539 ip_join_illgrps(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy)
12540 {
12541 ill_t *ill = q->q_ptr;
12542 phyint_t *phyi = ill->ill_phyint;
12543 ipmp_grp_t *grp = phyi->phyint_grp;
12544 ip_stack_t *ipst = ill->ill_ipst;
12545
12546 /* IS_UNDER_IPMP() won't work until ipmp_ill_join_illgrp() is called */
12547 ASSERT(!IS_IPMP(ill) && grp != NULL);
12548 ASSERT(IAM_WRITER_IPSQ(ipsq));
12549
12550 if (phyi->phyint_illv4 != NULL) {
12551 rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
12552 VERIFY(grp->gr_pendv4-- > 0);
12553 rw_exit(&ipst->ips_ipmp_lock);
12554 ipmp_ill_join_illgrp(phyi->phyint_illv4, grp->gr_v4);
12555 }
12556 if (phyi->phyint_illv6 != NULL) {
12557 rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
12558 VERIFY(grp->gr_pendv6-- > 0);
12559 rw_exit(&ipst->ips_ipmp_lock);
12560 ipmp_ill_join_illgrp(phyi->phyint_illv6, grp->gr_v6);
12561 }
12562 freemsg(mp);
12563 }
12564
12565 /*
12566 * Process an SIOCSLIFGROUPNAME request.
12567 */
12568 /* ARGSUSED */
12569 int
12570 ip_sioctl_groupname(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
12571 ip_ioctl_cmd_t *ipip, void *ifreq)
12572 {
12573 struct lifreq *lifr = ifreq;
12574 ill_t *ill = ipif->ipif_ill;
12575 ip_stack_t *ipst = ill->ill_ipst;
12576 phyint_t *phyi = ill->ill_phyint;
12577 ipmp_grp_t *grp = phyi->phyint_grp;
12578 mblk_t *ipsq_mp;
12579 int err = 0;
12580
12581 /*
12582 * Note that phyint_grp can only change here, where we're exclusive.
12583 */
12584 ASSERT(IAM_WRITER_ILL(ill));
12585
12586 if (ipif->ipif_id != 0 || ill->ill_usesrc_grp_next != NULL ||
12587 (phyi->phyint_flags & PHYI_VIRTUAL))
12588 return (EINVAL);
12589
12590 lifr->lifr_groupname[LIFGRNAMSIZ - 1] = '\0';
12591
12592 rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
12593
12594 /*
12595 * If the name hasn't changed, there's nothing to do.
12596 */
12597 if (grp != NULL && strcmp(grp->gr_name, lifr->lifr_groupname) == 0)
12598 goto unlock;
12599
12600 /*
12601 * Handle requests to rename an IPMP meta-interface.
12602 *
12603 * Note that creation of the IPMP meta-interface is handled in
12604 * userland through the standard plumbing sequence. As part of the
12605 * plumbing the IPMP meta-interface, its initial groupname is set to
12606 * the name of the interface (see ipif_set_values_tail()).
12607 */
12608 if (IS_IPMP(ill)) {
12609 err = ipmp_grp_rename(grp, lifr->lifr_groupname);
12610 goto unlock;
12611 }
12612
12613 /*
12614 * Handle requests to add or remove an IP interface from a group.
12615 */
12616 if (lifr->lifr_groupname[0] != '\0') { /* add */
12617 /*
12618 * Moves are handled by first removing the interface from
12619 * its existing group, and then adding it to another group.
12620 * So, fail if it's already in a group.
12621 */
12622 if (IS_UNDER_IPMP(ill)) {
12623 err = EALREADY;
12624 goto unlock;
12625 }
12626
12627 grp = ipmp_grp_lookup(lifr->lifr_groupname, ipst);
12628 if (grp == NULL) {
12629 err = ENOENT;
12630 goto unlock;
12631 }
12632
12633 /*
12634 * Check if the phyint and its ills are suitable for
12635 * inclusion into the group.
12636 */
12637 if ((err = ipmp_grp_vet_phyint(grp, phyi)) != 0)
12638 goto unlock;
12639
12640 /*
12641 * Checks pass; join the group, and enqueue the remaining
12642 * illgrp joins for when we've become part of the group xop
12643 * and are exclusive across its IPSQs. Since qwriter_ip()
12644 * requires an mblk_t to scribble on, and since `mp' will be
12645 * freed as part of completing the ioctl, allocate another.
12646 */
12647 if ((ipsq_mp = allocb(0, BPRI_MED)) == NULL) {
12648 err = ENOMEM;
12649 goto unlock;
12650 }
12651
12652 /*
12653 * Before we drop ipmp_lock, bump gr_pend* to ensure that the
12654 * IPMP meta-interface ills needed by `phyi' cannot go away
12655 * before ip_join_illgrps() is called back. See the comments
12656 * in ip_sioctl_plink_ipmp() for more.
12657 */
12658 if (phyi->phyint_illv4 != NULL)
12659 grp->gr_pendv4++;
12660 if (phyi->phyint_illv6 != NULL)
12661 grp->gr_pendv6++;
12662
12663 rw_exit(&ipst->ips_ipmp_lock);
12664
12665 ipmp_phyint_join_grp(phyi, grp);
12666 ill_refhold(ill);
12667 qwriter_ip(ill, ill->ill_rq, ipsq_mp, ip_join_illgrps,
12668 SWITCH_OP, B_FALSE);
12669 return (0);
12670 } else {
12671 /*
12672 * Request to remove the interface from a group. If the
12673 * interface is not in a group, this trivially succeeds.
12674 */
12675 rw_exit(&ipst->ips_ipmp_lock);
12676 if (IS_UNDER_IPMP(ill))
12677 ipmp_phyint_leave_grp(phyi);
12678 return (0);
12679 }
12680 unlock:
12681 rw_exit(&ipst->ips_ipmp_lock);
12682 return (err);
12683 }
12684
12685 /*
12686 * Process an SIOCGLIFBINDING request.
12687 */
12688 /* ARGSUSED */
12689 int
12690 ip_sioctl_get_binding(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
12691 ip_ioctl_cmd_t *ipip, void *ifreq)
12692 {
12693 ill_t *ill;
12694 struct lifreq *lifr = ifreq;
12695 ip_stack_t *ipst = ipif->ipif_ill->ill_ipst;
12696
12697 if (!IS_IPMP(ipif->ipif_ill))
12698 return (EINVAL);
12699
12700 rw_enter(&ipst->ips_ipmp_lock, RW_READER);
12701 if ((ill = ipif->ipif_bound_ill) == NULL)
12702 lifr->lifr_binding[0] = '\0';
12703 else
12704 (void) strlcpy(lifr->lifr_binding, ill->ill_name, LIFNAMSIZ);
12705 rw_exit(&ipst->ips_ipmp_lock);
12706 return (0);
12707 }
12708
12709 /*
12710 * Process an SIOCGLIFGROUPNAME request.
12711 */
12712 /* ARGSUSED */
12713 int
12714 ip_sioctl_get_groupname(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
12715 ip_ioctl_cmd_t *ipip, void *ifreq)
12716 {
12717 ipmp_grp_t *grp;
12718 struct lifreq *lifr = ifreq;
12719 ip_stack_t *ipst = ipif->ipif_ill->ill_ipst;
12720
12721 rw_enter(&ipst->ips_ipmp_lock, RW_READER);
12722 if ((grp = ipif->ipif_ill->ill_phyint->phyint_grp) == NULL)
12723 lifr->lifr_groupname[0] = '\0';
12724 else
12725 (void) strlcpy(lifr->lifr_groupname, grp->gr_name, LIFGRNAMSIZ);
12726 rw_exit(&ipst->ips_ipmp_lock);
12727 return (0);
12728 }
12729
12730 /*
12731 * Process an SIOCGLIFGROUPINFO request.
12732 */
12733 /* ARGSUSED */
12734 int
12735 ip_sioctl_groupinfo(ipif_t *dummy_ipif, sin_t *sin, queue_t *q, mblk_t *mp,
12736 ip_ioctl_cmd_t *ipip, void *dummy)
12737 {
12738 ipmp_grp_t *grp;
12739 lifgroupinfo_t *lifgr;
12740 ip_stack_t *ipst = CONNQ_TO_IPST(q);
12741
12742 /* ip_wput_nondata() verified mp->b_cont->b_cont */
12743 lifgr = (lifgroupinfo_t *)mp->b_cont->b_cont->b_rptr;
12744 lifgr->gi_grname[LIFGRNAMSIZ - 1] = '\0';
12745
12746 rw_enter(&ipst->ips_ipmp_lock, RW_READER);
12747 if ((grp = ipmp_grp_lookup(lifgr->gi_grname, ipst)) == NULL) {
12748 rw_exit(&ipst->ips_ipmp_lock);
12749 return (ENOENT);
12750 }
12751 ipmp_grp_info(grp, lifgr);
12752 rw_exit(&ipst->ips_ipmp_lock);
12753 return (0);
12754 }
12755
12756 static void
12757 ill_dl_down(ill_t *ill)
12758 {
12759 DTRACE_PROBE2(ill__downup, char *, "ill_dl_down", ill_t *, ill);
12760
12761 /*
12762 * The ill is down; unbind but stay attached since we're still
 * associated with a PPA.  If we have negotiated DLPI capabilities
12764 * with the data link service provider (IDS_OK) then reset them.
12765 * The interval between unbinding and rebinding is potentially
12766 * unbounded hence we cannot assume things will be the same.
12767 * The DLPI capabilities will be probed again when the data link
12768 * is brought up.
12769 */
12770 mblk_t *mp = ill->ill_unbind_mp;
12771
12772 ip1dbg(("ill_dl_down(%s)\n", ill->ill_name));
12773
12774 if (!ill->ill_replumbing) {
12775 /* Free all ilms for this ill */
12776 update_conn_ill(ill, ill->ill_ipst);
12777 } else {
12778 ill_leave_multicast(ill);
12779 }
12780
12781 ill->ill_unbind_mp = NULL;
12782
12783 mutex_enter(&ill->ill_lock);
12784 ill->ill_dl_up = 0;
12785 ill_nic_event_dispatch(ill, 0, NE_DOWN, NULL, 0);
12786 mutex_exit(&ill->ill_lock);
12787
12788 if (mp != NULL) {
12789 ip1dbg(("ill_dl_down: %s (%u) for %s\n",
12790 dl_primstr(*(int *)mp->b_rptr), *(int *)mp->b_rptr,
12791 ill->ill_name));
12792 mutex_enter(&ill->ill_lock);
12793 ill->ill_state_flags |= ILL_DL_UNBIND_IN_PROGRESS;
12794 mutex_exit(&ill->ill_lock);
12795 /*
		 * ip_rput does not pass up normal (M_PROTO) DLPI messages
		 * after ILL_CONDEMNED is set.  So in the unplumb case, we
		 * call ill_capability_dld_disable() right away.  If this is
		 * not an unplumb operation then the disable happens on
		 * receipt of the capab ack via ip_rput_dlpi_writer ->
		 * ill_capability_ack_thr.  In both cases the order of the
		 * operations seen by DLD is capability disable followed by
		 * DL_UNBIND.  Also, the DLD capability disable needs a
		 * cv_wait'able context.
12805 */
12806 if (ill->ill_state_flags & ILL_CONDEMNED)
12807 ill_capability_dld_disable(ill);
12808 ill_capability_reset(ill, B_FALSE);
12809 ill_dlpi_send(ill, mp);
12810
12811 /*
12812 * Wait for the capability reset to finish.
12813 * In this case, it doesn't matter WHY or HOW it finished.
12814 */
12815 (void) ill_capability_wait(ill);
12816 }
12817 }
12818
12819 void
12820 ill_dlpi_dispatch(ill_t *ill, mblk_t *mp)
12821 {
12822 union DL_primitives *dlp;
12823 t_uscalar_t prim;
12824 boolean_t waitack = B_FALSE;
12825
12826 ASSERT(DB_TYPE(mp) == M_PROTO || DB_TYPE(mp) == M_PCPROTO);
12827
12828 dlp = (union DL_primitives *)mp->b_rptr;
12829 prim = dlp->dl_primitive;
12830
12831 ip1dbg(("ill_dlpi_dispatch: sending %s (%u) to %s\n",
12832 dl_primstr(prim), prim, ill->ill_name));
12833
12834 switch (prim) {
12835 case DL_PHYS_ADDR_REQ:
12836 {
12837 dl_phys_addr_req_t *dlpap = (dl_phys_addr_req_t *)mp->b_rptr;
12838 ill->ill_phys_addr_pend = dlpap->dl_addr_type;
12839 break;
12840 }
12841 case DL_BIND_REQ:
12842 mutex_enter(&ill->ill_lock);
12843 ill->ill_state_flags &= ~ILL_DL_UNBIND_IN_PROGRESS;
12844 mutex_exit(&ill->ill_lock);
12845 break;
12846 }
12847
12848 /*
12849 * Except for the ACKs for the M_PCPROTO messages, all other ACKs
12850 * are dropped by ip_rput() if ILL_CONDEMNED is set. Therefore
12851 * we only wait for the ACK of the DL_UNBIND_REQ.
12852 */
12853 mutex_enter(&ill->ill_lock);
12854 if (!(ill->ill_state_flags & ILL_CONDEMNED) ||
12855 (prim == DL_UNBIND_REQ)) {
12856 ill->ill_dlpi_pending = prim;
12857 waitack = B_TRUE;
12858 }
12859
12860 mutex_exit(&ill->ill_lock);
12861 DTRACE_PROBE3(ill__dlpi, char *, "ill_dlpi_dispatch",
12862 char *, dl_primstr(prim), ill_t *, ill);
12863 putnext(ill->ill_wq, mp);
12864
12865 /*
12866 * There is no ack for DL_NOTIFY_CONF messages
12867 */
12868 if (waitack && prim == DL_NOTIFY_CONF)
12869 ill_dlpi_done(ill, prim);
12870 }
12871
12872 /*
12873 * Helper function for ill_dlpi_send().
12874 */
12875 /* ARGSUSED */
12876 static void
12877 ill_dlpi_send_writer(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *arg)
12878 {
12879 ill_dlpi_send(q->q_ptr, mp);
12880 }
12881
12882 /*
12883 * Send a DLPI control message to the driver but make sure there
12884 * is only one outstanding message. Uses ill_dlpi_pending to tell
12885 * when it must queue. ip_rput_dlpi_writer calls ill_dlpi_done()
12886 * when an ACK or a NAK is received to process the next queued message.
12887 */
12888 void
12889 ill_dlpi_send(ill_t *ill, mblk_t *mp)
12890 {
12891 mblk_t **mpp;
12892
12893 ASSERT(DB_TYPE(mp) == M_PROTO || DB_TYPE(mp) == M_PCPROTO);
12894
12895 /*
12896 * To ensure that any DLPI requests for current exclusive operation
12897 * are always completely sent before any DLPI messages for other
12898 * operations, require writer access before enqueuing.
12899 */
12900 if (!IAM_WRITER_ILL(ill)) {
12901 ill_refhold(ill);
12902 /* qwriter_ip() does the ill_refrele() */
12903 qwriter_ip(ill, ill->ill_wq, mp, ill_dlpi_send_writer,
12904 NEW_OP, B_TRUE);
12905 return;
12906 }
12907
12908 mutex_enter(&ill->ill_lock);
12909 if (ill->ill_dlpi_pending != DL_PRIM_INVAL) {
12910 /* Must queue message. Tail insertion */
12911 mpp = &ill->ill_dlpi_deferred;
12912 while (*mpp != NULL)
12913 mpp = &((*mpp)->b_next);
12914
12915 ip1dbg(("ill_dlpi_send: deferring request for %s "
12916 "while %s pending\n", ill->ill_name,
12917 dl_primstr(ill->ill_dlpi_pending)));
12918
12919 *mpp = mp;
12920 mutex_exit(&ill->ill_lock);
12921 return;
12922 }
12923 mutex_exit(&ill->ill_lock);
12924 ill_dlpi_dispatch(ill, mp);
12925 }
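
/*
 * Illustrative caller sketch for the serialization above (hypothetical
 * usage; ip_dlpi_alloc() is the helper used elsewhere in IP to build
 * DLPI requests):
 *
 *	mblk_t *mp = ip_dlpi_alloc(sizeof (dl_unbind_req_t), DL_UNBIND_REQ);
 *	if (mp != NULL)
 *		ill_dlpi_send(ill, mp);
 *
 * If another primitive is outstanding the message simply lands on
 * ill_dlpi_deferred; the ack/nak path (ip_rput_dlpi_writer ->
 * ill_dlpi_done()) dispatches it later.
 */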
12926
12927 void
12928 ill_capability_send(ill_t *ill, mblk_t *mp)
12929 {
12930 ill->ill_capab_pending_cnt++;
12931 ill_dlpi_send(ill, mp);
12932 }
12933
12934 void
12935 ill_capability_done(ill_t *ill)
12936 {
12937 ASSERT(ill->ill_capab_pending_cnt != 0);
12938
12939 ill_dlpi_done(ill, DL_CAPABILITY_REQ);
12940
12941 ill->ill_capab_pending_cnt--;
12942 if (ill->ill_capab_pending_cnt == 0 &&
12943 ill->ill_dlpi_capab_state == IDCS_OK)
12944 ill_capability_reset_alloc(ill);
12945
12946 mutex_enter(&ill->ill_dlpi_capab_lock);
12947 cv_broadcast(&ill->ill_dlpi_capab_cv);
12948 mutex_exit(&ill->ill_dlpi_capab_lock);
12949 }
12950
12951 /*
12952 * Send all deferred DLPI messages without waiting for their ACKs.
12953 */
12954 void
12955 ill_dlpi_send_deferred(ill_t *ill)
12956 {
12957 mblk_t *mp, *nextmp;
12958
12959 /*
12960 * Clear ill_dlpi_pending so that the message is not queued in
12961 * ill_dlpi_send().
12962 */
12963 mutex_enter(&ill->ill_lock);
12964 ill->ill_dlpi_pending = DL_PRIM_INVAL;
12965 mp = ill->ill_dlpi_deferred;
12966 ill->ill_dlpi_deferred = NULL;
12967 mutex_exit(&ill->ill_lock);
12968
12969 for (; mp != NULL; mp = nextmp) {
12970 nextmp = mp->b_next;
12971 mp->b_next = NULL;
12972 ill_dlpi_send(ill, mp);
12973 }
12974 }
12975
12976 /*
12977 * Clear all the deferred DLPI messages. Called on receiving an M_ERROR
12978 * or M_HANGUP
12979 */
12980 static void
12981 ill_dlpi_clear_deferred(ill_t *ill)
12982 {
12983 mblk_t *mp, *nextmp;
12984
12985 mutex_enter(&ill->ill_lock);
12986 ill->ill_dlpi_pending = DL_PRIM_INVAL;
12987 mp = ill->ill_dlpi_deferred;
12988 ill->ill_dlpi_deferred = NULL;
12989 mutex_exit(&ill->ill_lock);
12990
12991 for (; mp != NULL; mp = nextmp) {
12992 nextmp = mp->b_next;
12993 inet_freemsg(mp);
12994 }
12995 }
12996
12997 /*
12998 * Check if the DLPI primitive `prim' is pending; print a warning if not.
12999 */
13000 boolean_t
13001 ill_dlpi_pending(ill_t *ill, t_uscalar_t prim)
13002 {
13003 t_uscalar_t pending;
13004
13005 mutex_enter(&ill->ill_lock);
13006 if (ill->ill_dlpi_pending == prim) {
13007 mutex_exit(&ill->ill_lock);
13008 return (B_TRUE);
13009 }
13010
13011 /*
13012 * During teardown, ill_dlpi_dispatch() will send DLPI requests
13013 * without waiting, so don't print any warnings in that case.
13014 */
13015 if (ill->ill_state_flags & ILL_CONDEMNED) {
13016 mutex_exit(&ill->ill_lock);
13017 return (B_FALSE);
13018 }
13019 pending = ill->ill_dlpi_pending;
13020 mutex_exit(&ill->ill_lock);
13021
13022 if (pending == DL_PRIM_INVAL) {
13023 (void) mi_strlog(ill->ill_rq, 1, SL_CONSOLE|SL_ERROR|SL_TRACE,
13024 "received unsolicited ack for %s on %s\n",
13025 dl_primstr(prim), ill->ill_name);
13026 } else {
13027 (void) mi_strlog(ill->ill_rq, 1, SL_CONSOLE|SL_ERROR|SL_TRACE,
13028 "received unexpected ack for %s on %s (expecting %s)\n",
13029 dl_primstr(prim), ill->ill_name, dl_primstr(pending));
13030 }
13031 return (B_FALSE);
13032 }
13033
13034 /*
13035 * Complete the current DLPI operation associated with `prim' on `ill' and
13036 * start the next queued DLPI operation (if any). If there are no queued DLPI
13037 * operations and the ill's current exclusive IPSQ operation has finished
13038 * (i.e., ipsq_current_finish() was called), then clear ipsq_current_ipif to
13039 * allow the next exclusive IPSQ operation to begin upon ipsq_exit(). See
13040 * the comments above ipsq_current_finish() for details.
13041 */
13042 void
13043 ill_dlpi_done(ill_t *ill, t_uscalar_t prim)
13044 {
13045 mblk_t *mp;
13046 ipsq_t *ipsq = ill->ill_phyint->phyint_ipsq;
13047 ipxop_t *ipx = ipsq->ipsq_xop;
13048
13049 ASSERT(IAM_WRITER_IPSQ(ipsq));
13050 mutex_enter(&ill->ill_lock);
13051
13052 ASSERT(prim != DL_PRIM_INVAL);
13053 ASSERT(ill->ill_dlpi_pending == prim);
13054
13055 ip1dbg(("ill_dlpi_done: %s has completed %s (%u)\n", ill->ill_name,
13056 dl_primstr(ill->ill_dlpi_pending), ill->ill_dlpi_pending));
13057
13058 if ((mp = ill->ill_dlpi_deferred) == NULL) {
13059 ill->ill_dlpi_pending = DL_PRIM_INVAL;
13060 if (ipx->ipx_current_done) {
13061 mutex_enter(&ipx->ipx_lock);
13062 ipx->ipx_current_ipif = NULL;
13063 mutex_exit(&ipx->ipx_lock);
13064 }
13065 cv_signal(&ill->ill_cv);
13066 mutex_exit(&ill->ill_lock);
13067 return;
13068 }
13069
13070 ill->ill_dlpi_deferred = mp->b_next;
13071 mp->b_next = NULL;
13072 mutex_exit(&ill->ill_lock);
13073
13074 ill_dlpi_dispatch(ill, mp);
13075 }
13076
13077 /*
13078 * Queue a (multicast) DLPI control message to be sent to the driver by
13079 * later calling ill_dlpi_send_queued.
13080 * We queue them while holding a lock (ill_mcast_lock) to ensure that they
 * are sent in order, i.e., to prevent a DL_DISABMULTI_REQ and a
 * DL_ENABMULTI_REQ for the same group from racing.
13083 * We send DLPI control messages in order using ill_lock.
13084 * For IPMP we should be called on the cast_ill.
13085 */
13086 void
13087 ill_dlpi_queue(ill_t *ill, mblk_t *mp)
13088 {
13089 mblk_t **mpp;
13090
13091 ASSERT(DB_TYPE(mp) == M_PROTO || DB_TYPE(mp) == M_PCPROTO);
13092
13093 mutex_enter(&ill->ill_lock);
13094 /* Must queue message. Tail insertion */
13095 mpp = &ill->ill_dlpi_deferred;
13096 while (*mpp != NULL)
13097 mpp = &((*mpp)->b_next);
13098
13099 *mpp = mp;
13100 mutex_exit(&ill->ill_lock);
13101 }
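
/*
 * Pairing sketch for the above (hypothetical caller): the request is
 * queued under ill_mcast_lock and flushed once that lock is dropped.
 *
 *	rw_enter(&ill->ill_mcast_lock, RW_WRITER);
 *	ill_dlpi_queue(ill, enabmulti_mp);	(e.g. a DL_ENABMULTI_REQ)
 *	rw_exit(&ill->ill_mcast_lock);
 *	ill_dlpi_send_queued(ill);		(sends one at a time)
 */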
13102
13103 /*
13104 * Send the messages that were queued. Make sure there is only
13105 * one outstanding message. ip_rput_dlpi_writer calls ill_dlpi_done()
13106 * when an ACK or a NAK is received to process the next queued message.
 * For IPMP we are called on the upper ill, but we send what is queued
 * on the cast_ill.
13109 */
13110 void
13111 ill_dlpi_send_queued(ill_t *ill)
13112 {
13113 mblk_t *mp;
13114 union DL_primitives *dlp;
13115 t_uscalar_t prim;
13116 ill_t *release_ill = NULL;
13117
13118 if (IS_IPMP(ill)) {
13119 /* On the upper IPMP ill. */
13120 release_ill = ipmp_illgrp_hold_cast_ill(ill->ill_grp);
13121 if (release_ill == NULL) {
13122 /* Avoid ever sending anything down to the ipmpstub */
13123 return;
13124 }
13125 ill = release_ill;
13126 }
13127 mutex_enter(&ill->ill_lock);
13128 while ((mp = ill->ill_dlpi_deferred) != NULL) {
13129 if (ill->ill_dlpi_pending != DL_PRIM_INVAL) {
13130 /* Can't send. Somebody else will send it */
13131 mutex_exit(&ill->ill_lock);
13132 goto done;
13133 }
13134 ill->ill_dlpi_deferred = mp->b_next;
13135 mp->b_next = NULL;
13136 if (!ill->ill_dl_up) {
13137 /*
13138 * Nobody there. All multicast addresses will be
13139 * re-joined when we get the DL_BIND_ACK bringing the
13140 * interface up.
13141 */
13142 freemsg(mp);
13143 continue;
13144 }
13145 dlp = (union DL_primitives *)mp->b_rptr;
13146 prim = dlp->dl_primitive;
13147
13148 if (!(ill->ill_state_flags & ILL_CONDEMNED) ||
13149 (prim == DL_UNBIND_REQ)) {
13150 ill->ill_dlpi_pending = prim;
13151 }
13152 mutex_exit(&ill->ill_lock);
13153
13154 DTRACE_PROBE3(ill__dlpi, char *, "ill_dlpi_send_queued",
13155 char *, dl_primstr(prim), ill_t *, ill);
13156 putnext(ill->ill_wq, mp);
13157 mutex_enter(&ill->ill_lock);
13158 }
13159 mutex_exit(&ill->ill_lock);
13160 done:
13161 if (release_ill != NULL)
13162 ill_refrele(release_ill);
13163 }
13164
13165 /*
 * Queue an IP (IGMP/MLD) message to be sent by IP later from
 * ill_mcast_send_queued.
 * We queue them while holding a lock (ill_mcast_lock) to ensure that they
 * are sent in order, i.e., to prevent an IGMP leave and an IGMP join for
 * the same group from racing.
13171 * We send them in order using ill_lock.
13172 * For IPMP we are called on the upper ill, but we queue on the cast_ill.
13173 */
13174 void
13175 ill_mcast_queue(ill_t *ill, mblk_t *mp)
13176 {
13177 mblk_t **mpp;
13178 ill_t *release_ill = NULL;
13179
13180 ASSERT(RW_LOCK_HELD(&ill->ill_mcast_lock));
13181
13182 if (IS_IPMP(ill)) {
13183 /* On the upper IPMP ill. */
13184 release_ill = ipmp_illgrp_hold_cast_ill(ill->ill_grp);
13185 if (release_ill == NULL) {
13186 /* Discard instead of queuing for the ipmp interface */
13187 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
13188 ip_drop_output("ipIfStatsOutDiscards - no cast_ill",
13189 mp, ill);
13190 freemsg(mp);
13191 return;
13192 }
13193 ill = release_ill;
13194 }
13195
13196 mutex_enter(&ill->ill_lock);
13197 /* Must queue message. Tail insertion */
13198 mpp = &ill->ill_mcast_deferred;
13199 while (*mpp != NULL)
13200 mpp = &((*mpp)->b_next);
13201
13202 *mpp = mp;
13203 mutex_exit(&ill->ill_lock);
13204 if (release_ill != NULL)
13205 ill_refrele(release_ill);
13206 }
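
/*
 * Analogous pairing sketch for IGMP/MLD packets (hypothetical caller;
 * report_mp is an already-built membership report):
 *
 *	rw_enter(&ill->ill_mcast_lock, RW_WRITER);
 *	ill_mcast_queue(ill, report_mp);
 *	rw_exit(&ill->ill_mcast_lock);
 *	ill_mcast_send_queued(ill);
 */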
13207
13208 /*
13209 * Send the IP packets that were queued by ill_mcast_queue.
13210 * These are IGMP/MLD packets.
13211 *
 * For IPMP we are called on the upper ill, but we send what is queued
 * on the cast_ill.
13214 *
13215 * Request loopback of the report if we are acting as a multicast
 * router, so that the process-level routing daemon can hear it.
 * This may run multiple times for the same group if there are members
 * of the same group on multiple ipifs of the same ill. The
 * igmp_input/mld_input code suppresses the duplicates this creates via
 * the loopback, so we always loop back the membership report.
13221 *
13222 * We also need to make sure that this does not get load balanced
13223 * by IPMP. We do this by passing an ill to ip_output_simple.
13224 */
13225 void
13226 ill_mcast_send_queued(ill_t *ill)
13227 {
13228 mblk_t *mp;
13229 ip_xmit_attr_t ixas;
13230 ill_t *release_ill = NULL;
13231
13232 if (IS_IPMP(ill)) {
13233 /* On the upper IPMP ill. */
13234 release_ill = ipmp_illgrp_hold_cast_ill(ill->ill_grp);
13235 if (release_ill == NULL) {
13236 /*
 * We should have no messages on the ipmp interface, and there
 * is no point in trying to send them anyway.
13239 */
13240 return;
13241 }
13242 ill = release_ill;
13243 }
13244 bzero(&ixas, sizeof (ixas));
13245 ixas.ixa_zoneid = ALL_ZONES;
13246 ixas.ixa_cred = kcred;
13247 ixas.ixa_cpid = NOPID;
13248 ixas.ixa_tsl = NULL;
13249 /*
13250 * Here we set ixa_ifindex. If IPMP it will be the lower ill which
13251 * makes ip_select_route pick the IRE_MULTICAST for the cast_ill.
13252 * That is necessary to handle IGMP/MLD snooping switches.
13253 */
13254 ixas.ixa_ifindex = ill->ill_phyint->phyint_ifindex;
13255 ixas.ixa_ipst = ill->ill_ipst;
13256
13257 mutex_enter(&ill->ill_lock);
13258 while ((mp = ill->ill_mcast_deferred) != NULL) {
13259 ill->ill_mcast_deferred = mp->b_next;
13260 mp->b_next = NULL;
13261 if (!ill->ill_dl_up) {
13262 /*
13263 * Nobody there. Just drop the ip packets.
13264 * IGMP/MLD will resend later, if this is a replumb.
13265 */
13266 freemsg(mp);
13267 continue;
13268 }
13269 mutex_enter(&ill->ill_phyint->phyint_lock);
13270 if (IS_UNDER_IPMP(ill) && !ipmp_ill_is_active(ill)) {
13271 /*
13272 * When the ill is getting deactivated, we only want to
13273 * send the DLPI messages, so drop IGMP/MLD packets.
13274 * DLPI messages are handled by ill_dlpi_send_queued()
13275 */
13276 mutex_exit(&ill->ill_phyint->phyint_lock);
13277 freemsg(mp);
13278 continue;
13279 }
13280 mutex_exit(&ill->ill_phyint->phyint_lock);
13281 mutex_exit(&ill->ill_lock);
13282
13283 /* Check whether we are sending IPv4 or IPv6. */
13284 if (ill->ill_isv6) {
13285 ip6_t *ip6h = (ip6_t *)mp->b_rptr;
13286
13287 ixas.ixa_multicast_ttl = ip6h->ip6_hops;
13288 ixas.ixa_flags = IXAF_BASIC_SIMPLE_V6;
13289 } else {
13290 ipha_t *ipha = (ipha_t *)mp->b_rptr;
13291
13292 ixas.ixa_multicast_ttl = ipha->ipha_ttl;
13293 ixas.ixa_flags = IXAF_BASIC_SIMPLE_V4;
13294 ixas.ixa_flags &= ~IXAF_SET_ULP_CKSUM;
13295 }
13296 ixas.ixa_flags &= ~IXAF_VERIFY_SOURCE;
13297 ixas.ixa_flags |= IXAF_MULTICAST_LOOP | IXAF_SET_SOURCE;
13298 (void) ip_output_simple(mp, &ixas);
13299 ixa_cleanup(&ixas);
13300
13301 mutex_enter(&ill->ill_lock);
13302 }
13303 mutex_exit(&ill->ill_lock);
13304
13305 done:
13306 if (release_ill != NULL)
13307 ill_refrele(release_ill);
13308 }
13309
13310 /*
13311 * Take down a specific interface, but don't lose any information about it.
13312 * (Always called as writer.)
13313 * This function goes through the down sequence even if the interface is
13314 * already down. There are 2 reasons.
13315 * a. Currently we permit interface routes that depend on down interfaces
13316 * to be added. This behaviour itself is questionable. However it appears
13317 * that both Solaris and 4.3 BSD have exhibited this behaviour for a long
 * time. We go through the cleanup in order to remove these routes.
13319 * b. The bringup of the interface could fail in ill_dl_up i.e. we get
13320 * DL_ERROR_ACK in response to the DL_BIND request. The interface is
 * down, but we need to clean up, i.e., do ill_dl_down and
13322 * ip_rput_dlpi_writer (DL_ERROR_ACK) -> ipif_down.
13323 *
13324 * IP-MT notes:
13325 *
13326 * Model of reference to interfaces.
13327 *
13328 * The following members in ipif_t track references to the ipif.
13329 * int ipif_refcnt; Active reference count
13330 *
13331 * The following members in ill_t track references to the ill.
13332 * int ill_refcnt; active refcnt
13333 * uint_t ill_ire_cnt; Number of ires referencing ill
13334 * uint_t ill_ncec_cnt; Number of ncecs referencing ill
13335 * uint_t ill_nce_cnt; Number of nces referencing ill
13336 * uint_t ill_ilm_cnt; Number of ilms referencing ill
13337 *
13338 * Reference to an ipif or ill can be obtained in any of the following ways.
13339 *
 * Through the lookup functions ipif_lookup_* / ill_lookup_*.
13341 * Pointers to ipif / ill from other data structures viz ire and conn.
13342 * Implicit reference to the ipif / ill by holding a reference to the ire.
13343 *
13344 * The ipif/ill lookup functions return a reference held ipif / ill.
13345 * ipif_refcnt and ill_refcnt track the reference counts respectively.
13346 * This is a purely dynamic reference count associated with threads holding
13347 * references to the ipif / ill. Pointers from other structures do not
13348 * count towards this reference count.
13349 *
13350 * ill_ire_cnt is the number of ire's associated with the
13351 * ill. This is incremented whenever a new ire is created referencing the
13352 * ill. This is done atomically inside ire_add_v[46] where the ire is
13353 * actually added to the ire hash table. The count is decremented in
13354 * ire_inactive where the ire is destroyed.
13355 *
 * ill_ncec_cnt is the number of ncec's referencing the ill through ncec_ill.
13357 * This is incremented atomically in
13358 * ndp_add_v4()/ndp_add_v6() where the nce is actually added to the
13359 * table. Similarly it is decremented in ncec_inactive() where the ncec
13360 * is destroyed.
13361 *
 * ill_nce_cnt is the number of nce's referencing the ill through nce_ill. This is
13363 * incremented atomically in nce_add() where the nce is actually added to the
13364 * ill_nce. Similarly it is decremented in nce_inactive() where the nce
13365 * is destroyed.
13366 *
13367 * ill_ilm_cnt is the ilm's reference to the ill. It is incremented in
13368 * ilm_add() and decremented before the ilm is freed in ilm_delete().
13369 *
13370 * Flow of ioctls involving interface down/up
13371 *
13372 * The following is the sequence of an attempt to set some critical flags on an
13373 * up interface.
13374 * ip_sioctl_flags
13375 * ipif_down
13376 * wait for ipif to be quiescent
13377 * ipif_down_tail
13378 * ip_sioctl_flags_tail
13379 *
13380 * All set ioctls that involve down/up sequence would have a skeleton similar
13381 * to the above. All the *tail functions are called after the refcounts have
13382 * dropped to the appropriate values.
13383 *
13384 * SIOC ioctls during the IPIF_CHANGING interval.
13385 *
13386 * Threads handling SIOC set ioctls serialize on the squeue, but this
13387 * is not done for SIOC get ioctls. Since a set ioctl can cause several
13388 * steps of internal changes to the state, some of which are visible in
13389 * ipif_flags (such as IFF_UP being cleared and later set), and we want
 * the set ioctl to be atomic relative to the get ioctls, the SIOC get code
13391 * will wait and restart ioctls if IPIF_CHANGING is set. The mblk is then
13392 * enqueued in the ipsq and the operation is restarted by ipsq_exit() when
13393 * the current exclusive operation completes. The IPIF_CHANGING check
13394 * and enqueue is atomic using the ill_lock and ipsq_lock. The
13395 * lookup is done holding the ill_lock. Hence the ill/ipif state flags can't
13396 * change while the ill_lock is held. Before dropping the ill_lock we acquire
13397 * the ipsq_lock and call ipsq_enq. This ensures that ipsq_exit can't finish
13398 * until we release the ipsq_lock, even though the ill/ipif state flags
13399 * can change after we drop the ill_lock.
13400 */
13401 int
13402 ipif_down(ipif_t *ipif, queue_t *q, mblk_t *mp)
13403 {
13404 ill_t *ill = ipif->ipif_ill;
13405 conn_t *connp;
13406 boolean_t success;
13407 boolean_t ipif_was_up = B_FALSE;
13408 ip_stack_t *ipst = ill->ill_ipst;
13409
13410 ASSERT(IAM_WRITER_IPIF(ipif));
13411
13412 ip1dbg(("ipif_down(%s:%u)\n", ill->ill_name, ipif->ipif_id));
13413
13414 DTRACE_PROBE3(ipif__downup, char *, "ipif_down",
13415 ill_t *, ill, ipif_t *, ipif);
13416
13417 if (ipif->ipif_flags & IPIF_UP) {
13418 mutex_enter(&ill->ill_lock);
13419 ipif->ipif_flags &= ~IPIF_UP;
13420 ASSERT(ill->ill_ipif_up_count > 0);
13421 --ill->ill_ipif_up_count;
13422 mutex_exit(&ill->ill_lock);
13423 ipif_was_up = B_TRUE;
13424 /* Update status in SCTP's list */
13425 sctp_update_ipif(ipif, SCTP_IPIF_DOWN);
13426 ill_nic_event_dispatch(ipif->ipif_ill,
13427 MAP_IPIF_ID(ipif->ipif_id), NE_LIF_DOWN, NULL, 0);
13428 }
13429
13430 /*
13431 * Removal of the last ipif from an ill may result in a DL_UNBIND
13432 * being sent to the driver, and we must not send any data packets to
13433 * the driver after the DL_UNBIND_REQ. To ensure this, all the
13434 * ire and nce entries used in the data path will be cleaned
13435 * up, and we also set the ILL_DOWN_IN_PROGRESS bit to make
 * sure no new entries will be added until the ill is bound
13437 * again. The ILL_DOWN_IN_PROGRESS bit is turned off upon
13438 * receipt of a DL_BIND_ACK.
13439 */
13440 if (ill->ill_wq != NULL && !ill->ill_logical_down &&
13441 ill->ill_ipif_up_count == 0 && ill->ill_ipif_dup_count == 0 &&
13442 ill->ill_dl_up) {
13443 ill->ill_state_flags |= ILL_DOWN_IN_PROGRESS;
13444 }
13445
13446 /*
13447 * Blow away memberships we established in ipif_multicast_up().
13448 */
13449 ipif_multicast_down(ipif);
13450
13451 /*
13452 * Remove from the mapping for __sin6_src_id. We insert only
13453 * when the address is not INADDR_ANY. As IPv4 addresses are
13454 * stored as mapped addresses, we need to check for mapped
13455 * INADDR_ANY also.
13456 */
13457 if (ipif_was_up && !IN6_IS_ADDR_UNSPECIFIED(&ipif->ipif_v6lcl_addr) &&
13458 !IN6_IS_ADDR_V4MAPPED_ANY(&ipif->ipif_v6lcl_addr) &&
13459 !(ipif->ipif_flags & IPIF_NOLOCAL)) {
13460 int err;
13461
13462 err = ip_srcid_remove(&ipif->ipif_v6lcl_addr,
13463 ipif->ipif_zoneid, ipst);
13464 if (err != 0) {
13465 ip0dbg(("ipif_down: srcid_remove %d\n", err));
13466 }
13467 }
13468
13469 if (ipif_was_up) {
13470 /* only delete if we'd added ire's before */
13471 if (ipif->ipif_isv6)
13472 ipif_delete_ires_v6(ipif);
13473 else
13474 ipif_delete_ires_v4(ipif);
13475 }
13476
13477 if (ipif_was_up && ill->ill_ipif_up_count == 0) {
13478 /*
13479 * Since the interface is now down, it may have just become
13480 * inactive. Note that this needs to be done even for a
 * ipif_logical_down(), or ARP entries will not get correctly
13482 * restored when the interface comes back up.
13483 */
13484 if (IS_UNDER_IPMP(ill))
13485 ipmp_ill_refresh_active(ill);
13486 }
13487
13488 /*
 * Delete any neighbor-discovery or ARP entries for this interface.
 * The ipif has to be quiesced, so we walk all the nce's and delete
 * those that point at the ipif->ipif_ill. At the same time, we also
 * update IPMP so that ipifs for data addresses are unbound. We don't
 * call ipif_arp_down to DL_UNBIND the arp stream itself here; that
 * is deferred to ipif_down_tail().
13495 */
13496 ipif_nce_down(ipif);
13497
13498 /*
13499 * If this is the last ipif on the ill, we also need to remove
13500 * any IREs with ire_ill set. Otherwise ipif_is_quiescent() will
13501 * never succeed.
13502 */
13503 if (ill->ill_ipif_up_count == 0 && ill->ill_ipif_dup_count == 0)
13504 ire_walk_ill(0, 0, ill_downi, ill, ill);
13505
13506 /*
13507 * Walk all CONNs that can have a reference on an ire for this
13508 * ipif (we actually walk all that now have stale references).
13509 */
13510 ipcl_walk(conn_ixa_cleanup, (void *)B_TRUE, ipst);
13511
13512 /*
13513 * If mp is NULL the caller will wait for the appropriate refcnt.
 * E.g., ip_sioctl_removeif -> ipif_free -> ipif_down
13515 * and ill_delete -> ipif_free -> ipif_down
13516 */
13517 if (mp == NULL) {
13518 ASSERT(q == NULL);
13519 return (0);
13520 }
13521
13522 if (CONN_Q(q)) {
13523 connp = Q_TO_CONN(q);
13524 mutex_enter(&connp->conn_lock);
13525 } else {
13526 connp = NULL;
13527 }
13528 mutex_enter(&ill->ill_lock);
13529 /*
 * Are there any ire's pointing to this ipif that are still active?
 * If this is the last ipif going down, are there any ire's pointing
 * to this ill that are still active?
13533 */
13534 if (ipif_is_quiescent(ipif)) {
13535 mutex_exit(&ill->ill_lock);
13536 if (connp != NULL)
13537 mutex_exit(&connp->conn_lock);
13538 return (0);
13539 }
13540
13541 ip1dbg(("ipif_down: need to wait, adding pending mp %s ill %p",
13542 ill->ill_name, (void *)ill));
13543 /*
13544 * Enqueue the mp atomically in ipsq_pending_mp. When the refcount
13545 * drops down, the operation will be restarted by ipif_ill_refrele_tail
13546 * which in turn is called by the last refrele on the ipif/ill/ire.
13547 */
13548 success = ipsq_pending_mp_add(connp, ipif, q, mp, IPIF_DOWN);
13549 if (!success) {
13550 /* The conn is closing. So just return */
13551 ASSERT(connp != NULL);
13552 mutex_exit(&ill->ill_lock);
13553 mutex_exit(&connp->conn_lock);
13554 return (EINTR);
13555 }
13556
13557 mutex_exit(&ill->ill_lock);
13558 if (connp != NULL)
13559 mutex_exit(&connp->conn_lock);
13560 return (EINPROGRESS);
13561 }
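
/*
 * Summary of ipif_down() return values as implemented above:
 *
 *	0		the ipif is already quiescent, or mp was NULL and
 *			the caller will wait for the refcnts itself
 *	EINTR		the conn is closing; the mp was not enqueued
 *	EINPROGRESS	mp was enqueued on the ipsq; the operation is
 *			restarted from ipif_ill_refrele_tail() once the
 *			last reference on the ipif/ill/ire is dropped
 */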
13562
13563 int
13564 ipif_down_tail(ipif_t *ipif)
13565 {
13566 ill_t *ill = ipif->ipif_ill;
13567 int err = 0;
13568
13569 DTRACE_PROBE3(ipif__downup, char *, "ipif_down_tail",
13570 ill_t *, ill, ipif_t *, ipif);
13571
13572 /*
13573 * Skip any loopback interface (null wq).
 * If this is the last logical interface on the ill,
 * have ill_dl_down tell the driver we are gone (unbind).
 * Note that lun 0 can be ipif_down'd even though
 * other logical units are still up.
13578 * This occurs e.g. when we change a "significant" IFF_ flag.
13579 */
13580 if (ill->ill_wq != NULL && !ill->ill_logical_down &&
13581 ill->ill_ipif_up_count == 0 && ill->ill_ipif_dup_count == 0 &&
13582 ill->ill_dl_up) {
13583 ill_dl_down(ill);
13584 }
13585 if (!ipif->ipif_isv6)
13586 err = ipif_arp_down(ipif);
13587
13588 ill->ill_logical_down = 0;
13589
13590 ip_rts_ifmsg(ipif, RTSQ_DEFAULT);
13591 ip_rts_newaddrmsg(RTM_DELETE, 0, ipif, RTSQ_DEFAULT);
13592 return (err);
13593 }
13594
13595 /*
13596 * Bring interface logically down without bringing the physical interface
 * down, e.g. when the netmask is changed. This avoids long-lasting link
 * negotiations between an Ethernet interface and certain switches.
13599 */
13600 static int
13601 ipif_logical_down(ipif_t *ipif, queue_t *q, mblk_t *mp)
13602 {
13603 DTRACE_PROBE3(ipif__downup, char *, "ipif_logical_down",
13604 ill_t *, ipif->ipif_ill, ipif_t *, ipif);
13605
13606 /*
13607 * The ill_logical_down flag is a transient flag. It is set here
13608 * and is cleared once the down has completed in ipif_down_tail.
13609 * This flag does not indicate whether the ill stream is in the
13610 * DL_BOUND state with the driver. Instead this flag is used by
13611 * ipif_down_tail to determine whether to DL_UNBIND the stream with
13612 * the driver. The state of the ill stream i.e. whether it is
13613 * DL_BOUND with the driver or not is indicated by the ill_dl_up flag.
13614 */
13615 ipif->ipif_ill->ill_logical_down = 1;
13616 return (ipif_down(ipif, q, mp));
13617 }
13618
13619 /*
13620 * Initiate deallocate of an IPIF. Always called as writer. Called by
13621 * ill_delete or ip_sioctl_removeif.
13622 */
13623 static void
13624 ipif_free(ipif_t *ipif)
13625 {
13626 ip_stack_t *ipst = ipif->ipif_ill->ill_ipst;
13627
13628 ASSERT(IAM_WRITER_IPIF(ipif));
13629
13630 if (ipif->ipif_recovery_id != 0)
13631 (void) untimeout(ipif->ipif_recovery_id);
13632 ipif->ipif_recovery_id = 0;
13633
13634 /*
13635 * Take down the interface. We can be called either from ill_delete
13636 * or from ip_sioctl_removeif.
13637 */
13638 (void) ipif_down(ipif, NULL, NULL);
13639
13640 /*
13641 * Now that the interface is down, there's no chance it can still
13642 * become a duplicate. Cancel any timer that may have been set while
13643 * tearing down.
13644 */
13645 if (ipif->ipif_recovery_id != 0)
13646 (void) untimeout(ipif->ipif_recovery_id);
13647 ipif->ipif_recovery_id = 0;
13648
13649 rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
13650 /* Remove pointers to this ill in the multicast routing tables */
13651 reset_mrt_vif_ipif(ipif);
13652 /* If necessary, clear the cached source ipif rotor. */
13653 if (ipif->ipif_ill->ill_src_ipif == ipif)
13654 ipif->ipif_ill->ill_src_ipif = NULL;
13655 rw_exit(&ipst->ips_ill_g_lock);
13656 }
13657
13658 static void
13659 ipif_free_tail(ipif_t *ipif)
13660 {
13661 ip_stack_t *ipst = ipif->ipif_ill->ill_ipst;
13662
13663 /*
13664 * Need to hold both ill_g_lock and ill_lock while
13665 * inserting or removing an ipif from the linked list
13666 * of ipifs hanging off the ill.
13667 */
13668 rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
13669
13670 #ifdef DEBUG
13671 ipif_trace_cleanup(ipif);
13672 #endif
13673
/* Ask SCTP to take it out of its list */
13675 sctp_update_ipif(ipif, SCTP_IPIF_REMOVE);
13676 ip_rts_newaddrmsg(RTM_FREEADDR, 0, ipif, RTSQ_DEFAULT);
13677
13678 /* Get it out of the ILL interface list. */
13679 ipif_remove(ipif);
13680 rw_exit(&ipst->ips_ill_g_lock);
13681
13682 ASSERT(!(ipif->ipif_flags & (IPIF_UP | IPIF_DUPLICATE)));
13683 ASSERT(ipif->ipif_recovery_id == 0);
13684 ASSERT(ipif->ipif_ire_local == NULL);
13685 ASSERT(ipif->ipif_ire_if == NULL);
13686
13687 /* Free the memory. */
13688 mi_free(ipif);
13689 }
13690
13691 /*
13692 * Sets `buf' to an ipif name of the form "ill_name:id", or "ill_name" if "id"
13693 * is zero.
13694 */
13695 void
13696 ipif_get_name(const ipif_t *ipif, char *buf, int len)
13697 {
13698 char lbuf[LIFNAMSIZ];
13699 char *name;
13700 size_t name_len;
13701
13702 buf[0] = '\0';
13703 name = ipif->ipif_ill->ill_name;
13704 name_len = ipif->ipif_ill->ill_name_length;
13705 if (ipif->ipif_id != 0) {
13706 (void) sprintf(lbuf, "%s%c%d", name, IPIF_SEPARATOR_CHAR,
13707 ipif->ipif_id);
13708 name = lbuf;
13709 name_len = mi_strlen(name) + 1;
13710 }
13711 len -= 1;
13712 buf[len] = '\0';
13713 len = MIN(len, name_len);
13714 bcopy(name, buf, len);
13715 }
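
/*
 * Example: for an ill named "hme0" (a hypothetical driver instance),
 * ipif_id 0 yields "hme0" while ipif_id 1 yields "hme0:1"; the result
 * is silently truncated to fit the `len'-byte buffer.
 */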
13716
13717 /*
13718 * Sets `buf' to an ill name.
13719 */
13720 void
13721 ill_get_name(const ill_t *ill, char *buf, int len)
13722 {
13723 char *name;
13724 size_t name_len;
13725
13726 name = ill->ill_name;
13727 name_len = ill->ill_name_length;
13728 len -= 1;
13729 buf[len] = '\0';
13730 len = MIN(len, name_len);
13731 bcopy(name, buf, len);
13732 }
13733
13734 /*
13735 * Find an IPIF based on the name passed in. Names can be of the form <phys>
13736 * (e.g., le0) or <phys>:<#> (e.g., le0:1). When there is no colon, the
13737 * implied unit id is zero. <phys> must correspond to the name of an ILL.
13738 * (May be called as writer.)
13739 */
13740 static ipif_t *
13741 ipif_lookup_on_name(char *name, size_t namelen, boolean_t do_alloc,
13742 boolean_t *exists, boolean_t isv6, zoneid_t zoneid, ip_stack_t *ipst)
13743 {
13744 char *cp;
13745 char *endp;
13746 long id;
13747 ill_t *ill;
13748 ipif_t *ipif;
13749 uint_t ire_type;
13750 boolean_t did_alloc = B_FALSE;
13751 char last;
13752
13753 /*
 * If the caller wants us to create the ipif, make sure we have a
13755 * valid zoneid
13756 */
13757 ASSERT(!do_alloc || zoneid != ALL_ZONES);
13758
13759 if (namelen == 0) {
13760 return (NULL);
13761 }
13762
13763 *exists = B_FALSE;
13764 /* Look for a colon in the name. */
13765 endp = &name[namelen];
13766 for (cp = endp; --cp > name; ) {
13767 if (*cp == IPIF_SEPARATOR_CHAR)
13768 break;
13769 }
13770
13771 if (*cp == IPIF_SEPARATOR_CHAR) {
13772 /*
13773 * Reject any non-decimal aliases for logical
13774 * interfaces. Aliases with leading zeroes
13775 * are also rejected as they introduce ambiguity
13776 * in the naming of the interfaces.
 * In order to conform with existing semantics,
 * and to not break any programs/scripts relying
13779 * on that behaviour, if<0>:0 is considered to be
13780 * a valid interface.
13781 *
13782 * If alias has two or more digits and the first
13783 * is zero, fail.
13784 */
13785 if (&cp[2] < endp && cp[1] == '0') {
13786 return (NULL);
13787 }
13788 }
13789
13790 if (cp <= name) {
13791 cp = endp;
13792 }
13793 last = *cp;
13794 *cp = '\0';
13795
13796 /*
 * Look up the ILL, based on the portion of the name
 * before the colon. ill_lookup_on_name returns a held ill.
 * did_alloc is a temporary used to check whether the ill
 * already existed; if so, ill_lookup_on_name clears it.
13801 */
13802 ill = ill_lookup_on_name(name, do_alloc, isv6,
13803 &did_alloc, ipst);
13804 *cp = last;
13805 if (ill == NULL)
13806 return (NULL);
13807
13808 /* Establish the unit number in the name. */
13809 id = 0;
13810 if (cp < endp && *endp == '\0') {
13811 /* If there was a colon, the unit number follows. */
13812 cp++;
13813 if (ddi_strtol(cp, NULL, 0, &id) != 0) {
13814 ill_refrele(ill);
13815 return (NULL);
13816 }
13817 }
13818
13819 mutex_enter(&ill->ill_lock);
13820 /* Now see if there is an IPIF with this unit number. */
13821 for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
13822 if (ipif->ipif_id == id) {
13823 if (zoneid != ALL_ZONES &&
13824 zoneid != ipif->ipif_zoneid &&
13825 ipif->ipif_zoneid != ALL_ZONES) {
13826 mutex_exit(&ill->ill_lock);
13827 ill_refrele(ill);
13828 return (NULL);
13829 }
13830 if (IPIF_CAN_LOOKUP(ipif)) {
13831 ipif_refhold_locked(ipif);
13832 mutex_exit(&ill->ill_lock);
13833 if (!did_alloc)
13834 *exists = B_TRUE;
13835 /*
13836 * Drop locks before calling ill_refrele
13837 * since it can potentially call into
 * ipif_ill_refrele_tail, which can end up
 * trying to acquire any lock.
13840 */
13841 ill_refrele(ill);
13842 return (ipif);
13843 }
13844 }
13845 }
13846
13847 if (!do_alloc) {
13848 mutex_exit(&ill->ill_lock);
13849 ill_refrele(ill);
13850 return (NULL);
13851 }
13852
13853 /*
13854 * If none found, atomically allocate and return a new one.
13855 * Historically, we used IRE_LOOPBACK only for lun 0, and IRE_LOCAL
13856 * to support "receive only" use of lo0:1 etc. as is still done
13857 * below as an initial guess.
 * However, this is now likely to be overridden later in ipif_up_done()
13859 * when we know for sure what address has been configured on the
13860 * interface, since we might have more than one loopback interface
13861 * with a loopback address, e.g. in the case of zones, and all the
13862 * interfaces with loopback addresses need to be marked IRE_LOOPBACK.
13863 */
13864 if (ill->ill_net_type == IRE_LOOPBACK && id == 0)
13865 ire_type = IRE_LOOPBACK;
13866 else
13867 ire_type = IRE_LOCAL;
13868 ipif = ipif_allocate(ill, id, ire_type, B_TRUE, B_TRUE, NULL);
13869 if (ipif != NULL)
13870 ipif_refhold_locked(ipif);
13871 mutex_exit(&ill->ill_lock);
13872 ill_refrele(ill);
13873 return (ipif);
13874 }
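
/*
 * Parsing examples for the rules above (hypothetical names): "le0" and
 * "le0:0" both resolve to unit zero of ill "le0", "le0:1" to unit one,
 * while "le0:01" is rejected because of the leading zero.
 */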
13875
13876 /*
13877 * Variant of the above that queues the request on the ipsq when
13878 * IPIF_CHANGING is set.
13879 */
13880 static ipif_t *
13881 ipif_lookup_on_name_async(char *name, size_t namelen, boolean_t isv6,
13882 zoneid_t zoneid, queue_t *q, mblk_t *mp, ipsq_func_t func, int *error,
13883 ip_stack_t *ipst)
13884 {
13885 char *cp;
13886 char *endp;
13887 long id;
13888 ill_t *ill;
13889 ipif_t *ipif;
13890 boolean_t did_alloc = B_FALSE;
13891 ipsq_t *ipsq;
13892
13893 if (error != NULL)
13894 *error = 0;
13895
13896 if (namelen == 0) {
13897 if (error != NULL)
13898 *error = ENXIO;
13899 return (NULL);
13900 }
13901
13902 /* Look for a colon in the name. */
13903 endp = &name[namelen];
13904 for (cp = endp; --cp > name; ) {
13905 if (*cp == IPIF_SEPARATOR_CHAR)
13906 break;
13907 }
13908
13909 if (*cp == IPIF_SEPARATOR_CHAR) {
13910 /*
13911 * Reject any non-decimal aliases for logical
13912 * interfaces. Aliases with leading zeroes
13913 * are also rejected as they introduce ambiguity
13914 * in the naming of the interfaces.
 * In order to conform with existing semantics,
 * and to not break any programs/scripts relying
13917 * on that behaviour, if<0>:0 is considered to be
13918 * a valid interface.
13919 *
13920 * If alias has two or more digits and the first
13921 * is zero, fail.
13922 */
13923 if (&cp[2] < endp && cp[1] == '0') {
13924 if (error != NULL)
13925 *error = EINVAL;
13926 return (NULL);
13927 }
13928 }
13929
13930 if (cp <= name) {
13931 cp = endp;
13932 } else {
13933 *cp = '\0';
13934 }
13935
13936 /*
 * Look up the ILL, based on the portion of the name
 * before the colon. ill_lookup_on_name returns a held ill.
 * did_alloc is a temporary used to check whether the ill
 * already existed; if so, ill_lookup_on_name clears it.
13941 */
13942 ill = ill_lookup_on_name(name, B_FALSE, isv6, &did_alloc, ipst);
13943 if (cp != endp)
13944 *cp = IPIF_SEPARATOR_CHAR;
13945 if (ill == NULL)
13946 return (NULL);
13947
13948 /* Establish the unit number in the name. */
13949 id = 0;
13950 if (cp < endp && *endp == '\0') {
13951 /* If there was a colon, the unit number follows. */
13952 cp++;
13953 if (ddi_strtol(cp, NULL, 0, &id) != 0) {
13954 ill_refrele(ill);
13955 if (error != NULL)
13956 *error = ENXIO;
13957 return (NULL);
13958 }
13959 }
13960
13961 GRAB_CONN_LOCK(q);
13962 mutex_enter(&ill->ill_lock);
13963 /* Now see if there is an IPIF with this unit number. */
13964 for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
13965 if (ipif->ipif_id == id) {
13966 if (zoneid != ALL_ZONES &&
13967 zoneid != ipif->ipif_zoneid &&
13968 ipif->ipif_zoneid != ALL_ZONES) {
13969 mutex_exit(&ill->ill_lock);
13970 RELEASE_CONN_LOCK(q);
13971 ill_refrele(ill);
13972 if (error != NULL)
13973 *error = ENXIO;
13974 return (NULL);
13975 }
13976
13977 if (!(IPIF_IS_CHANGING(ipif) ||
13978 IPIF_IS_CONDEMNED(ipif)) ||
13979 IAM_WRITER_IPIF(ipif)) {
13980 ipif_refhold_locked(ipif);
13981 mutex_exit(&ill->ill_lock);
13982 /*
13983 * Drop locks before calling ill_refrele
13984 * since it can potentially call into
 * ipif_ill_refrele_tail, which can end up
 * trying to acquire any lock.
13987 */
13988 RELEASE_CONN_LOCK(q);
13989 ill_refrele(ill);
13990 return (ipif);
13991 } else if (q != NULL && !IPIF_IS_CONDEMNED(ipif)) {
13992 ipsq = ill->ill_phyint->phyint_ipsq;
13993 mutex_enter(&ipsq->ipsq_lock);
13994 mutex_enter(&ipsq->ipsq_xop->ipx_lock);
13995 mutex_exit(&ill->ill_lock);
13996 ipsq_enq(ipsq, q, mp, func, NEW_OP, ill);
13997 mutex_exit(&ipsq->ipsq_xop->ipx_lock);
13998 mutex_exit(&ipsq->ipsq_lock);
13999 RELEASE_CONN_LOCK(q);
14000 ill_refrele(ill);
14001 if (error != NULL)
14002 *error = EINPROGRESS;
14003 return (NULL);
14004 }
14005 }
14006 }
14007 RELEASE_CONN_LOCK(q);
14008 mutex_exit(&ill->ill_lock);
14009 ill_refrele(ill);
14010 if (error != NULL)
14011 *error = ENXIO;
14012 return (NULL);
14013 }
14014
14015 /*
14016 * This routine is called whenever a new address comes up on an ipif. If
14017 * we are configured to respond to address mask requests, then we are supposed
14018 * to broadcast an address mask reply at this time. This routine is also
14019 * called if we are already up, but a netmask change is made. This is legal
14020 * but might not make the system manager very popular. (May be called
14021 * as writer.)
14022 */
14023 void
14024 ipif_mask_reply(ipif_t *ipif)
14025 {
14026 icmph_t *icmph;
14027 ipha_t *ipha;
14028 mblk_t *mp;
14029 ip_stack_t *ipst = ipif->ipif_ill->ill_ipst;
14030 ip_xmit_attr_t ixas;
14031
14032 #define REPLY_LEN (sizeof (icmp_ipha) + sizeof (icmph_t) + IP_ADDR_LEN)
14033
14034 if (!ipst->ips_ip_respond_to_address_mask_broadcast)
14035 return;
14036
14037 /* ICMP mask reply is IPv4 only */
14038 ASSERT(!ipif->ipif_isv6);
14039 /* ICMP mask reply is not for a loopback interface */
14040 ASSERT(ipif->ipif_ill->ill_wq != NULL);
14041
14042 if (ipif->ipif_lcl_addr == INADDR_ANY)
14043 return;
14044
14045 mp = allocb(REPLY_LEN, BPRI_HI);
14046 if (mp == NULL)
14047 return;
14048 mp->b_wptr = mp->b_rptr + REPLY_LEN;
14049
14050 ipha = (ipha_t *)mp->b_rptr;
14051 bzero(ipha, REPLY_LEN);
14052 *ipha = icmp_ipha;
14053 ipha->ipha_ttl = ipst->ips_ip_broadcast_ttl;
14054 ipha->ipha_src = ipif->ipif_lcl_addr;
14055 ipha->ipha_dst = ipif->ipif_brd_addr;
14056 ipha->ipha_length = htons(REPLY_LEN);
14057 ipha->ipha_ident = 0;
14058
14059 icmph = (icmph_t *)&ipha[1];
14060 icmph->icmph_type = ICMP_ADDRESS_MASK_REPLY;
14061 bcopy(&ipif->ipif_net_mask, &icmph[1], IP_ADDR_LEN);
14062 icmph->icmph_checksum = IP_CSUM(mp, sizeof (ipha_t), 0);
14063
14064 bzero(&ixas, sizeof (ixas));
14065 ixas.ixa_flags = IXAF_BASIC_SIMPLE_V4;
14066 ixas.ixa_zoneid = ALL_ZONES;
14067 ixas.ixa_ifindex = 0;
14068 ixas.ixa_ipst = ipst;
14069 ixas.ixa_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
14070 (void) ip_output_simple(mp, &ixas);
14071 ixa_cleanup(&ixas);
14072 #undef REPLY_LEN
14073 }
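
/*
 * Layout of the REPLY_LEN-byte datagram built above:
 *
 *	[ ipha_t (from icmp_ipha) ][ icmph_t, type 18 (mask reply) ][ mask ]
 *
 * For example, an ipif with netmask 255.255.255.0 replies with the mask
 * bytes ff ff ff 00, addressed to its broadcast address.
 */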
14074
14075 /*
14076 * Join the ipif specific multicast groups.
14077 * Must be called after a mapping has been set up in the resolver. (Always
14078 * called as writer.)
14079 */
14080 void
14081 ipif_multicast_up(ipif_t *ipif)
14082 {
14083 int err;
14084 ill_t *ill;
14085 ilm_t *ilm;
14086
14087 ASSERT(IAM_WRITER_IPIF(ipif));
14088
14089 ill = ipif->ipif_ill;
14090
14091 ip1dbg(("ipif_multicast_up\n"));
14092 if (!(ill->ill_flags & ILLF_MULTICAST) ||
14093 ipif->ipif_allhosts_ilm != NULL)
14094 return;
14095
14096 if (ipif->ipif_isv6) {
14097 in6_addr_t v6allmc = ipv6_all_hosts_mcast;
14098 in6_addr_t v6solmc = ipv6_solicited_node_mcast;
14099
14100 v6solmc.s6_addr32[3] |= ipif->ipif_v6lcl_addr.s6_addr32[3];
14101
14102 if (IN6_IS_ADDR_UNSPECIFIED(&ipif->ipif_v6lcl_addr))
14103 return;
14104
14105 ip1dbg(("ipif_multicast_up - addmulti\n"));
14106
14107 /*
14108 * Join the all hosts multicast address. We skip this for
14109 * underlying IPMP interfaces since they should be invisible.
14110 */
14111 if (!IS_UNDER_IPMP(ill)) {
14112 ilm = ip_addmulti(&v6allmc, ill, ipif->ipif_zoneid,
14113 &err);
14114 if (ilm == NULL) {
14115 ASSERT(err != 0);
14116 ip0dbg(("ipif_multicast_up: "
14117 "all_hosts_mcast failed %d\n", err));
14118 return;
14119 }
14120 ipif->ipif_allhosts_ilm = ilm;
14121 }
14122
14123 /*
14124 * Enable multicast for the solicited node multicast address.
14125 * If IPMP we need to put the membership on the upper ill.
14126 */
14127 if (!(ipif->ipif_flags & IPIF_NOLOCAL)) {
14128 ill_t *mcast_ill = NULL;
14129 boolean_t need_refrele;
14130
14131 if (IS_UNDER_IPMP(ill) &&
14132 (mcast_ill = ipmp_ill_hold_ipmp_ill(ill)) != NULL) {
14133 need_refrele = B_TRUE;
14134 } else {
14135 mcast_ill = ill;
14136 need_refrele = B_FALSE;
14137 }
14138
14139 ilm = ip_addmulti(&v6solmc, mcast_ill,
14140 ipif->ipif_zoneid, &err);
14141 if (need_refrele)
14142 ill_refrele(mcast_ill);
14143
14144 if (ilm == NULL) {
14145 ASSERT(err != 0);
14146 ip0dbg(("ipif_multicast_up: solicited MC"
14147 " failed %d\n", err));
14148 if ((ilm = ipif->ipif_allhosts_ilm) != NULL) {
14149 ipif->ipif_allhosts_ilm = NULL;
14150 (void) ip_delmulti(ilm);
14151 }
14152 return;
14153 }
14154 ipif->ipif_solmulti_ilm = ilm;
14155 }
14156 } else {
14157 in6_addr_t v6group;
14158
14159 if (ipif->ipif_lcl_addr == INADDR_ANY || IS_UNDER_IPMP(ill))
14160 return;
14161
14162 /* Join the all hosts multicast address */
14163 ip1dbg(("ipif_multicast_up - addmulti\n"));
14164 IN6_IPADDR_TO_V4MAPPED(htonl(INADDR_ALLHOSTS_GROUP), &v6group);
14165
14166 ilm = ip_addmulti(&v6group, ill, ipif->ipif_zoneid, &err);
14167 if (ilm == NULL) {
14168 ASSERT(err != 0);
14169 ip0dbg(("ipif_multicast_up: failed %d\n", err));
14170 return;
14171 }
14172 ipif->ipif_allhosts_ilm = ilm;
14173 }
14174 }
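
/*
 * Solicited-node mapping example (per RFC 4291): ORing the low 32 bits
 * of the local address into ipv6_solicited_node_mcast (ff02::1:ff00:0)
 * maps, e.g., fe80::aa:1234:5678 to ff02::1:ff34:5678.
 */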
14175
14176 /*
14177 * Blow away any multicast groups that we joined in ipif_multicast_up().
14178 * (ilms from explicit memberships are handled in conn_update_ill.)
14179 */
14180 void
14181 ipif_multicast_down(ipif_t *ipif)
14182 {
14183 ASSERT(IAM_WRITER_IPIF(ipif));
14184
14185 ip1dbg(("ipif_multicast_down\n"));
14186
14187 if (ipif->ipif_allhosts_ilm != NULL) {
14188 (void) ip_delmulti(ipif->ipif_allhosts_ilm);
14189 ipif->ipif_allhosts_ilm = NULL;
14190 }
14191 if (ipif->ipif_solmulti_ilm != NULL) {
14192 (void) ip_delmulti(ipif->ipif_solmulti_ilm);
14193 ipif->ipif_solmulti_ilm = NULL;
14194 }
14195 }
14196
14197 /*
14198 * Used when an interface comes up to recreate any extra routes on this
14199 * interface.
14200 */
14201 int
14202 ill_recover_saved_ire(ill_t *ill)
14203 {
14204 mblk_t *mp;
14205 ip_stack_t *ipst = ill->ill_ipst;
14206
14207 ip1dbg(("ill_recover_saved_ire(%s)", ill->ill_name));
14208
14209 mutex_enter(&ill->ill_saved_ire_lock);
14210 for (mp = ill->ill_saved_ire_mp; mp != NULL; mp = mp->b_cont) {
14211 ire_t *ire, *nire;
14212 ifrt_t *ifrt;
14213
14214 ifrt = (ifrt_t *)mp->b_rptr;
14215 /*
14216 * Create a copy of the IRE with the saved address and netmask.
14217 */
14218 if (ill->ill_isv6) {
14219 ire = ire_create_v6(
14220 &ifrt->ifrt_v6addr,
14221 &ifrt->ifrt_v6mask,
14222 &ifrt->ifrt_v6gateway_addr,
14223 ifrt->ifrt_type,
14224 ill,
14225 ifrt->ifrt_zoneid,
14226 ifrt->ifrt_flags,
14227 NULL,
14228 ipst);
14229 } else {
14230 ire = ire_create(
14231 (uint8_t *)&ifrt->ifrt_addr,
14232 (uint8_t *)&ifrt->ifrt_mask,
14233 (uint8_t *)&ifrt->ifrt_gateway_addr,
14234 ifrt->ifrt_type,
14235 ill,
14236 ifrt->ifrt_zoneid,
14237 ifrt->ifrt_flags,
14238 NULL,
14239 ipst);
14240 }
14241 if (ire == NULL) {
14242 mutex_exit(&ill->ill_saved_ire_lock);
14243 return (ENOMEM);
14244 }
14245
14246 if (ifrt->ifrt_flags & RTF_SETSRC) {
14247 if (ill->ill_isv6) {
14248 ire->ire_setsrc_addr_v6 =
14249 ifrt->ifrt_v6setsrc_addr;
14250 } else {
14251 ire->ire_setsrc_addr = ifrt->ifrt_setsrc_addr;
14252 }
14253 }
14254
14255 /*
14256 * Some software (for example, GateD and Sun Cluster) attempts
14257 * to create (what amount to) IRE_PREFIX routes with the
14258 * loopback address as the gateway. This is primarily done to
14259 * set up prefixes with the RTF_REJECT flag set (for example,
14260 * when generating aggregate routes.)
14261 *
14262 * If the IRE type (as defined by ill->ill_net_type) is
14263 * IRE_LOOPBACK, then we map the request into a
14264 * IRE_IF_NORESOLVER.
14265 */
14266 if (ill->ill_net_type == IRE_LOOPBACK)
14267 ire->ire_type = IRE_IF_NORESOLVER;
14268
14269 /*
 * The ire is held by ire_add; it will be refrele'd toward
 * the end of ipif_up_done.
14272 */
14273 nire = ire_add(ire);
14274 /*
14275 * Check if it was a duplicate entry. This handles
14276 * the case of two racing route adds for the same route
14277 */
14278 if (nire == NULL) {
14279 ip1dbg(("ill_recover_saved_ire: FAILED\n"));
14280 } else if (nire != ire) {
14281 ip1dbg(("ill_recover_saved_ire: duplicate ire %p\n",
14282 (void *)nire));
14283 ire_delete(nire);
14284 } else {
14285 ip1dbg(("ill_recover_saved_ire: added ire %p\n",
14286 (void *)nire));
14287 }
14288 if (nire != NULL)
14289 ire_refrele(nire);
14290 }
14291 mutex_exit(&ill->ill_saved_ire_lock);
14292 return (0);
14293 }
14294
14295 /*
14296 * Used to set the netmask and broadcast address to default values when the
14297 * interface is brought up. (Always called as writer.)
14298 */
14299 static void
14300 ipif_set_default(ipif_t *ipif)
14301 {
14302 ASSERT(MUTEX_HELD(&ipif->ipif_ill->ill_lock));
14303
14304 if (!ipif->ipif_isv6) {
14305 /*
14306 * Interface holds an IPv4 address. Default
14307 * mask is the natural netmask.
14308 */
14309 if (!ipif->ipif_net_mask) {
14310 ipaddr_t v4mask;
14311
14312 v4mask = ip_net_mask(ipif->ipif_lcl_addr);
14313 V4MASK_TO_V6(v4mask, ipif->ipif_v6net_mask);
14314 }
14315 if (ipif->ipif_flags & IPIF_POINTOPOINT) {
14316 /* ipif_subnet is ipif_pp_dst_addr for pt-pt */
14317 ipif->ipif_v6subnet = ipif->ipif_v6pp_dst_addr;
14318 } else {
14319 V6_MASK_COPY(ipif->ipif_v6lcl_addr,
14320 ipif->ipif_v6net_mask, ipif->ipif_v6subnet);
14321 }
14322 /*
14323 * NOTE: SunOS 4.X does this even if the broadcast address
 * has already been set, thus we do the same here.
14325 */
14326 if (ipif->ipif_flags & IPIF_BROADCAST) {
14327 ipaddr_t v4addr;
14328
14329 v4addr = ipif->ipif_subnet | ~ipif->ipif_net_mask;
14330 IN6_IPADDR_TO_V4MAPPED(v4addr, &ipif->ipif_v6brd_addr);
14331 }
14332 } else {
14333 /*
14334 * Interface holds an IPv6-only address. Default
14335 * mask is all-ones.
14336 */
14337 if (IN6_IS_ADDR_UNSPECIFIED(&ipif->ipif_v6net_mask))
14338 ipif->ipif_v6net_mask = ipv6_all_ones;
14339 if (ipif->ipif_flags & IPIF_POINTOPOINT) {
14340 /* ipif_subnet is ipif_pp_dst_addr for pt-pt */
14341 ipif->ipif_v6subnet = ipif->ipif_v6pp_dst_addr;
14342 } else {
14343 V6_MASK_COPY(ipif->ipif_v6lcl_addr,
14344 ipif->ipif_v6net_mask, ipif->ipif_v6subnet);
14345 }
14346 }
14347 }
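
/*
 * IPv4 defaulting example: for a local address of 192.168.1.5 with no
 * netmask configured, ip_net_mask() yields the natural (classful) mask
 * 255.255.255.0, so ipif_subnet defaults to 192.168.1.0 and, when
 * IPIF_BROADCAST is set, ipif_brd_addr to 192.168.1.255 (subnet | ~mask).
 */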
14348
14349 /*
14350 * Return 0 if this address can be used as local address without causing
14351 * duplicate address problems. Otherwise, return EADDRNOTAVAIL if the address
14352 * is already up on a different ill, and EADDRINUSE if it's up on the same ill.
14353 * Note that the same IPv6 link-local address is allowed as long as the ills
14354 * are not on the same link.
14355 */
14356 int
14357 ip_addr_availability_check(ipif_t *new_ipif)
14358 {
14359 in6_addr_t our_v6addr;
14360 ill_t *ill;
14361 ipif_t *ipif;
14362 ill_walk_context_t ctx;
14363 ip_stack_t *ipst = new_ipif->ipif_ill->ill_ipst;
14364
14365 ASSERT(IAM_WRITER_IPIF(new_ipif));
14366 ASSERT(MUTEX_HELD(&ipst->ips_ip_addr_avail_lock));
14367 ASSERT(RW_READ_HELD(&ipst->ips_ill_g_lock));
14368
14369 new_ipif->ipif_flags &= ~IPIF_UNNUMBERED;
14370 if (IN6_IS_ADDR_UNSPECIFIED(&new_ipif->ipif_v6lcl_addr) ||
14371 IN6_IS_ADDR_V4MAPPED_ANY(&new_ipif->ipif_v6lcl_addr))
14372 return (0);
14373
14374 our_v6addr = new_ipif->ipif_v6lcl_addr;
14375
14376 if (new_ipif->ipif_isv6)
14377 ill = ILL_START_WALK_V6(&ctx, ipst);
14378 else
14379 ill = ILL_START_WALK_V4(&ctx, ipst);
14380
14381 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
14382 for (ipif = ill->ill_ipif; ipif != NULL;
14383 ipif = ipif->ipif_next) {
14384 if ((ipif == new_ipif) ||
14385 !(ipif->ipif_flags & IPIF_UP) ||
14386 (ipif->ipif_flags & IPIF_UNNUMBERED) ||
14387 !IN6_ARE_ADDR_EQUAL(&ipif->ipif_v6lcl_addr,
14388 &our_v6addr))
14389 continue;
14390
14391 if (new_ipif->ipif_flags & IPIF_POINTOPOINT)
14392 new_ipif->ipif_flags |= IPIF_UNNUMBERED;
14393 else if (ipif->ipif_flags & IPIF_POINTOPOINT)
14394 ipif->ipif_flags |= IPIF_UNNUMBERED;
14395 else if ((IN6_IS_ADDR_LINKLOCAL(&our_v6addr) ||
14396 IN6_IS_ADDR_SITELOCAL(&our_v6addr)) &&
14397 !IS_ON_SAME_LAN(ill, new_ipif->ipif_ill))
14398 continue;
14399 else if (new_ipif->ipif_zoneid != ipif->ipif_zoneid &&
14400 ipif->ipif_zoneid != ALL_ZONES && IS_LOOPBACK(ill))
14401 continue;
14402 else if (new_ipif->ipif_ill == ill)
14403 return (EADDRINUSE);
14404 else
14405 return (EADDRNOTAVAIL);
14406 }
14407 }
14408
14409 return (0);
14410 }
14411
14412 /*
14413 * Bring up an ipif: bring up arp/ndp, bring up the DLPI stream, and add
14414 * IREs for the ipif.
14415 * When the routine returns EINPROGRESS then mp has been consumed and
14416 * the ioctl will be acked from ip_rput_dlpi.
14417 */
14418 int
14419 ipif_up(ipif_t *ipif, queue_t *q, mblk_t *mp)
14420 {
14421 ill_t *ill = ipif->ipif_ill;
14422 boolean_t isv6 = ipif->ipif_isv6;
14423 int err = 0;
14424 boolean_t success;
14425 uint_t ipif_orig_id;
14426 ip_stack_t *ipst = ill->ill_ipst;
14427
14428 ASSERT(IAM_WRITER_IPIF(ipif));
14429
14430 ip1dbg(("ipif_up(%s:%u)\n", ill->ill_name, ipif->ipif_id));
14431 DTRACE_PROBE3(ipif__downup, char *, "ipif_up",
14432 ill_t *, ill, ipif_t *, ipif);
14433
14434 /* Shouldn't get here if it is already up. */
14435 if (ipif->ipif_flags & IPIF_UP)
14436 return (EALREADY);
14437
14438 /*
14439 * If this is a request to bring up a data address on an interface
14440 * under IPMP, then move the address to its IPMP meta-interface and
14441 * try to bring it up. One complication is that the zeroth ipif for
14442 * an ill is special, in that every ill always has one, and that code
 * throughout IP dereferences ill->ill_ipif without holding any locks.
14444 */
14445 if (IS_UNDER_IPMP(ill) && ipmp_ipif_is_dataaddr(ipif) &&
14446 (!ipif->ipif_isv6 || !V6_IPIF_LINKLOCAL(ipif))) {
14447 ipif_t *stubipif = NULL, *moveipif = NULL;
14448 ill_t *ipmp_ill = ipmp_illgrp_ipmp_ill(ill->ill_grp);
14449
14450 /*
14451 * The ipif being brought up should be quiesced. If it's not,
14452 * something has gone amiss and we need to bail out. (If it's
14453 * quiesced, we know it will remain so via IPIF_CONDEMNED.)
14454 */
14455 mutex_enter(&ill->ill_lock);
14456 if (!ipif_is_quiescent(ipif)) {
14457 mutex_exit(&ill->ill_lock);
14458 return (EINVAL);
14459 }
14460 mutex_exit(&ill->ill_lock);
14461
14462 /*
14463 * If we're going to need to allocate ipifs, do it prior
14464 * to starting the move (and grabbing locks).
14465 */
14466 if (ipif->ipif_id == 0) {
14467 if ((moveipif = ipif_allocate(ill, 0, IRE_LOCAL, B_TRUE,
14468 B_FALSE, &err)) == NULL) {
14469 return (err);
14470 }
14471 if ((stubipif = ipif_allocate(ill, 0, IRE_LOCAL, B_TRUE,
14472 B_FALSE, &err)) == NULL) {
14473 mi_free(moveipif);
14474 return (err);
14475 }
14476 }
14477
14478 /*
14479 * Grab or transfer the ipif to move. During the move, keep
14480 * ill_g_lock held to prevent any ill walker threads from
14481 * seeing things in an inconsistent state.
14482 */
14483 rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
14484 if (ipif->ipif_id != 0) {
14485 ipif_remove(ipif);
14486 } else {
14487 ipif_transfer(ipif, moveipif, stubipif);
14488 ipif = moveipif;
14489 }
14490
14491 /*
14492 * Place the ipif on the IPMP ill. If the zeroth ipif on
14493 * the IPMP ill is a stub (0.0.0.0 down address) then we
14494 * replace that one. Otherwise, pick the next available slot.
14495 */
14496 ipif->ipif_ill = ipmp_ill;
14497 ipif_orig_id = ipif->ipif_id;
14498
14499 if (ipmp_ipif_is_stubaddr(ipmp_ill->ill_ipif)) {
14500 ipif_transfer(ipif, ipmp_ill->ill_ipif, NULL);
14501 ipif = ipmp_ill->ill_ipif;
14502 } else {
14503 ipif->ipif_id = -1;
14504 if ((err = ipif_insert(ipif, B_FALSE)) != 0) {
14505 /*
14506 * No more available ipif_id's -- put it back
14507 * on the original ill and fail the operation.
14508 * Since we're writer on the ill, we can be
14509 * sure our old slot is still available.
14510 */
14511 ipif->ipif_id = ipif_orig_id;
14512 ipif->ipif_ill = ill;
14513 if (ipif_orig_id == 0) {
14514 ipif_transfer(ipif, ill->ill_ipif,
14515 NULL);
14516 } else {
14517 VERIFY(ipif_insert(ipif, B_FALSE) == 0);
14518 }
14519 rw_exit(&ipst->ips_ill_g_lock);
14520 return (err);
14521 }
14522 }
14523 rw_exit(&ipst->ips_ill_g_lock);
14524
14525 /*
14526 * Tell SCTP that the ipif has moved. Note that even if we
14527 * had to allocate a new ipif, the original sequence id was
14528 * preserved and therefore SCTP won't know.
14529 */
14530 sctp_move_ipif(ipif, ill, ipmp_ill);
14531
14532 /*
14533 * If the ipif being brought up was on slot zero, then we
14534 * first need to bring up the placeholder we stuck there. In
14535 * ip_rput_dlpi_writer(), arp_bringup_done(), or the recursive
14536 * call to ipif_up() itself, if we successfully bring up the
14537 * placeholder, we'll check ill_move_ipif and bring it up too.
14538 */
14539 if (ipif_orig_id == 0) {
14540 ASSERT(ill->ill_move_ipif == NULL);
14541 ill->ill_move_ipif = ipif;
14542 if ((err = ipif_up(ill->ill_ipif, q, mp)) == 0)
14543 ASSERT(ill->ill_move_ipif == NULL);
14544 if (err != EINPROGRESS)
14545 ill->ill_move_ipif = NULL;
14546 return (err);
14547 }
14548
14549 /*
14550 * Bring it up on the IPMP ill.
14551 */
14552 return (ipif_up(ipif, q, mp));
14553 }
14554
14555 /* Skip arp/ndp for any loopback interface. */
14556 if (ill->ill_wq != NULL) {
14557 conn_t *connp = CONN_Q(q) ? Q_TO_CONN(q) : NULL;
14558 ipsq_t *ipsq = ill->ill_phyint->phyint_ipsq;
14559
14560 if (!ill->ill_dl_up) {
14561 /*
14562 * ill_dl_up is not yet set. i.e. we are yet to
14563 * DL_BIND with the driver and this is the first
14564 * logical interface on the ill to become "up".
14565 * Tell the driver to get going (via DL_BIND_REQ).
 * Note that changing "significant" IFF_ flags,
 * address/netmask, etc. causes a down/up dance but
 * does not cause an unbind (DL_UNBIND) with the driver.
14569 */
14570 if ((err = ill_dl_up(ill, ipif)) != 0) {
14571 return (err);
14572 }
14573 }
14574
14575 /* Reject bringing up interfaces with unusable IP addresses */
14576 if (!ill_ipcheck_addr(ill, &ipif->ipif_v6lcl_addr)) {
14577 return (EPERM);
14578 }
14579
14580 /*
 * ipif_resolver_up may end up needing to bind/attach
14582 * the ARP stream, which in turn necessitates a
14583 * DLPI message exchange with the driver. ioctls are
14584 * serialized and so we cannot send more than one
14585 * interface up message at a time. If ipif_resolver_up
14586 * does need to wait for the DLPI handshake for the ARP stream,
14587 * we get EINPROGRESS and we will complete in arp_bringup_done.
14588 */
14589
14590 ASSERT(connp != NULL || !CONN_Q(q));
14591 if (connp != NULL)
14592 mutex_enter(&connp->conn_lock);
14593 mutex_enter(&ill->ill_lock);
14594 success = ipsq_pending_mp_add(connp, ipif, q, mp, 0);
14595 mutex_exit(&ill->ill_lock);
14596 if (connp != NULL)
14597 mutex_exit(&connp->conn_lock);
14598 if (!success)
14599 return (EINTR);
14600
14601 /*
14602 * Crank up IPv6 neighbor discovery. Unlike ARP, this should
14603 * complete when ipif_ndp_up returns.
14604 */
14605 err = ipif_resolver_up(ipif, Res_act_initial);
14606 if (err == EINPROGRESS) {
14607 /* We will complete it in arp_bringup_done() */
14608 return (err);
14609 }
14610
14611 if (isv6 && err == 0)
14612 err = ipif_ndp_up(ipif, B_TRUE);
14613
14614 ASSERT(err != EINPROGRESS);
14615 mp = ipsq_pending_mp_get(ipsq, &connp);
14616 ASSERT(mp != NULL);
14617 if (err != 0)
14618 return (err);
14619 } else {
14620 /*
14621 * Interfaces without underlying hardware don't do duplicate
14622 * address detection.
14623 */
14624 ASSERT(!(ipif->ipif_flags & IPIF_DUPLICATE));
14625 ipif->ipif_addr_ready = 1;
14626 err = ill_add_ires(ill);
14627 /* allocation failure? */
14628 if (err != 0)
14629 return (err);
14630 }
14631
14632 err = (isv6 ? ipif_up_done_v6(ipif) : ipif_up_done(ipif));
14633 if (err == 0 && ill->ill_move_ipif != NULL) {
14634 ipif = ill->ill_move_ipif;
14635 ill->ill_move_ipif = NULL;
14636 return (ipif_up(ipif, q, mp));
14637 }
14638 return (err);
14639 }
14640
14641 /*
14642 * Add any IREs tied to the ill. For now this is just an IRE_MULTICAST.
 * The identical set of IREs needs to be removed in ill_delete_ires().
14644 */
14645 int
14646 ill_add_ires(ill_t *ill)
14647 {
14648 ire_t *ire;
14649 in6_addr_t dummy6 = {(uint32_t)V6_MCAST, 0, 0, 1};
14650 in_addr_t dummy4 = htonl(INADDR_ALLHOSTS_GROUP);
14651
14652 if (ill->ill_ire_multicast != NULL)
14653 return (0);
14654
14655 /*
	 * Provide a dummy ire_addr for creating the ire.
14657 */
14658 if (ill->ill_isv6) {
14659 ire = ire_create_v6(&dummy6, 0, 0, IRE_MULTICAST, ill,
14660 ALL_ZONES, RTF_UP, NULL, ill->ill_ipst);
14661 } else {
14662 ire = ire_create((uchar_t *)&dummy4, 0, 0, IRE_MULTICAST, ill,
14663 ALL_ZONES, RTF_UP, NULL, ill->ill_ipst);
14664 }
14665 if (ire == NULL)
14666 return (ENOMEM);
14667
14668 ill->ill_ire_multicast = ire;
14669 return (0);
14670 }
14671
14672 void
14673 ill_delete_ires(ill_t *ill)
14674 {
14675 if (ill->ill_ire_multicast != NULL) {
14676 /*
		 * BIND/ATTACH completed; release the ref for ill_ire_multicast
14678 * which was taken without any th_tracing enabled.
14679 * We also mark it as condemned (note that it was never added)
14680 * so that caching conn's can move off of it.
14681 */
14682 ire_make_condemned(ill->ill_ire_multicast);
14683 ire_refrele_notr(ill->ill_ire_multicast);
14684 ill->ill_ire_multicast = NULL;
14685 }
14686 }
14687
14688 /*
14689 * Perform a bind for the physical device.
14690 *
14691 * When the routine returns successfully then dlpi has been bound and
14692 * capabilities negotiated. An unbind message will have been allocated
14693 * for later use in ipif_down.
14694 */
14695 static int
14696 ill_dl_up(ill_t *ill, ipif_t *ipif)
14697 {
14698 mblk_t *bind_mp = NULL;
14699 mblk_t *unbind_mp = NULL;
14700 int err;
14701
14702 DTRACE_PROBE2(ill__downup, char *, "ill_dl_up", ill_t *, ill);
14703
14704 ip1dbg(("ill_dl_up(%s)\n", ill->ill_name));
14705 ASSERT(IAM_WRITER_ILL(ill));
14706
14707 /*
14708 * Make sure we have an IRE_MULTICAST in case we immediately
14709 * start receiving packets.
14710 */
14711 err = ill_add_ires(ill);
14712 if (err != 0)
14713 goto bad;
14714
14715 bind_mp = ip_dlpi_alloc(sizeof (dl_bind_req_t) + sizeof (long),
14716 DL_BIND_REQ);
14717 if (bind_mp == NULL)
14718 goto bad;
14719 ((dl_bind_req_t *)bind_mp->b_rptr)->dl_sap = ill->ill_sap;
14720 ((dl_bind_req_t *)bind_mp->b_rptr)->dl_service_mode = DL_CLDLS;
14721
14722 /*
14723 * ill_unbind_mp would be non-null if the following sequence had
14724 * happened:
14725 * - send DL_BIND_REQ to driver, wait for response
14726 * - multiple ioctls that need to bring the ipif up are encountered,
14727 * but they cannot enter the ipsq due to the outstanding DL_BIND_REQ.
14728 * These ioctls will then be enqueued on the ipsq
14729 * - a DL_ERROR_ACK is returned for the DL_BIND_REQ
14730 * At this point, the pending ioctls in the ipsq will be drained, and
14731 * since ill->ill_dl_up was not set, ill_dl_up would be invoked with
 * a non-null ill->ill_unbind_mp.
14733 */
14734 if (ill->ill_unbind_mp == NULL) {
14735 unbind_mp = ip_dlpi_alloc(sizeof (dl_unbind_req_t),
14736 DL_UNBIND_REQ);
14737 if (unbind_mp == NULL)
14738 goto bad;
14739 }
14740
14741 /*
14742 * Save the unbind message for ill_dl_down(); it will be consumed when
14743 * the interface goes down.
14744 */
14745 if (ill->ill_unbind_mp == NULL)
14746 ill->ill_unbind_mp = unbind_mp;
14747
14748 ill_dlpi_send(ill, bind_mp);
14749 /* Send down link-layer capabilities probe if not already done. */
14750 ill_capability_probe(ill);
14751 /*
14752 * Wait for DLPI to be bound and the capability probe to finish.
	 * The call drops and reacquires the squeue. If it couldn't because
14754 * ILL_CONDEMNED got set, bail.
14755 */
14756 if (!ill_capability_wait(ill))
14757 return (ENXIO);
14758
14759 /* DLPI failed to bind. Return the saved error */
14760 if (!ill->ill_dl_up) {
14761 return (ill->ill_dl_bind_err);
14762 }
14763
14764 /*
14765 * Sysid used to rely on the fact that netboots set domainname
14766 * and the like. Now that miniroot boots aren't strictly netboots
	 * and miniroot network configuration is driven from userland,
14768 * these things still need to be set. This situation can be detected
14769 * by comparing the interface being configured here to the one
14770 * dhcifname was set to reference by the boot loader. Once sysid is
14771 * converted to use dhcp_ipc_getinfo() this call can go away.
14772 */
14773 if ((ipif->ipif_flags & IPIF_DHCPRUNNING) &&
14774 (strcmp(ill->ill_name, dhcifname) == 0) &&
14775 (strlen(srpc_domain) == 0)) {
14776 if (dhcpinit() != 0)
14777 cmn_err(CE_WARN, "no cached dhcp response");
14778 }
14779
14780 return (0);
14781 bad:
14782 ip1dbg(("ill_dl_up(%s) FAILED\n", ill->ill_name));
14783
14784 freemsg(bind_mp);
14785 freemsg(unbind_mp);
14786 return (ENOMEM);
14787 }
14788
14789 /* Add room for tcp+ip headers */
14790 uint_t ip_loopback_mtuplus = IP_LOOPBACK_MTU + IP_SIMPLE_HDR_LENGTH + 20;
14791
14792 /*
 * DLPI and ARP are up.
 * Create all the IREs associated with an interface, and bring up multicast.
 * Set the interface flags and finish any other initialization
 * that potentially had to be deferred until after DL_BIND_ACK.
14797 */
14798 int
14799 ipif_up_done(ipif_t *ipif)
14800 {
14801 ill_t *ill = ipif->ipif_ill;
14802 int err = 0;
14803 boolean_t loopback = B_FALSE;
14804 boolean_t update_src_selection = B_TRUE;
14805 ipif_t *tmp_ipif;
14806
14807 ip1dbg(("ipif_up_done(%s:%u)\n",
14808 ipif->ipif_ill->ill_name, ipif->ipif_id));
14809 DTRACE_PROBE3(ipif__downup, char *, "ipif_up_done",
14810 ill_t *, ill, ipif_t *, ipif);
14811
14812 /* Check if this is a loopback interface */
14813 if (ipif->ipif_ill->ill_wq == NULL)
14814 loopback = B_TRUE;
14815
14816 ASSERT(!MUTEX_HELD(&ipif->ipif_ill->ill_lock));
14817
14818 /*
14819 * If all other interfaces for this ill are down or DEPRECATED,
14820 * or otherwise unsuitable for source address selection,
14821 * reset the src generation numbers to make sure source
14822 * address selection gets to take this new ipif into account.
14823 * No need to hold ill_lock while traversing the ipif list since
 * we are writer.
14825 */
14826 for (tmp_ipif = ill->ill_ipif; tmp_ipif;
14827 tmp_ipif = tmp_ipif->ipif_next) {
14828 if (((tmp_ipif->ipif_flags &
14829 (IPIF_NOXMIT|IPIF_ANYCAST|IPIF_NOLOCAL|IPIF_DEPRECATED)) ||
14830 !(tmp_ipif->ipif_flags & IPIF_UP)) ||
14831 (tmp_ipif == ipif))
14832 continue;
		/* first usable pre-existing interface */
14834 update_src_selection = B_FALSE;
14835 break;
14836 }
14837 if (update_src_selection)
14838 ip_update_source_selection(ill->ill_ipst);
14839
14840 if (IS_LOOPBACK(ill) || ill->ill_net_type == IRE_IF_NORESOLVER) {
14841 nce_t *loop_nce = NULL;
14842 uint16_t flags = (NCE_F_MYADDR | NCE_F_AUTHORITY | NCE_F_NONUD);
14843
14844 /*
14845 * lo0:1 and subsequent ipifs were marked IRE_LOCAL in
14846 * ipif_lookup_on_name(), but in the case of zones we can have
14847 * several loopback addresses on lo0. So all the interfaces with
14848 * loopback addresses need to be marked IRE_LOOPBACK.
14849 */
14850 if (V4_PART_OF_V6(ipif->ipif_v6lcl_addr) ==
14851 htonl(INADDR_LOOPBACK))
14852 ipif->ipif_ire_type = IRE_LOOPBACK;
14853 else
14854 ipif->ipif_ire_type = IRE_LOCAL;
14855 if (ill->ill_net_type != IRE_LOOPBACK)
14856 flags |= NCE_F_PUBLISH;
14857
14858 /* add unicast nce for the local addr */
14859 err = nce_lookup_then_add_v4(ill, NULL,
14860 ill->ill_phys_addr_length, &ipif->ipif_lcl_addr, flags,
14861 ND_REACHABLE, &loop_nce);
14862 /* A shared-IP zone sees EEXIST for lo0:N */
14863 if (err == 0 || err == EEXIST) {
14864 ipif->ipif_added_nce = 1;
14865 loop_nce->nce_ipif_cnt++;
14866 nce_refrele(loop_nce);
14867 err = 0;
14868 } else {
14869 ASSERT(loop_nce == NULL);
14870 return (err);
14871 }
14872 }
14873
14874 /* Create all the IREs associated with this interface */
14875 err = ipif_add_ires_v4(ipif, loopback);
14876 if (err != 0) {
14877 /*
14878 * see comments about return value from
14879 * ip_addr_availability_check() in ipif_add_ires_v4().
14880 */
14881 if (err != EADDRINUSE) {
14882 (void) ipif_arp_down(ipif);
14883 } else {
14884 /*
14885 * Make IPMP aware of the deleted ipif so that
14886 * the needed ipmp cleanup (e.g., of ipif_bound_ill)
14887 * can be completed. Note that we do not want to
14888 * destroy the nce that was created on the ipmp_ill
14889 * for the active copy of the duplicate address in
14890 * use.
14891 */
14892 if (IS_IPMP(ill))
14893 ipmp_illgrp_del_ipif(ill->ill_grp, ipif);
14894 err = EADDRNOTAVAIL;
14895 }
14896 return (err);
14897 }
14898
14899 if (ill->ill_ipif_up_count == 1 && !loopback) {
14900 /* Recover any additional IREs entries for this ill */
14901 (void) ill_recover_saved_ire(ill);
14902 }
14903
14904 if (ill->ill_need_recover_multicast) {
14905 /*
14906 * Need to recover all multicast memberships in the driver.
14907 * This had to be deferred until we had attached. The same
14908 * code exists in ipif_up_done_v6() to recover IPv6
14909 * memberships.
14910 *
14911 * Note that it would be preferable to unconditionally do the
14912 * ill_recover_multicast() in ill_dl_up(), but we cannot do
14913 * that since ill_join_allmulti() depends on ill_dl_up being
14914 * set, and it is not set until we receive a DL_BIND_ACK after
14915 * having called ill_dl_up().
14916 */
14917 ill_recover_multicast(ill);
14918 }
14919
14920 if (ill->ill_ipif_up_count == 1) {
14921 /*
14922 * Since the interface is now up, it may now be active.
14923 */
14924 if (IS_UNDER_IPMP(ill))
14925 ipmp_ill_refresh_active(ill);
14926
14927 /*
14928 * If this is an IPMP interface, we may now be able to
14929 * establish ARP entries.
14930 */
14931 if (IS_IPMP(ill))
14932 ipmp_illgrp_refresh_arpent(ill->ill_grp);
14933 }
14934
14935 /* Join the allhosts multicast address */
14936 ipif_multicast_up(ipif);
14937
14938 if (!loopback && !update_src_selection &&
14939 !(ipif->ipif_flags & (IPIF_NOLOCAL|IPIF_ANYCAST|IPIF_DEPRECATED)))
14940 ip_update_source_selection(ill->ill_ipst);
14941
14942 if (!loopback && ipif->ipif_addr_ready) {
14943 /* Broadcast an address mask reply. */
14944 ipif_mask_reply(ipif);
14945 }
14946 /* Perhaps ilgs should use this ill */
14947 update_conn_ill(NULL, ill->ill_ipst);
14948
14949 /*
14950 * This had to be deferred until we had bound. Tell routing sockets and
14951 * others that this interface is up if it looks like the address has
14952 * been validated. Otherwise, if it isn't ready yet, wait for
14953 * duplicate address detection to do its thing.
14954 */
14955 if (ipif->ipif_addr_ready)
14956 ipif_up_notify(ipif);
14957 return (0);
14958 }
14959
14960 /*
14961 * Add the IREs associated with the ipif.
14962 * Those MUST be explicitly removed in ipif_delete_ires_v4.
14963 */
14964 static int
14965 ipif_add_ires_v4(ipif_t *ipif, boolean_t loopback)
14966 {
14967 ill_t *ill = ipif->ipif_ill;
14968 ip_stack_t *ipst = ill->ill_ipst;
14969 ire_t *ire_array[20];
14970 ire_t **irep = ire_array;
14971 ire_t **irep1;
14972 ipaddr_t net_mask = 0;
14973 ipaddr_t subnet_mask, route_mask;
14974 int err;
14975 ire_t *ire_local = NULL; /* LOCAL or LOOPBACK */
14976 ire_t *ire_if = NULL;
14977 uchar_t *gw;
14978
14979 if ((ipif->ipif_lcl_addr != INADDR_ANY) &&
14980 !(ipif->ipif_flags & IPIF_NOLOCAL)) {
14981 /*
14982 * If we're on a labeled system then make sure that zone-
14983 * private addresses have proper remote host database entries.
14984 */
14985 if (is_system_labeled() &&
14986 ipif->ipif_ire_type != IRE_LOOPBACK &&
14987 !tsol_check_interface_address(ipif))
14988 return (EINVAL);
14989
14990 /* Register the source address for __sin6_src_id */
14991 err = ip_srcid_insert(&ipif->ipif_v6lcl_addr,
14992 ipif->ipif_zoneid, ipst);
14993 if (err != 0) {
14994 ip0dbg(("ipif_add_ires: srcid_insert %d\n", err));
14995 return (err);
14996 }
14997
14998 if (loopback)
14999 gw = (uchar_t *)&ipif->ipif_lcl_addr;
15000 else
15001 gw = NULL;
15002
15003 /* If the interface address is set, create the local IRE. */
15004 ire_local = ire_create(
15005 (uchar_t *)&ipif->ipif_lcl_addr, /* dest address */
15006 (uchar_t *)&ip_g_all_ones, /* mask */
15007 gw, /* gateway */
15008 ipif->ipif_ire_type, /* LOCAL or LOOPBACK */
15009 ipif->ipif_ill,
15010 ipif->ipif_zoneid,
15011 ((ipif->ipif_flags & IPIF_PRIVATE) ?
15012 RTF_PRIVATE : 0) | RTF_KERNEL,
15013 NULL,
15014 ipst);
15015 ip1dbg(("ipif_add_ires: 0x%p creating IRE %p type 0x%x"
15016 " for 0x%x\n", (void *)ipif, (void *)ire_local,
15017 ipif->ipif_ire_type,
15018 ntohl(ipif->ipif_lcl_addr)));
15019 if (ire_local == NULL) {
15020 ip1dbg(("ipif_up_done: NULL ire_local\n"));
15021 err = ENOMEM;
15022 goto bad;
15023 }
15024 } else {
15025 ip1dbg((
15026 "ipif_add_ires: not creating IRE %d for 0x%x: flags 0x%x\n",
15027 ipif->ipif_ire_type,
15028 ntohl(ipif->ipif_lcl_addr),
15029 (uint_t)ipif->ipif_flags));
15030 }
15031 if ((ipif->ipif_lcl_addr != INADDR_ANY) &&
15032 !(ipif->ipif_flags & IPIF_NOLOCAL)) {
15033 net_mask = ip_net_mask(ipif->ipif_lcl_addr);
15034 } else {
15035 net_mask = htonl(IN_CLASSA_NET); /* fallback */
15036 }
15037
15038 subnet_mask = ipif->ipif_net_mask;
15039
15040 /*
15041 * If mask was not specified, use natural netmask of
15042 * interface address. Also, store this mask back into the
15043 * ipif struct.
15044 */
15045 if (subnet_mask == 0) {
15046 subnet_mask = net_mask;
15047 V4MASK_TO_V6(subnet_mask, ipif->ipif_v6net_mask);
15048 V6_MASK_COPY(ipif->ipif_v6lcl_addr, ipif->ipif_v6net_mask,
15049 ipif->ipif_v6subnet);
15050 }
15051
15052 /* Set up the IRE_IF_RESOLVER or IRE_IF_NORESOLVER, as appropriate. */
15053 if (!loopback && !(ipif->ipif_flags & IPIF_NOXMIT) &&
15054 ipif->ipif_subnet != INADDR_ANY) {
15055 /* ipif_subnet is ipif_pp_dst_addr for pt-pt */
15056
15057 if (ipif->ipif_flags & IPIF_POINTOPOINT) {
15058 route_mask = IP_HOST_MASK;
15059 } else {
15060 route_mask = subnet_mask;
15061 }
15062
15063 ip1dbg(("ipif_add_ires: ipif 0x%p ill 0x%p "
15064 "creating if IRE ill_net_type 0x%x for 0x%x\n",
15065 (void *)ipif, (void *)ill, ill->ill_net_type,
15066 ntohl(ipif->ipif_subnet)));
15067 ire_if = ire_create(
15068 (uchar_t *)&ipif->ipif_subnet,
15069 (uchar_t *)&route_mask,
15070 (uchar_t *)&ipif->ipif_lcl_addr,
15071 ill->ill_net_type,
15072 ill,
15073 ipif->ipif_zoneid,
15074 ((ipif->ipif_flags & IPIF_PRIVATE) ?
15075 RTF_PRIVATE: 0) | RTF_KERNEL,
15076 NULL,
15077 ipst);
15078 if (ire_if == NULL) {
15079 ip1dbg(("ipif_up_done: NULL ire_if\n"));
15080 err = ENOMEM;
15081 goto bad;
15082 }
15083 }
15084
15085 /*
15086 * Create any necessary broadcast IREs.
15087 */
15088 if ((ipif->ipif_flags & IPIF_BROADCAST) &&
15089 !(ipif->ipif_flags & IPIF_NOXMIT))
15090 irep = ipif_create_bcast_ires(ipif, irep);
15091
15092 /* If an earlier ire_create failed, get out now */
15093 for (irep1 = irep; irep1 > ire_array; ) {
15094 irep1--;
15095 if (*irep1 == NULL) {
15096 ip1dbg(("ipif_up_done: NULL ire found in ire_array\n"));
15097 err = ENOMEM;
15098 goto bad;
15099 }
15100 }
15101
15102 /*
15103 * Need to atomically check for IP address availability under
15104 * ip_addr_avail_lock. ill_g_lock is held as reader to ensure no new
15105 * ills or new ipifs can be added while we are checking availability.
15106 */
15107 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
15108 mutex_enter(&ipst->ips_ip_addr_avail_lock);
15109 /* Mark it up, and increment counters. */
15110 ipif->ipif_flags |= IPIF_UP;
15111 ill->ill_ipif_up_count++;
15112 err = ip_addr_availability_check(ipif);
15113 mutex_exit(&ipst->ips_ip_addr_avail_lock);
15114 rw_exit(&ipst->ips_ill_g_lock);
15115
15116 if (err != 0) {
15117 /*
15118 * Our address may already be up on the same ill. In this case,
15119 * the ARP entry for our ipif replaced the one for the other
15120 * ipif. So we don't want to delete it (otherwise the other ipif
15121 * would be unable to send packets).
15122 * ip_addr_availability_check() identifies this case for us and
		 * returns EADDRINUSE; the caller should turn it into
		 * EADDRNOTAVAIL, which is the expected error code.
15125 */
15126 ill->ill_ipif_up_count--;
15127 ipif->ipif_flags &= ~IPIF_UP;
15128 goto bad;
15129 }
15130
15131 /*
15132 * Add in all newly created IREs. ire_create_bcast() has
15133 * already checked for duplicates of the IRE_BROADCAST type.
15134 * We add the IRE_INTERFACE before the IRE_LOCAL to ensure
15135 * that lookups find the IRE_LOCAL even if the IRE_INTERFACE is
15136 * a /32 route.
15137 */
15138 if (ire_if != NULL) {
15139 ire_if = ire_add(ire_if);
15140 if (ire_if == NULL) {
15141 err = ENOMEM;
15142 goto bad2;
15143 }
15144 #ifdef DEBUG
15145 ire_refhold_notr(ire_if);
15146 ire_refrele(ire_if);
15147 #endif
15148 }
15149 if (ire_local != NULL) {
15150 ire_local = ire_add(ire_local);
15151 if (ire_local == NULL) {
15152 err = ENOMEM;
15153 goto bad2;
15154 }
15155 #ifdef DEBUG
15156 ire_refhold_notr(ire_local);
15157 ire_refrele(ire_local);
15158 #endif
15159 }
15160 rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
15161 if (ire_local != NULL)
15162 ipif->ipif_ire_local = ire_local;
15163 if (ire_if != NULL)
15164 ipif->ipif_ire_if = ire_if;
15165 rw_exit(&ipst->ips_ill_g_lock);
15166 ire_local = NULL;
15167 ire_if = NULL;
15168
15169 /*
15170 * We first add all of them, and if that succeeds we refrele the
15171 * bunch. That enables us to delete all of them should any of the
15172 * ire_adds fail.
15173 */
15174 for (irep1 = irep; irep1 > ire_array; ) {
15175 irep1--;
15176 ASSERT(!MUTEX_HELD(&((*irep1)->ire_ill->ill_lock)));
15177 *irep1 = ire_add(*irep1);
15178 if (*irep1 == NULL) {
15179 err = ENOMEM;
15180 goto bad2;
15181 }
15182 }
15183
15184 for (irep1 = irep; irep1 > ire_array; ) {
15185 irep1--;
15186 /* refheld by ire_add. */
15187 if (*irep1 != NULL) {
15188 ire_refrele(*irep1);
15189 *irep1 = NULL;
15190 }
15191 }
15192
15193 if (!loopback) {
15194 /*
15195 * If the broadcast address has been set, make sure it makes
15196 * sense based on the interface address.
15197 * Only match on ill since we are sharing broadcast addresses.
15198 */
15199 if ((ipif->ipif_brd_addr != INADDR_ANY) &&
15200 (ipif->ipif_flags & IPIF_BROADCAST)) {
15201 ire_t *ire;
15202
15203 ire = ire_ftable_lookup_v4(ipif->ipif_brd_addr, 0, 0,
15204 IRE_BROADCAST, ipif->ipif_ill, ALL_ZONES, NULL,
15205 (MATCH_IRE_TYPE | MATCH_IRE_ILL), 0, ipst, NULL);
15206
15207 if (ire == NULL) {
15208 /*
15209 * If there isn't a matching broadcast IRE,
15210 * revert to the default for this netmask.
15211 */
15212 ipif->ipif_v6brd_addr = ipv6_all_zeros;
15213 mutex_enter(&ipif->ipif_ill->ill_lock);
15214 ipif_set_default(ipif);
15215 mutex_exit(&ipif->ipif_ill->ill_lock);
15216 } else {
15217 ire_refrele(ire);
15218 }
15219 }
15220
15221 }
15222 return (0);
15223
15224 bad2:
15225 ill->ill_ipif_up_count--;
15226 ipif->ipif_flags &= ~IPIF_UP;
15227
15228 bad:
15229 ip1dbg(("ipif_add_ires: FAILED \n"));
15230 if (ire_local != NULL)
15231 ire_delete(ire_local);
15232 if (ire_if != NULL)
15233 ire_delete(ire_if);
15234
15235 rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
15236 ire_local = ipif->ipif_ire_local;
15237 ipif->ipif_ire_local = NULL;
15238 ire_if = ipif->ipif_ire_if;
15239 ipif->ipif_ire_if = NULL;
15240 rw_exit(&ipst->ips_ill_g_lock);
15241 if (ire_local != NULL) {
15242 ire_delete(ire_local);
15243 ire_refrele_notr(ire_local);
15244 }
15245 if (ire_if != NULL) {
15246 ire_delete(ire_if);
15247 ire_refrele_notr(ire_if);
15248 }
15249
15250 while (irep > ire_array) {
15251 irep--;
15252 if (*irep != NULL) {
15253 ire_delete(*irep);
15254 }
15255 }
15256 (void) ip_srcid_remove(&ipif->ipif_v6lcl_addr, ipif->ipif_zoneid, ipst);
15257
15258 return (err);
15259 }
15260
15261 /* Remove all the IREs created by ipif_add_ires_v4 */
15262 void
15263 ipif_delete_ires_v4(ipif_t *ipif)
15264 {
15265 ill_t *ill = ipif->ipif_ill;
15266 ip_stack_t *ipst = ill->ill_ipst;
15267 ire_t *ire;
15268
15269 rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
15270 ire = ipif->ipif_ire_local;
15271 ipif->ipif_ire_local = NULL;
15272 rw_exit(&ipst->ips_ill_g_lock);
15273 if (ire != NULL) {
15274 /*
		 * Move count to ipif so we don't lose the count due to
15276 * a down/up dance.
15277 */
15278 atomic_add_32(&ipif->ipif_ib_pkt_count, ire->ire_ib_pkt_count);
15279
15280 ire_delete(ire);
15281 ire_refrele_notr(ire);
15282 }
15283 rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
15284 ire = ipif->ipif_ire_if;
15285 ipif->ipif_ire_if = NULL;
15286 rw_exit(&ipst->ips_ill_g_lock);
15287 if (ire != NULL) {
15288 ire_delete(ire);
15289 ire_refrele_notr(ire);
15290 }
15291
15292 /*
15293 * Delete the broadcast IREs.
15294 */
15295 if ((ipif->ipif_flags & IPIF_BROADCAST) &&
15296 !(ipif->ipif_flags & IPIF_NOXMIT))
15297 ipif_delete_bcast_ires(ipif);
15298 }
15299
15300 /*
 * Checks for availability of a usable source address (if there is one) when
 * the destination ILL has its ill_usesrc_ifindex pointing to another ILL.
 * Note that this selection is done regardless of the destination.
15304 */
15305 boolean_t
15306 ipif_zone_avail(uint_t ifindex, boolean_t isv6, zoneid_t zoneid,
15307 ip_stack_t *ipst)
15308 {
15309 ipif_t *ipif = NULL;
15310 ill_t *uill;
15311
15312 ASSERT(ifindex != 0);
15313
15314 uill = ill_lookup_on_ifindex(ifindex, isv6, ipst);
15315 if (uill == NULL)
15316 return (B_FALSE);
15317
15318 mutex_enter(&uill->ill_lock);
15319 for (ipif = uill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
15320 if (IPIF_IS_CONDEMNED(ipif))
15321 continue;
15322 if (ipif->ipif_flags & (IPIF_NOLOCAL|IPIF_ANYCAST))
15323 continue;
15324 if (!(ipif->ipif_flags & IPIF_UP))
15325 continue;
15326 if (ipif->ipif_zoneid != zoneid)
15327 continue;
15328 if (isv6 ? IN6_IS_ADDR_UNSPECIFIED(&ipif->ipif_v6lcl_addr) :
15329 ipif->ipif_lcl_addr == INADDR_ANY)
15330 continue;
15331 mutex_exit(&uill->ill_lock);
15332 ill_refrele(uill);
15333 return (B_TRUE);
15334 }
15335 mutex_exit(&uill->ill_lock);
15336 ill_refrele(uill);
15337 return (B_FALSE);
15338 }
15339
15340 /*
15341 * Find an ipif with a good local address on the ill+zoneid.
15342 */
15343 ipif_t *
15344 ipif_good_addr(ill_t *ill, zoneid_t zoneid)
15345 {
15346 ipif_t *ipif;
15347
15348 mutex_enter(&ill->ill_lock);
15349 for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
15350 if (IPIF_IS_CONDEMNED(ipif))
15351 continue;
15352 if (ipif->ipif_flags & (IPIF_NOLOCAL|IPIF_ANYCAST))
15353 continue;
15354 if (!(ipif->ipif_flags & IPIF_UP))
15355 continue;
15356 if (ipif->ipif_zoneid != zoneid &&
15357 ipif->ipif_zoneid != ALL_ZONES && zoneid != ALL_ZONES)
15358 continue;
15359 if (ill->ill_isv6 ?
15360 IN6_IS_ADDR_UNSPECIFIED(&ipif->ipif_v6lcl_addr) :
15361 ipif->ipif_lcl_addr == INADDR_ANY)
15362 continue;
15363 ipif_refhold_locked(ipif);
15364 mutex_exit(&ill->ill_lock);
15365 return (ipif);
15366 }
15367 mutex_exit(&ill->ill_lock);
15368 return (NULL);
15369 }
15370
15371 /*
15372 * IP source address type, sorted from worst to best. For a given type,
15373 * always prefer IP addresses on the same subnet. All-zones addresses are
15374 * suboptimal because they pose problems with unlabeled destinations.
15375 */
15376 typedef enum {
15377 IPIF_NONE,
15378 IPIF_DIFFNET_DEPRECATED, /* deprecated and different subnet */
15379 IPIF_SAMENET_DEPRECATED, /* deprecated and same subnet */
15380 IPIF_DIFFNET_ALLZONES, /* allzones and different subnet */
15381 IPIF_SAMENET_ALLZONES, /* allzones and same subnet */
15382 IPIF_DIFFNET, /* normal and different subnet */
15383 IPIF_SAMENET, /* normal and same subnet */
15384 IPIF_LOCALADDR /* local loopback */
15385 } ipif_type_t;
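/*
 * Illustrative example (not from the code itself): with dst 192.168.1.5,
 * a non-deprecated zone-private 192.168.1.10/24 rates IPIF_SAMENET, an
 * all-zones 192.168.1.11/24 rates IPIF_SAMENET_ALLZONES, and a deprecated
 * 10.0.0.1/8 rates IPIF_DIFFNET_DEPRECATED; 192.168.1.10 therefore wins.
 * An ipif whose address equals dst itself would rate IPIF_LOCALADDR and
 * end the search immediately.
 */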
15386
15387 /*
15388 * Pick the optimal ipif on `ill' for sending to destination `dst' from zone
15389 * `zoneid'. We rate usable ipifs from low -> high as per the ipif_type_t
15390 * enumeration, and return the highest-rated ipif. If there's a tie, we pick
15391 * the first one, unless IPMP is used in which case we round-robin among them;
15392 * see below for more.
15393 *
 * Returns NULL if there is no suitable source address for the ill,
 * which only occurs when the ill has no valid source address at all.
15396 */
15397 ipif_t *
15398 ipif_select_source_v4(ill_t *ill, ipaddr_t dst, zoneid_t zoneid,
15399 boolean_t allow_usesrc, boolean_t *notreadyp)
15400 {
15401 ill_t *usill = NULL;
15402 ill_t *ipmp_ill = NULL;
15403 ipif_t *start_ipif, *next_ipif, *ipif, *best_ipif;
15404 ipif_type_t type, best_type;
15405 tsol_tpc_t *src_rhtp, *dst_rhtp;
15406 ip_stack_t *ipst = ill->ill_ipst;
15407 boolean_t samenet;
15408
15409 if (ill->ill_usesrc_ifindex != 0 && allow_usesrc) {
15410 usill = ill_lookup_on_ifindex(ill->ill_usesrc_ifindex,
15411 B_FALSE, ipst);
15412 if (usill != NULL)
15413 ill = usill; /* Select source from usesrc ILL */
15414 else
15415 return (NULL);
15416 }
15417
15418 /*
15419 * Test addresses should never be used for source address selection,
15420 * so if we were passed one, switch to the IPMP meta-interface.
15421 */
15422 if (IS_UNDER_IPMP(ill)) {
15423 if ((ipmp_ill = ipmp_ill_hold_ipmp_ill(ill)) != NULL)
15424 ill = ipmp_ill; /* Select source from IPMP ill */
15425 else
15426 return (NULL);
15427 }
15428
15429 /*
15430 * If we're dealing with an unlabeled destination on a labeled system,
15431 * make sure that we ignore source addresses that are incompatible with
15432 * the destination's default label. That destination's default label
15433 * must dominate the minimum label on the source address.
15434 */
15435 dst_rhtp = NULL;
15436 if (is_system_labeled()) {
15437 dst_rhtp = find_tpc(&dst, IPV4_VERSION, B_FALSE);
15438 if (dst_rhtp == NULL)
15439 return (NULL);
15440 if (dst_rhtp->tpc_tp.host_type != UNLABELED) {
15441 TPC_RELE(dst_rhtp);
15442 dst_rhtp = NULL;
15443 }
15444 }
15445
15446 /*
15447 * Hold the ill_g_lock as reader. This makes sure that no ipif/ill
15448 * can be deleted. But an ipif/ill can get CONDEMNED any time.
15449 * After selecting the right ipif, under ill_lock make sure ipif is
15450 * not condemned, and increment refcnt. If ipif is CONDEMNED,
15451 * we retry. Inside the loop we still need to check for CONDEMNED,
15452 * but not under a lock.
15453 */
15454 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
15455 retry:
15456 /*
15457 * For source address selection, we treat the ipif list as circular
15458 * and continue until we get back to where we started. This allows
15459 * IPMP to vary source address selection (which improves inbound load
15460 * spreading) by caching its last ending point and starting from
15461 * there. NOTE: we don't have to worry about ill_src_ipif changing
15462 * ills since that can't happen on the IPMP ill.
15463 */
15464 start_ipif = ill->ill_ipif;
15465 if (IS_IPMP(ill) && ill->ill_src_ipif != NULL)
15466 start_ipif = ill->ill_src_ipif;
15467
15468 ipif = start_ipif;
15469 best_ipif = NULL;
15470 best_type = IPIF_NONE;
15471 do {
15472 if ((next_ipif = ipif->ipif_next) == NULL)
15473 next_ipif = ill->ill_ipif;
15474
15475 if (IPIF_IS_CONDEMNED(ipif))
15476 continue;
15477 /* Always skip NOLOCAL and ANYCAST interfaces */
15478 if (ipif->ipif_flags & (IPIF_NOLOCAL|IPIF_ANYCAST))
15479 continue;
15480 /* Always skip NOACCEPT interfaces */
15481 if (ipif->ipif_ill->ill_flags & ILLF_NOACCEPT)
15482 continue;
15483 if (!(ipif->ipif_flags & IPIF_UP))
15484 continue;
15485
15486 if (!ipif->ipif_addr_ready) {
15487 if (notreadyp != NULL)
15488 *notreadyp = B_TRUE;
15489 continue;
15490 }
15491
15492 if (zoneid != ALL_ZONES &&
15493 ipif->ipif_zoneid != zoneid &&
15494 ipif->ipif_zoneid != ALL_ZONES)
15495 continue;
15496
15497 /*
15498 * Interfaces with 0.0.0.0 address are allowed to be UP, but
15499 * are not valid as source addresses.
15500 */
15501 if (ipif->ipif_lcl_addr == INADDR_ANY)
15502 continue;
15503
15504 /*
15505 * Check compatibility of local address for destination's
15506 * default label if we're on a labeled system. Incompatible
15507 * addresses can't be used at all.
15508 */
15509 if (dst_rhtp != NULL) {
15510 boolean_t incompat;
15511
15512 src_rhtp = find_tpc(&ipif->ipif_lcl_addr,
15513 IPV4_VERSION, B_FALSE);
15514 if (src_rhtp == NULL)
15515 continue;
15516 incompat = src_rhtp->tpc_tp.host_type != SUN_CIPSO ||
15517 src_rhtp->tpc_tp.tp_doi !=
15518 dst_rhtp->tpc_tp.tp_doi ||
15519 (!_blinrange(&dst_rhtp->tpc_tp.tp_def_label,
15520 &src_rhtp->tpc_tp.tp_sl_range_cipso) &&
15521 !blinlset(&dst_rhtp->tpc_tp.tp_def_label,
15522 src_rhtp->tpc_tp.tp_sl_set_cipso));
15523 TPC_RELE(src_rhtp);
15524 if (incompat)
15525 continue;
15526 }
15527
15528 samenet = ((ipif->ipif_net_mask & dst) == ipif->ipif_subnet);
15529
15530 if (ipif->ipif_lcl_addr == dst) {
15531 type = IPIF_LOCALADDR;
15532 } else if (ipif->ipif_flags & IPIF_DEPRECATED) {
15533 type = samenet ? IPIF_SAMENET_DEPRECATED :
15534 IPIF_DIFFNET_DEPRECATED;
15535 } else if (ipif->ipif_zoneid == ALL_ZONES) {
15536 type = samenet ? IPIF_SAMENET_ALLZONES :
15537 IPIF_DIFFNET_ALLZONES;
15538 } else {
15539 type = samenet ? IPIF_SAMENET : IPIF_DIFFNET;
15540 }
15541
15542 if (type > best_type) {
15543 best_type = type;
15544 best_ipif = ipif;
15545 if (best_type == IPIF_LOCALADDR)
15546 break; /* can't get better */
15547 }
15548 } while ((ipif = next_ipif) != start_ipif);
15549
15550 if ((ipif = best_ipif) != NULL) {
15551 mutex_enter(&ipif->ipif_ill->ill_lock);
15552 if (IPIF_IS_CONDEMNED(ipif)) {
15553 mutex_exit(&ipif->ipif_ill->ill_lock);
15554 goto retry;
15555 }
15556 ipif_refhold_locked(ipif);
15557
15558 /*
15559 * For IPMP, update the source ipif rotor to the next ipif,
15560 * provided we can look it up. (We must not use it if it's
15561 * IPIF_CONDEMNED since we may have grabbed ill_g_lock after
15562 * ipif_free() checked ill_src_ipif.)
15563 */
15564 if (IS_IPMP(ill) && ipif != NULL) {
15565 next_ipif = ipif->ipif_next;
15566 if (next_ipif != NULL && !IPIF_IS_CONDEMNED(next_ipif))
15567 ill->ill_src_ipif = next_ipif;
15568 else
15569 ill->ill_src_ipif = NULL;
15570 }
15571 mutex_exit(&ipif->ipif_ill->ill_lock);
15572 }
15573
15574 rw_exit(&ipst->ips_ill_g_lock);
15575 if (usill != NULL)
15576 ill_refrele(usill);
15577 if (ipmp_ill != NULL)
15578 ill_refrele(ipmp_ill);
15579 if (dst_rhtp != NULL)
15580 TPC_RELE(dst_rhtp);
15581
15582 #ifdef DEBUG
15583 if (ipif == NULL) {
15584 char buf1[INET6_ADDRSTRLEN];
15585
15586 ip1dbg(("ipif_select_source_v4(%s, %s) -> NULL\n",
15587 ill->ill_name,
15588 inet_ntop(AF_INET, &dst, buf1, sizeof (buf1))));
15589 } else {
15590 char buf1[INET6_ADDRSTRLEN];
15591 char buf2[INET6_ADDRSTRLEN];
15592
15593 ip1dbg(("ipif_select_source_v4(%s, %s) -> %s\n",
15594 ipif->ipif_ill->ill_name,
15595 inet_ntop(AF_INET, &dst, buf1, sizeof (buf1)),
15596 inet_ntop(AF_INET, &ipif->ipif_lcl_addr,
15597 buf2, sizeof (buf2))));
15598 }
15599 #endif /* DEBUG */
15600 return (ipif);
15601 }
15602
15603 /*
15604 * Pick a source address based on the destination ill and an optional setsrc
15605 * address.
15606 * The result is stored in srcp. If generation is set, then put the source
15607 * generation number there before we look for the source address (to avoid
 * missing changes in the set of source addresses).
 * If flagsp is set, then use it to pass back ipif_flags.
15610 *
15611 * If the caller wants to cache the returned source address and detect when
15612 * that might be stale, the caller should pass in a generation argument,
 * which the caller can later compare against ips_src_generation.
15614 *
15615 * The precedence order for selecting an IPv4 source address is:
15616 * - RTF_SETSRC on the offlink ire always wins.
 * - If usesrc is set, swap the ill to be the usesrc one.
15618 * - If IPMP is used on the ill, select a random address from the most
15619 * preferred ones below:
15620 * 1. If onlink destination, same subnet and not deprecated, not ALL_ZONES
15621 * 2. Not deprecated, not ALL_ZONES
15622 * 3. If onlink destination, same subnet and not deprecated, ALL_ZONES
15623 * 4. Not deprecated, ALL_ZONES
15624 * 5. If onlink destination, same subnet and deprecated
15625 * 6. Deprecated.
15626 *
15627 * We have lower preference for ALL_ZONES IP addresses,
15628 * as they pose problems with unlabeled destinations.
15629 *
 * Note that when multiple IP addresses match, e.g. #1, we pick
 * the first one if IPMP is not in use; with IPMP we randomize.
15632 */
15633 int
15634 ip_select_source_v4(ill_t *ill, ipaddr_t setsrc, ipaddr_t dst,
15635 ipaddr_t multicast_ifaddr,
15636 zoneid_t zoneid, ip_stack_t *ipst, ipaddr_t *srcp,
15637 uint32_t *generation, uint64_t *flagsp)
15638 {
15639 ipif_t *ipif;
15640 boolean_t notready = B_FALSE; /* Set if !ipif_addr_ready found */
15641
15642 if (flagsp != NULL)
15643 *flagsp = 0;
15644
15645 /*
15646 * Need to grab the generation number before we check to
15647 * avoid a race with a change to the set of local addresses.
	 * No lock needed since the thread which updates the set of local
	 * addresses uses ipif/ill locks and exits those (hence a store memory
	 * barrier) before doing the atomic increase of ips_src_generation.
15651 */
15652 if (generation != NULL) {
15653 *generation = ipst->ips_src_generation;
15654 }
15655
15656 if (CLASSD(dst) && multicast_ifaddr != INADDR_ANY) {
15657 *srcp = multicast_ifaddr;
15658 return (0);
15659 }
15660
15661 /* Was RTF_SETSRC set on the first IRE in the recursive lookup? */
15662 if (setsrc != INADDR_ANY) {
15663 *srcp = setsrc;
15664 return (0);
15665 }
15666 ipif = ipif_select_source_v4(ill, dst, zoneid, B_TRUE, ¬ready);
15667 if (ipif == NULL) {
15668 if (notready)
15669 return (ENETDOWN);
15670 else
15671 return (EADDRNOTAVAIL);
15672 }
15673 *srcp = ipif->ipif_lcl_addr;
15674 if (flagsp != NULL)
15675 *flagsp = ipif->ipif_flags;
15676 ipif_refrele(ipif);
15677 return (0);
15678 }
15679
15680 /* ARGSUSED */
15681 int
15682 if_unitsel_restart(ipif_t *ipif, sin_t *dummy_sin, queue_t *q, mblk_t *mp,
15683 ip_ioctl_cmd_t *ipip, void *dummy_ifreq)
15684 {
15685 /*
15686 * ill_phyint_reinit merged the v4 and v6 into a single
15687 * ipsq. We might not have been able to complete the
15688 * operation in ipif_set_values, if we could not become
15689 * exclusive. If so restart it here.
15690 */
15691 return (ipif_set_values_tail(ipif->ipif_ill, ipif, mp, q));
15692 }
15693
15694 /*
15695 * Can operate on either a module or a driver queue.
15696 * Returns an error if not a module queue.
15697 */
15698 /* ARGSUSED */
15699 int
15700 if_unitsel(ipif_t *dummy_ipif, sin_t *dummy_sin, queue_t *q, mblk_t *mp,
15701 ip_ioctl_cmd_t *ipip, void *dummy_ifreq)
15702 {
15703 queue_t *q1 = q;
15704 char *cp;
15705 char interf_name[LIFNAMSIZ];
15706 uint_t ppa = *(uint_t *)mp->b_cont->b_cont->b_rptr;
15707
15708 if (q->q_next == NULL) {
15709 ip1dbg((
15710 "if_unitsel: IF_UNITSEL: no q_next\n"));
15711 return (EINVAL);
15712 }
15713
15714 if (((ill_t *)(q->q_ptr))->ill_name[0] != '\0')
15715 return (EALREADY);
15716
15717 do {
15718 q1 = q1->q_next;
15719 } while (q1->q_next);
15720 cp = q1->q_qinfo->qi_minfo->mi_idname;
15721 (void) sprintf(interf_name, "%s%d", cp, ppa);
15722
15723 /*
15724 * Here we are not going to delay the ioack until after
15725 * ACKs from DL_ATTACH_REQ/DL_BIND_REQ. So no need to save the
15726 * original ioctl message before sending the requests.
15727 */
15728 return (ipif_set_values(q, mp, interf_name, &ppa));
15729 }
15730
15731 /* ARGSUSED */
15732 int
15733 ip_sioctl_sifname(ipif_t *dummy_ipif, sin_t *dummy_sin, queue_t *q, mblk_t *mp,
15734 ip_ioctl_cmd_t *ipip, void *dummy_ifreq)
15735 {
15736 return (ENXIO);
15737 }
15738
15739 /*
15740 * Create any IRE_BROADCAST entries for `ipif', and store those entries in
 * `irep'. Returns a pointer to the next free `irep' entry.
15742 * A mirror exists in ipif_delete_bcast_ires().
15743 *
15744 * The management of any "extra" or seemingly duplicate IRE_BROADCASTs is
15745 * done in ire_add.
15746 */
15747 static ire_t **
15748 ipif_create_bcast_ires(ipif_t *ipif, ire_t **irep)
15749 {
15750 ipaddr_t addr;
15751 ipaddr_t netmask = ip_net_mask(ipif->ipif_lcl_addr);
15752 ipaddr_t subnetmask = ipif->ipif_net_mask;
15753 ill_t *ill = ipif->ipif_ill;
15754 zoneid_t zoneid = ipif->ipif_zoneid;
15755
15756 ip1dbg(("ipif_create_bcast_ires: creating broadcast IREs\n"));
15757
15758 ASSERT(ipif->ipif_flags & IPIF_BROADCAST);
15759 ASSERT(!(ipif->ipif_flags & IPIF_NOXMIT));
15760
15761 if (ipif->ipif_lcl_addr == INADDR_ANY ||
15762 (ipif->ipif_flags & IPIF_NOLOCAL))
15763 netmask = htonl(IN_CLASSA_NET); /* fallback */
15764
15765 irep = ire_create_bcast(ill, 0, zoneid, irep);
15766 irep = ire_create_bcast(ill, INADDR_BROADCAST, zoneid, irep);
15767
15768 /*
15769 * For backward compatibility, we create net broadcast IREs based on
15770 * the old "IP address class system", since some old machines only
15771 * respond to these class derived net broadcast. However, we must not
15772 * create these net broadcast IREs if the subnetmask is shorter than
15773 * the IP address class based derived netmask. Otherwise, we may
15774 * create a net broadcast address which is the same as an IP address
15775 * on the subnet -- and then TCP will refuse to talk to that address.
15776 */
15777 if (netmask < subnetmask) {
15778 addr = netmask & ipif->ipif_subnet;
15779 irep = ire_create_bcast(ill, addr, zoneid, irep);
15780 irep = ire_create_bcast(ill, ~netmask | addr, zoneid, irep);
15781 }
15782
15783 /*
15784 * Don't create IRE_BROADCAST IREs for the interface if the subnetmask
15785 * is 0xFFFFFFFF, as an IRE_LOCAL for that interface is already
15786 * created. Creating these broadcast IREs will only create confusion
15787 * as `addr' will be the same as the IP address.
15788 */
15789 if (subnetmask != 0xFFFFFFFF) {
15790 addr = ipif->ipif_subnet;
15791 irep = ire_create_bcast(ill, addr, zoneid, irep);
15792 irep = ire_create_bcast(ill, ~subnetmask | addr, zoneid, irep);
15793 }
15794
15795 return (irep);
15796 }
15797
15798 /*
15799 * Mirror of ipif_create_bcast_ires()
15800 */
15801 static void
15802 ipif_delete_bcast_ires(ipif_t *ipif)
15803 {
15804 ipaddr_t addr;
15805 ipaddr_t netmask = ip_net_mask(ipif->ipif_lcl_addr);
15806 ipaddr_t subnetmask = ipif->ipif_net_mask;
15807 ill_t *ill = ipif->ipif_ill;
15808 zoneid_t zoneid = ipif->ipif_zoneid;
15809 ire_t *ire;
15810
15811 ASSERT(ipif->ipif_flags & IPIF_BROADCAST);
15812 ASSERT(!(ipif->ipif_flags & IPIF_NOXMIT));
15813
15814 if (ipif->ipif_lcl_addr == INADDR_ANY ||
15815 (ipif->ipif_flags & IPIF_NOLOCAL))
15816 netmask = htonl(IN_CLASSA_NET); /* fallback */
15817
15818 ire = ire_lookup_bcast(ill, 0, zoneid);
15819 ASSERT(ire != NULL);
15820 ire_delete(ire); ire_refrele(ire);
15821 ire = ire_lookup_bcast(ill, INADDR_BROADCAST, zoneid);
15822 ASSERT(ire != NULL);
15823 ire_delete(ire); ire_refrele(ire);
15824
15825 /*
15826 * For backward compatibility, we create net broadcast IREs based on
15827 * the old "IP address class system", since some old machines only
15828 * respond to these class derived net broadcast. However, we must not
15829 * create these net broadcast IREs if the subnetmask is shorter than
15830 * the IP address class based derived netmask. Otherwise, we may
15831 * create a net broadcast address which is the same as an IP address
15832 * on the subnet -- and then TCP will refuse to talk to that address.
15833 */
15834 if (netmask < subnetmask) {
15835 addr = netmask & ipif->ipif_subnet;
15836 ire = ire_lookup_bcast(ill, addr, zoneid);
15837 ASSERT(ire != NULL);
15838 ire_delete(ire); ire_refrele(ire);
15839 ire = ire_lookup_bcast(ill, ~netmask | addr, zoneid);
15840 ASSERT(ire != NULL);
15841 ire_delete(ire); ire_refrele(ire);
15842 }
15843
15844 /*
15845 * Don't create IRE_BROADCAST IREs for the interface if the subnetmask
15846 * is 0xFFFFFFFF, as an IRE_LOCAL for that interface is already
15847 * created. Creating these broadcast IREs will only create confusion
15848 * as `addr' will be the same as the IP address.
15849 */
15850 if (subnetmask != 0xFFFFFFFF) {
15851 addr = ipif->ipif_subnet;
15852 ire = ire_lookup_bcast(ill, addr, zoneid);
15853 ASSERT(ire != NULL);
15854 ire_delete(ire); ire_refrele(ire);
15855 ire = ire_lookup_bcast(ill, ~subnetmask | addr, zoneid);
15856 ASSERT(ire != NULL);
15857 ire_delete(ire); ire_refrele(ire);
15858 }
15859 }
15860
15861 /*
15862 * Extract both the flags (including IFF_CANTCHANGE) such as IFF_IPV*
15863 * from lifr_flags and the name from lifr_name.
15864 * Set IFF_IPV* and ill_isv6 prior to doing the lookup
15865 * since ipif_lookup_on_name uses the _isv6 flags when matching.
15866 * Returns EINPROGRESS when mp has been consumed by queueing it on
15867 * ipx_pending_mp and the ioctl will complete in ip_rput.
15868 *
15869 * Can operate on either a module or a driver queue.
15870 * Returns an error if not a module queue.
15871 */
15872 /* ARGSUSED */
15873 int
15874 ip_sioctl_slifname(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
15875 ip_ioctl_cmd_t *ipip, void *if_req)
15876 {
15877 ill_t *ill = q->q_ptr;
15878 phyint_t *phyi;
15879 ip_stack_t *ipst;
15880 struct lifreq *lifr = if_req;
15881 uint64_t new_flags;
15882
15883 ASSERT(ipif != NULL);
15884 ip1dbg(("ip_sioctl_slifname %s\n", lifr->lifr_name));
15885
15886 if (q->q_next == NULL) {
15887 ip1dbg(("if_sioctl_slifname: SIOCSLIFNAME: no q_next\n"));
15888 return (EINVAL);
15889 }
15890
15891 /*
15892 * If we are not writer on 'q' then this interface exists already
15893 * and previous lookups (ip_extract_lifreq()) found this ipif --
15894 * so return EALREADY.
15895 */
15896 if (ill != ipif->ipif_ill)
15897 return (EALREADY);
15898
15899 if (ill->ill_name[0] != '\0')
15900 return (EALREADY);
15901
15902 /*
15903 * If there's another ill already with the requested name, ensure
15904 * that it's of the same type. Otherwise, ill_phyint_reinit() will
15905 * fuse together two unrelated ills, which will cause chaos.
15906 */
15907 ipst = ill->ill_ipst;
15908 phyi = avl_find(&ipst->ips_phyint_g_list->phyint_list_avl_by_name,
15909 lifr->lifr_name, NULL);
15910 if (phyi != NULL) {
15911 ill_t *ill_mate = phyi->phyint_illv4;
15912
15913 if (ill_mate == NULL)
15914 ill_mate = phyi->phyint_illv6;
15915 ASSERT(ill_mate != NULL);
15916
15917 if (ill_mate->ill_media->ip_m_mac_type !=
15918 ill->ill_media->ip_m_mac_type) {
15919 ip1dbg(("if_sioctl_slifname: SIOCSLIFNAME: attempt to "
15920 "use the same ill name on differing media\n"));
15921 return (EINVAL);
15922 }
15923 }
15924
15925 /*
15926 * We start off as IFF_IPV4 in ipif_allocate and become
	 * IFF_IPV4 or IFF_IPV6 here depending on the lifr_flags value.
	 * The only flags that we read from user space are IFF_IPV4,
	 * IFF_IPV6, IFF_NOLINKLOCAL, and IFF_BROADCAST.
15930 *
	 * This ill has not been inserted into the global list, so we
	 * are still single threaded and don't need any lock.
	 *
	 * Sanity check the flags.
15935 */
15936
15937 if ((lifr->lifr_flags & IFF_BROADCAST) &&
15938 ((lifr->lifr_flags & IFF_IPV6) ||
15939 (!ill->ill_needs_attach && ill->ill_bcast_addr_length == 0))) {
15940 ip1dbg(("ip_sioctl_slifname: link not broadcast capable "
15941 "or IPv6 i.e., no broadcast \n"));
15942 return (EINVAL);
15943 }
15944
15945 new_flags =
15946 lifr->lifr_flags & (IFF_IPV6|IFF_IPV4|IFF_BROADCAST);
15947
15948 if ((new_flags ^ (IFF_IPV6|IFF_IPV4)) == 0) {
15949 ip1dbg(("ip_sioctl_slifname: flags must be exactly one of "
15950 "IFF_IPV4 or IFF_IPV6\n"));
15951 return (EINVAL);
15952 }
15953
15954 /*
15955 * We always start off as IPv4, so only need to check for IPv6.
15956 */
15957 if ((new_flags & IFF_IPV6) != 0) {
15958 ill->ill_flags |= ILLF_IPV6;
15959 ill->ill_flags &= ~ILLF_IPV4;
15960
15961 if (lifr->lifr_flags & IFF_NOLINKLOCAL)
15962 ill->ill_flags |= ILLF_NOLINKLOCAL;
15963 }
15964
15965 if ((new_flags & IFF_BROADCAST) != 0)
15966 ipif->ipif_flags |= IPIF_BROADCAST;
15967 else
15968 ipif->ipif_flags &= ~IPIF_BROADCAST;
15969
15970 /* We started off as V4. */
15971 if (ill->ill_flags & ILLF_IPV6) {
15972 ill->ill_phyint->phyint_illv6 = ill;
15973 ill->ill_phyint->phyint_illv4 = NULL;
15974 }
15975
15976 return (ipif_set_values(q, mp, lifr->lifr_name, &lifr->lifr_ppa));
15977 }
15978
15979 /* ARGSUSED */
15980 int
15981 ip_sioctl_slifname_restart(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
15982 ip_ioctl_cmd_t *ipip, void *if_req)
15983 {
15984 /*
15985 * ill_phyint_reinit merged the v4 and v6 into a single
15986 * ipsq. We might not have been able to complete the
15987 * slifname in ipif_set_values, if we could not become
	 * exclusive. If so, restart it here.
15989 */
15990 return (ipif_set_values_tail(ipif->ipif_ill, ipif, mp, q));
15991 }
15992
15993 /*
15994 * Return a pointer to the ipif which matches the index, IP version type and
15995 * zoneid.
15996 */
15997 ipif_t *
15998 ipif_lookup_on_ifindex(uint_t index, boolean_t isv6, zoneid_t zoneid,
15999 ip_stack_t *ipst)
16000 {
16001 ill_t *ill;
16002 ipif_t *ipif = NULL;
16003
16004 ill = ill_lookup_on_ifindex(index, isv6, ipst);
16005 if (ill != NULL) {
16006 mutex_enter(&ill->ill_lock);
16007 for (ipif = ill->ill_ipif; ipif != NULL;
16008 ipif = ipif->ipif_next) {
16009 if (!IPIF_IS_CONDEMNED(ipif) && (zoneid == ALL_ZONES ||
16010 zoneid == ipif->ipif_zoneid ||
16011 ipif->ipif_zoneid == ALL_ZONES)) {
16012 ipif_refhold_locked(ipif);
16013 break;
16014 }
16015 }
16016 mutex_exit(&ill->ill_lock);
16017 ill_refrele(ill);
16018 }
16019 return (ipif);
16020 }
16021
16022 /*
16023 * Change an existing physical interface's index. If the new index
16024 * is acceptable we update the index and the phyint_list_avl_by_index tree.
16025 * Finally, we update other systems which may have a dependence on the
16026 * index value.
16027 */
16028 /* ARGSUSED */
16029 int
16030 ip_sioctl_slifindex(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
16031 ip_ioctl_cmd_t *ipip, void *ifreq)
16032 {
16033 ill_t *ill;
16034 phyint_t *phyi;
16035 struct ifreq *ifr = (struct ifreq *)ifreq;
16036 struct lifreq *lifr = (struct lifreq *)ifreq;
16037 uint_t old_index, index;
16038 ip_stack_t *ipst = ipif->ipif_ill->ill_ipst;
16039 avl_index_t where;
16040
16041 if (ipip->ipi_cmd_type == IF_CMD)
16042 index = ifr->ifr_index;
16043 else
16044 index = lifr->lifr_index;
16045
16046 /*
	 * Only allow this on a physical interface; index zero is illegal.
16048 */
16049 ill = ipif->ipif_ill;
16050 phyi = ill->ill_phyint;
16051 if (ipif->ipif_id != 0 || index == 0 || index > IF_INDEX_MAX) {
16052 return (EINVAL);
16053 }
16054
16055 /* If the index is not changing, no work to do */
16056 if (phyi->phyint_ifindex == index)
16057 return (0);
16058
16059 /*
16060 * Use phyint_exists() to determine if the new interface index
16061 * is already in use. If the index is unused then we need to
16062 * change the phyint's position in the phyint_list_avl_by_index
16063 * tree. If we do not do this, subsequent lookups (using the new
16064 * index value) will not find the phyint.
16065 */
16066 rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
16067 if (phyint_exists(index, ipst)) {
16068 rw_exit(&ipst->ips_ill_g_lock);
16069 return (EEXIST);
16070 }
16071
16072 /*
	 * The new index is unused. Set it in the phyint. However, we must
	 * not forget to trigger the NE_IFINDEX_CHANGE event before the
	 * ifindex changes; the event must be bound to the old ifindex value.
16076 */
16077 ill_nic_event_dispatch(ill, 0, NE_IFINDEX_CHANGE,
16078 &index, sizeof (index));
16079
16080 old_index = phyi->phyint_ifindex;
16081 phyi->phyint_ifindex = index;
16082
16083 avl_remove(&ipst->ips_phyint_g_list->phyint_list_avl_by_index, phyi);
16084 (void) avl_find(&ipst->ips_phyint_g_list->phyint_list_avl_by_index,
16085 &index, &where);
16086 avl_insert(&ipst->ips_phyint_g_list->phyint_list_avl_by_index,
16087 phyi, where);
16088 rw_exit(&ipst->ips_ill_g_lock);
16089
16090 /* Update SCTP's ILL list */
16091 sctp_ill_reindex(ill, old_index);
16092
16093 /* Send the routing sockets message */
16094 ip_rts_ifmsg(ipif, RTSQ_DEFAULT);
16095 if (ILL_OTHER(ill))
16096 ip_rts_ifmsg(ILL_OTHER(ill)->ill_ipif, RTSQ_DEFAULT);
16097
16098 /* Perhaps ilgs should use this ill */
16099 update_conn_ill(NULL, ill->ill_ipst);
16100 return (0);
16101 }
16102
16103 /* ARGSUSED */
16104 int
16105 ip_sioctl_get_lifindex(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
16106 ip_ioctl_cmd_t *ipip, void *ifreq)
16107 {
16108 struct ifreq *ifr = (struct ifreq *)ifreq;
16109 struct lifreq *lifr = (struct lifreq *)ifreq;
16110
16111 ip1dbg(("ip_sioctl_get_lifindex(%s:%u %p)\n",
16112 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
16113 /* Get the interface index */
16114 if (ipip->ipi_cmd_type == IF_CMD) {
16115 ifr->ifr_index = ipif->ipif_ill->ill_phyint->phyint_ifindex;
16116 } else {
16117 lifr->lifr_index = ipif->ipif_ill->ill_phyint->phyint_ifindex;
16118 }
16119 return (0);
16120 }
16121
16122 /* ARGSUSED */
16123 int
16124 ip_sioctl_get_lifzone(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
16125 ip_ioctl_cmd_t *ipip, void *ifreq)
16126 {
16127 struct lifreq *lifr = (struct lifreq *)ifreq;
16128
16129 ip1dbg(("ip_sioctl_get_lifzone(%s:%u %p)\n",
16130 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
16131 /* Get the interface zone */
16132 ASSERT(ipip->ipi_cmd_type == LIF_CMD);
16133 lifr->lifr_zoneid = ipif->ipif_zoneid;
16134 return (0);
16135 }
16136
16137 /*
16138 * Set the zoneid of an interface.
16139 */
16140 /* ARGSUSED */
16141 int
16142 ip_sioctl_slifzone(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
16143 ip_ioctl_cmd_t *ipip, void *ifreq)
16144 {
16145 struct lifreq *lifr = (struct lifreq *)ifreq;
16146 int err = 0;
16147 boolean_t need_up = B_FALSE;
16148 zone_t *zptr;
16149 zone_status_t status;
16150 zoneid_t zoneid;
16151
16152 ASSERT(ipip->ipi_cmd_type == LIF_CMD);
16153 if ((zoneid = lifr->lifr_zoneid) == ALL_ZONES) {
16154 if (!is_system_labeled())
16155 return (ENOTSUP);
16156 zoneid = GLOBAL_ZONEID;
16157 }
16158
16159 /* cannot assign instance zero to a non-global zone */
16160 if (ipif->ipif_id == 0 && zoneid != GLOBAL_ZONEID)
16161 return (ENOTSUP);
16162
16163 /*
16164 * Cannot assign to a zone that doesn't exist or is shutting down. In
16165 * the event of a race with the zone shutdown processing, since IP
16166 * serializes this ioctl and SIOCGLIFCONF/SIOCLIFREMOVEIF, we know the
16167 * interface will be cleaned up even if the zone is shut down
16168 * immediately after the status check. If the interface can't be brought
16169 * down right away, and the zone is shut down before the restart
16170 * function is called, we resolve the possible races by rechecking the
16171 * zone status in the restart function.
16172 */
16173 if ((zptr = zone_find_by_id(zoneid)) == NULL)
16174 return (EINVAL);
16175 status = zone_status_get(zptr);
16176 zone_rele(zptr);
16177
16178 if (status != ZONE_IS_READY && status != ZONE_IS_RUNNING)
16179 return (EINVAL);
16180
16181 if (ipif->ipif_flags & IPIF_UP) {
16182 /*
16183 * If the interface is already marked up,
16184 * we call ipif_down which will take care
16185 * of ditching any IREs that have been set
16186 * up based on the old interface address.
16187 */
16188 err = ipif_logical_down(ipif, q, mp);
16189 if (err == EINPROGRESS)
16190 return (err);
16191 (void) ipif_down_tail(ipif);
16192 need_up = B_TRUE;
16193 }
16194
16195 err = ip_sioctl_slifzone_tail(ipif, lifr->lifr_zoneid, q, mp, need_up);
16196 return (err);
16197 }
16198
16199 static int
16200 ip_sioctl_slifzone_tail(ipif_t *ipif, zoneid_t zoneid,
16201 queue_t *q, mblk_t *mp, boolean_t need_up)
16202 {
16203 int err = 0;
16204 ip_stack_t *ipst;
16205
16206 ip1dbg(("ip_sioctl_zoneid_tail(%s:%u %p)\n",
16207 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
16208
16209 if (CONN_Q(q))
16210 ipst = CONNQ_TO_IPST(q);
16211 else
16212 ipst = ILLQ_TO_IPST(q);
16213
16214 /*
16215 * For exclusive stacks we don't allow a different zoneid than
16216 * global.
16217 */
16218 if (ipst->ips_netstack->netstack_stackid != GLOBAL_NETSTACKID &&
16219 zoneid != GLOBAL_ZONEID)
16220 return (EINVAL);
16221
16222 /* Set the new zone id. */
16223 ipif->ipif_zoneid = zoneid;
16224
16225 /* Update sctp list */
16226 sctp_update_ipif(ipif, SCTP_IPIF_UPDATE);
16227
16228 /* The default multicast interface might have changed */
16229 ire_increment_multicast_generation(ipst, ipif->ipif_ill->ill_isv6);
16230
16231 if (need_up) {
16232 /*
16233 * Now bring the interface back up. If this
16234 * is the only IPIF for the ILL, ipif_up
16235 * will have to re-bind to the device, so
16236 * we may get back EINPROGRESS, in which
16237 * case, this IOCTL will get completed in
16238 * ip_rput_dlpi when we see the DL_BIND_ACK.
16239 */
16240 err = ipif_up(ipif, q, mp);
16241 }
16242 return (err);
16243 }
16244
16245 /* ARGSUSED */
16246 int
16247 ip_sioctl_slifzone_restart(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
16248 ip_ioctl_cmd_t *ipip, void *if_req)
16249 {
16250 struct lifreq *lifr = (struct lifreq *)if_req;
16251 zoneid_t zoneid;
16252 zone_t *zptr;
16253 zone_status_t status;
16254
16255 ASSERT(ipip->ipi_cmd_type == LIF_CMD);
16256 if ((zoneid = lifr->lifr_zoneid) == ALL_ZONES)
16257 zoneid = GLOBAL_ZONEID;
16258
16259 ip1dbg(("ip_sioctl_slifzone_restart(%s:%u %p)\n",
16260 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
16261
16262 /*
16263 * We recheck the zone status to resolve the following race condition:
16264 * 1) process sends SIOCSLIFZONE to put hme0:1 in zone "myzone";
16265 * 2) hme0:1 is up and can't be brought down right away;
16266 * ip_sioctl_slifzone() returns EINPROGRESS and the request is queued;
16267 * 3) zone "myzone" is halted; the zone status switches to
16268 * 'shutting_down' and the zones framework sends SIOCGLIFCONF to list
16269 * the interfaces to remove - hme0:1 is not returned because it's not
16270 * yet in "myzone", so it won't be removed;
16271 * 4) the restart function for SIOCSLIFZONE is called; without the
16272 * status check here, we would have hme0:1 in "myzone" after it's been
16273 * destroyed.
16274 * Note that if the status check fails, we need to bring the interface
16275 * back to its state prior to ip_sioctl_slifzone(), hence the call to
16276 * ipif_up_done[_v6]().
16277 */
16278 status = ZONE_IS_UNINITIALIZED;
16279 if ((zptr = zone_find_by_id(zoneid)) != NULL) {
16280 status = zone_status_get(zptr);
16281 zone_rele(zptr);
16282 }
16283 if (status != ZONE_IS_READY && status != ZONE_IS_RUNNING) {
16284 if (ipif->ipif_isv6) {
16285 (void) ipif_up_done_v6(ipif);
16286 } else {
16287 (void) ipif_up_done(ipif);
16288 }
16289 return (EINVAL);
16290 }
16291
16292 (void) ipif_down_tail(ipif);
16293
16294 return (ip_sioctl_slifzone_tail(ipif, lifr->lifr_zoneid, q, mp,
16295 B_TRUE));
16296 }
16297
16298 /*
16299 * Return the number of addresses on `ill' with one or more of the values
16300 * in `set' set and all of the values in `clear' clear.
16301 */
16302 static uint_t
16303 ill_flagaddr_cnt(const ill_t *ill, uint64_t set, uint64_t clear)
16304 {
16305 ipif_t *ipif;
16306 uint_t cnt = 0;
16307
16308 ASSERT(IAM_WRITER_ILL(ill));
16309
16310 for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next)
16311 if ((ipif->ipif_flags & set) && !(ipif->ipif_flags & clear))
16312 cnt++;
16313
16314 return (cnt);
16315 }
16316
16317 /*
16318 * Return the number of migratable addresses on `ill' that are under
16319 * application control.
16320 */
16321 uint_t
16322 ill_appaddr_cnt(const ill_t *ill)
16323 {
16324 return (ill_flagaddr_cnt(ill, IPIF_DHCPRUNNING | IPIF_ADDRCONF,
16325 IPIF_NOFAILOVER));
16326 }
16327
16328 /*
16329 * Return the number of point-to-point addresses on `ill'.
16330 */
16331 uint_t
16332 ill_ptpaddr_cnt(const ill_t *ill)
16333 {
16334 return (ill_flagaddr_cnt(ill, IPIF_POINTOPOINT, 0));
16335 }
16336
16337 /* ARGSUSED */
16338 int
16339 ip_sioctl_get_lifusesrc(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
16340 ip_ioctl_cmd_t *ipip, void *ifreq)
16341 {
16342 struct lifreq *lifr = ifreq;
16343
16344 ASSERT(q->q_next == NULL);
16345 ASSERT(CONN_Q(q));
16346
16347 ip1dbg(("ip_sioctl_get_lifusesrc(%s:%u %p)\n",
16348 ipif->ipif_ill->ill_name, ipif->ipif_id, (void *)ipif));
16349 lifr->lifr_index = ipif->ipif_ill->ill_usesrc_ifindex;
16350 ip1dbg(("ip_sioctl_get_lifusesrc:lifr_index = %d\n", lifr->lifr_index));
16351
16352 return (0);
16353 }
16354
16355 /* Find the previous ILL in this usesrc group */
16356 static ill_t *
16357 ill_prev_usesrc(ill_t *uill)
16358 {
16359 ill_t *ill;
16360
16361 for (ill = uill->ill_usesrc_grp_next;
16362 ASSERT(ill), ill->ill_usesrc_grp_next != uill;
16363 ill = ill->ill_usesrc_grp_next)
16364 /* do nothing */;
16365 return (ill);
16366 }
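
/*
 * A sketch of the (circular, singly-linked) usesrc group, with illustrative
 * interface names: the usesrc ILL heads the list, each client points at the
 * next member, and the last member points back at the head:
 *
 *	vni0 (head, ill_usesrc_ifindex == 0)
 *	  -> hme0 (ill_usesrc_ifindex == vni0's ifindex)
 *	  -> eri0 (ill_usesrc_ifindex == vni0's ifindex)
 *	  -> vni0
 *
 * ill_prev_usesrc(eri0) would walk this ring and return hme0.
 */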
16367
16368 /*
16369 * Release all members of the usesrc group. This routine is called
16370 * from ill_delete when the interface being unplumbed is the
16371 * group head.
16372 *
16373  * This silently clears the usesrc that ifconfig set up.
16374  * An alternative would be to keep that ifindex, and drop packets on the floor
16375  * since no source address can be selected.
16376  * Even if we keep the current semantics, we don't need a lock and a linked
16377  * list: we could walk all the ills, checking for an ill_usesrc_ifindex that
16378  * matches the one being removed. The issue is how we return the usesrc users
16379  * (SIOCGLIFSRCOF): we want to be able to find the ills which have an
16380  * ill_usesrc_ifindex matching a target ill. We could also do that with an
16381  * ill walk, but the walker would need to insert in the ioctl response.
16382 */
16383 static void
16384 ill_disband_usesrc_group(ill_t *uill)
16385 {
16386 ill_t *next_ill, *tmp_ill;
16387 ip_stack_t *ipst = uill->ill_ipst;
16388
16389 ASSERT(RW_WRITE_HELD(&ipst->ips_ill_g_usesrc_lock));
16390 next_ill = uill->ill_usesrc_grp_next;
16391
16392 do {
16393 ASSERT(next_ill != NULL);
16394 tmp_ill = next_ill->ill_usesrc_grp_next;
16395 ASSERT(tmp_ill != NULL);
16396 next_ill->ill_usesrc_grp_next = NULL;
16397 next_ill->ill_usesrc_ifindex = 0;
16398 next_ill = tmp_ill;
16399 } while (next_ill->ill_usesrc_ifindex != 0);
16400 uill->ill_usesrc_grp_next = NULL;
16401 }
16402
16403 /*
16404 * Remove the client usesrc ILL from the list and relink to a new list
16405 */
16406 int
16407 ill_relink_usesrc_ills(ill_t *ucill, ill_t *uill, uint_t ifindex)
16408 {
16409 ill_t *ill, *tmp_ill;
16410 ip_stack_t *ipst = ucill->ill_ipst;
16411
16412 ASSERT((ucill != NULL) && (ucill->ill_usesrc_grp_next != NULL) &&
16413 (uill != NULL) && RW_WRITE_HELD(&ipst->ips_ill_g_usesrc_lock));
16414
16415 	/*
16416 	 * Check that the usesrc client ILL passed in is not already
16417 	 * in use as a usesrc ILL (i.e., one whose source addresses are
16418 	 * in use) and that the usesrc ILL is not already in use as a
16419 	 * usesrc client ILL.
16420 	 */
16421 if ((ucill->ill_usesrc_ifindex == 0) ||
16422 (uill->ill_usesrc_ifindex != 0)) {
16423 return (-1);
16424 }
16425
16426 ill = ill_prev_usesrc(ucill);
16427 ASSERT(ill->ill_usesrc_grp_next != NULL);
16428
16429 /* Remove from the current list */
16430 if (ill->ill_usesrc_grp_next->ill_usesrc_grp_next == ill) {
16431 /* Only two elements in the list */
16432 ASSERT(ill->ill_usesrc_ifindex == 0);
16433 ill->ill_usesrc_grp_next = NULL;
16434 } else {
16435 ill->ill_usesrc_grp_next = ucill->ill_usesrc_grp_next;
16436 }
16437
16438 if (ifindex == 0) {
16439 ucill->ill_usesrc_ifindex = 0;
16440 ucill->ill_usesrc_grp_next = NULL;
16441 return (0);
16442 }
16443
16444 ucill->ill_usesrc_ifindex = ifindex;
16445 tmp_ill = uill->ill_usesrc_grp_next;
16446 uill->ill_usesrc_grp_next = ucill;
16447 ucill->ill_usesrc_grp_next =
16448 (tmp_ill != NULL) ? tmp_ill : uill;
16449 return (0);
16450 }
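
/*
 * A worked example with illustrative names: relinking client hme0 from the
 * group vni0 -> hme0 -> vni0 into the group vni1 -> eri0 -> vni1 (ifindex ==
 * vni1's ifindex) leaves vni0 alone (ill_usesrc_grp_next == NULL) and yields
 * vni1 -> hme0 -> eri0 -> vni1. Passing ifindex == 0 instead just unlinks
 * hme0 from its current group.
 */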
16451
16452 /*
16453  * Set the ill_usesrc_ifindex and ill_usesrc_grp_next fields. See
16454  * synchronization notes in ip.c for locking details.
16455  */
16456 /* ARGSUSED */
16457 int
16458 ip_sioctl_slifusesrc(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
16459 ip_ioctl_cmd_t *ipip, void *ifreq)
16460 {
16461 struct lifreq *lifr = (struct lifreq *)ifreq;
16462 boolean_t isv6 = B_FALSE, reset_flg = B_FALSE;
16463 ill_t *usesrc_ill, *usesrc_cli_ill = ipif->ipif_ill;
16464 int err = 0, ret;
16465 uint_t ifindex;
16466 ipsq_t *ipsq = NULL;
16467 ip_stack_t *ipst = ipif->ipif_ill->ill_ipst;
16468
16469 ASSERT(IAM_WRITER_IPIF(ipif));
16470 ASSERT(q->q_next == NULL);
16471 ASSERT(CONN_Q(q));
16472
16473 isv6 = (Q_TO_CONN(q))->conn_family == AF_INET6;
16474
16475 ifindex = lifr->lifr_index;
16476 if (ifindex == 0) {
16477 if (usesrc_cli_ill->ill_usesrc_grp_next == NULL) {
16478 /* non usesrc group interface, nothing to reset */
16479 return (0);
16480 }
16481 ifindex = usesrc_cli_ill->ill_usesrc_ifindex;
16482 /* valid reset request */
16483 reset_flg = B_TRUE;
16484 }
16485
16486 usesrc_ill = ill_lookup_on_ifindex(ifindex, isv6, ipst);
16487 if (usesrc_ill == NULL)
16488 return (ENXIO);
16489 if (usesrc_ill == ipif->ipif_ill) {
16490 ill_refrele(usesrc_ill);
16491 return (EINVAL);
16492 }
16493
16494 ipsq = ipsq_try_enter(NULL, usesrc_ill, q, mp, ip_process_ioctl,
16495 NEW_OP, B_TRUE);
16496 if (ipsq == NULL) {
16497 err = EINPROGRESS;
16498 /* Operation enqueued on the ipsq of the usesrc ILL */
16499 goto done;
16500 }
16501
16502 /* USESRC isn't currently supported with IPMP */
16503 if (IS_IPMP(usesrc_ill) || IS_UNDER_IPMP(usesrc_ill)) {
16504 err = ENOTSUP;
16505 goto done;
16506 }
16507
16508 /*
16509 * USESRC isn't compatible with the STANDBY flag. (STANDBY is only
16510 * used by IPMP underlying interfaces, but someone might think it's
16511 * more general and try to use it independently with VNI.)
16512 */
16513 if (usesrc_ill->ill_phyint->phyint_flags & PHYI_STANDBY) {
16514 err = ENOTSUP;
16515 goto done;
16516 }
16517
16518 /*
16519 * If the client is already in use as a usesrc_ill or a usesrc_ill is
16520 * already a client then return EINVAL
16521 */
16522 if (IS_USESRC_ILL(usesrc_cli_ill) || IS_USESRC_CLI_ILL(usesrc_ill)) {
16523 err = EINVAL;
16524 goto done;
16525 }
16526
16527 /*
16528 * If the ill_usesrc_ifindex field is already set to what it needs to
16529 * be then this is a duplicate operation.
16530 */
16531 if (!reset_flg && usesrc_cli_ill->ill_usesrc_ifindex == ifindex) {
16532 err = 0;
16533 goto done;
16534 }
16535
16536 ip1dbg(("ip_sioctl_slifusesrc: usesrc_cli_ill %s, usesrc_ill %s,"
16537 " v6 = %d", usesrc_cli_ill->ill_name, usesrc_ill->ill_name,
16538 usesrc_ill->ill_isv6));
16539
16540 /*
16541 * ill_g_usesrc_lock global lock protects the ill_usesrc_grp_next
16542 * and the ill_usesrc_ifindex fields
16543 */
16544 rw_enter(&ipst->ips_ill_g_usesrc_lock, RW_WRITER);
16545
16546 if (reset_flg) {
16547 ret = ill_relink_usesrc_ills(usesrc_cli_ill, usesrc_ill, 0);
16548 if (ret != 0) {
16549 err = EINVAL;
16550 }
16551 rw_exit(&ipst->ips_ill_g_usesrc_lock);
16552 goto done;
16553 }
16554
16555 /*
16556 * Four possibilities to consider:
16557 * 1. Both usesrc_ill and usesrc_cli_ill are not part of any usesrc grp
16558 * 2. usesrc_ill is part of a group but usesrc_cli_ill isn't
16559 * 3. usesrc_cli_ill is part of a group but usesrc_ill isn't
16560 * 4. Both are part of their respective usesrc groups
16561 */
16562 if ((usesrc_ill->ill_usesrc_grp_next == NULL) &&
16563 (usesrc_cli_ill->ill_usesrc_grp_next == NULL)) {
16564 ASSERT(usesrc_ill->ill_usesrc_ifindex == 0);
16565 usesrc_cli_ill->ill_usesrc_ifindex = ifindex;
16566 usesrc_ill->ill_usesrc_grp_next = usesrc_cli_ill;
16567 usesrc_cli_ill->ill_usesrc_grp_next = usesrc_ill;
16568 } else if ((usesrc_ill->ill_usesrc_grp_next != NULL) &&
16569 (usesrc_cli_ill->ill_usesrc_grp_next == NULL)) {
16570 usesrc_cli_ill->ill_usesrc_ifindex = ifindex;
16571 /* Insert at head of list */
16572 usesrc_cli_ill->ill_usesrc_grp_next =
16573 usesrc_ill->ill_usesrc_grp_next;
16574 usesrc_ill->ill_usesrc_grp_next = usesrc_cli_ill;
16575 } else {
16576 ret = ill_relink_usesrc_ills(usesrc_cli_ill, usesrc_ill,
16577 ifindex);
16578 if (ret != 0)
16579 err = EINVAL;
16580 }
16581 rw_exit(&ipst->ips_ill_g_usesrc_lock);
16582
16583 done:
16584 if (ipsq != NULL)
16585 ipsq_exit(ipsq);
16586 /* The refrele on the lifr_name ipif is done by ip_process_ioctl */
16587 ill_refrele(usesrc_ill);
16588
16589 /* Let conn_ixa caching know that source address selection changed */
16590 ip_update_source_selection(ipst);
16591
16592 return (err);
16593 }
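
/*
 * From userland this is normally driven by ifconfig's usesrc subcommand,
 * e.g. (an illustrative sketch):
 *
 *	ifconfig hme0 usesrc vni0	lifr_index = vni0's ifindex
 *	ifconfig hme0 usesrc none	lifr_index = 0, i.e. a reset
 *
 * In the first case hme0 becomes a usesrc client of vni0 and selects its
 * source addresses from among vni0's addresses.
 */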
16594
16595 /* ARGSUSED */
16596 int
16597 ip_sioctl_get_dadstate(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
16598 ip_ioctl_cmd_t *ipip, void *if_req)
16599 {
16600 struct lifreq *lifr = (struct lifreq *)if_req;
16601 ill_t *ill = ipif->ipif_ill;
16602
16603 /*
16604 * Need a lock since IFF_UP can be set even when there are
16605 * references to the ipif.
16606 */
16607 mutex_enter(&ill->ill_lock);
16608 if ((ipif->ipif_flags & IPIF_UP) && ipif->ipif_addr_ready == 0)
16609 lifr->lifr_dadstate = DAD_IN_PROGRESS;
16610 else
16611 lifr->lifr_dadstate = DAD_DONE;
16612 mutex_exit(&ill->ill_lock);
16613 return (0);
16614 }
16615
16616 /*
16617  * Comparison function used by the phyint_list_avl_by_index AVL tree.
16618 */
16619 static int
16620 ill_phyint_compare_index(const void *index_ptr, const void *phyip)
16621 {
16623 uint_t index;
16624
16625 ASSERT(phyip != NULL && index_ptr != NULL);
16626
16627 index = *((uint_t *)index_ptr);
16628 /*
16629 * let the phyint with the lowest index be on top.
16630 */
16631 if (((phyint_t *)phyip)->phyint_ifindex < index)
16632 return (1);
16633 if (((phyint_t *)phyip)->phyint_ifindex > index)
16634 return (-1);
16635 return (0);
16636 }
16637
16638 /*
16639  * Comparison function used by the phyint_list_avl_by_name AVL tree.
16640 */
16641 static int
16642 ill_phyint_compare_name(const void *name_ptr, const void *phyip)
16643 {
16644 ill_t *ill;
16645 int res = 0;
16646
16647 ASSERT(phyip != NULL && name_ptr != NULL);
16648
16649 if (((phyint_t *)phyip)->phyint_illv4)
16650 ill = ((phyint_t *)phyip)->phyint_illv4;
16651 else
16652 ill = ((phyint_t *)phyip)->phyint_illv6;
16653 ASSERT(ill != NULL);
16654
16655 res = strcmp(ill->ill_name, (char *)name_ptr);
16656 if (res > 0)
16657 return (1);
16658 else if (res < 0)
16659 return (-1);
16660 return (0);
16661 }
16662
16663 /*
16664 * This function is called on the unplumb path via ill_glist_delete() when
16665 * there are no ills left on the phyint and thus the phyint can be freed.
16666 */
16667 static void
16668 phyint_free(phyint_t *phyi)
16669 {
16670 ip_stack_t *ipst = PHYINT_TO_IPST(phyi);
16671
16672 ASSERT(phyi->phyint_illv4 == NULL && phyi->phyint_illv6 == NULL);
16673
16674 /*
16675 * If this phyint was an IPMP meta-interface, blow away the group.
16676 * This is safe to do because all of the illgrps have already been
16677 * removed by I_PUNLINK, and thus SIOCSLIFGROUPNAME cannot find us.
16678 * If we're cleaning up as a result of failed initialization,
16679 * phyint_grp may be NULL.
16680 */
16681 if ((phyi->phyint_flags & PHYI_IPMP) && (phyi->phyint_grp != NULL)) {
16682 rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
16683 ipmp_grp_destroy(phyi->phyint_grp);
16684 phyi->phyint_grp = NULL;
16685 rw_exit(&ipst->ips_ipmp_lock);
16686 }
16687
16688 /*
16689 * If this interface was under IPMP, take it out of the group.
16690 */
16691 if (phyi->phyint_grp != NULL)
16692 ipmp_phyint_leave_grp(phyi);
16693
16694 /*
16695 * Delete the phyint and disassociate its ipsq. The ipsq itself
16696 * will be freed in ipsq_exit().
16697 */
16698 phyi->phyint_ipsq->ipsq_phyint = NULL;
16699 phyi->phyint_name[0] = '\0';
16700
16701 mi_free(phyi);
16702 }
16703
16704 /*
16705  * Attach the ill to the phyint structure, which can be shared by both the
16706  * IPv4 and IPv6 ills. ill_init allocates a phyint just to hold flags. This
16707  * function is called from ipif_set_values and ill_lookup_on_name (for
16708  * loopback), where we know the name of the ill. We look up the ill and, if
16709  * one is already present with that name, use its phyint. Otherwise we
16710  * reuse the one allocated by ill_init.
16711 */
16712 static void
16713 ill_phyint_reinit(ill_t *ill)
16714 {
16715 boolean_t isv6 = ill->ill_isv6;
16716 phyint_t *phyi_old;
16717 phyint_t *phyi;
16718 avl_index_t where = 0;
16719 ill_t *ill_other = NULL;
16720 ip_stack_t *ipst = ill->ill_ipst;
16721
16722 ASSERT(RW_WRITE_HELD(&ipst->ips_ill_g_lock));
16723
16724 phyi_old = ill->ill_phyint;
16725 ASSERT(isv6 || (phyi_old->phyint_illv4 == ill &&
16726 phyi_old->phyint_illv6 == NULL));
16727 ASSERT(!isv6 || (phyi_old->phyint_illv6 == ill &&
16728 phyi_old->phyint_illv4 == NULL));
16729 ASSERT(phyi_old->phyint_ifindex == 0);
16730
16731 /*
16732 * Now that our ill has a name, set it in the phyint.
16733 */
16734 (void) strlcpy(ill->ill_phyint->phyint_name, ill->ill_name, LIFNAMSIZ);
16735
16736 phyi = avl_find(&ipst->ips_phyint_g_list->phyint_list_avl_by_name,
16737 ill->ill_name, &where);
16738
16739 /*
16740 * 1. We grabbed the ill_g_lock before inserting this ill into
16741 * the global list of ills. So no other thread could have located
16742 * this ill and hence the ipsq of this ill is guaranteed to be empty.
16743 * 2. Now locate the other protocol instance of this ill.
16744 * 3. Now grab both ill locks in the right order, and the phyint lock of
16745 * the new ipsq. Holding ill locks + ill_g_lock ensures that the ipsq
16746 * of neither ill can change.
16747 * 4. Merge the phyint and thus the ipsq as well of this ill onto the
16748 * other ill.
16749 * 5. Release all locks.
16750 */
16751
16752 /*
16753 * Look for IPv4 if we are initializing IPv6 or look for IPv6 if
16754 * we are initializing IPv4.
16755 */
16756 if (phyi != NULL) {
16757 ill_other = (isv6) ? phyi->phyint_illv4 : phyi->phyint_illv6;
16758 ASSERT(ill_other->ill_phyint != NULL);
16759 ASSERT((isv6 && !ill_other->ill_isv6) ||
16760 (!isv6 && ill_other->ill_isv6));
16761 GRAB_ILL_LOCKS(ill, ill_other);
16762 /*
16763 * We are potentially throwing away phyint_flags which
16764 * could be different from the one that we obtain from
16765 * ill_other->ill_phyint. But it is okay as we are assuming
16766 * that the state maintained within IP is correct.
16767 */
16768 mutex_enter(&phyi->phyint_lock);
16769 if (isv6) {
16770 ASSERT(phyi->phyint_illv6 == NULL);
16771 phyi->phyint_illv6 = ill;
16772 } else {
16773 ASSERT(phyi->phyint_illv4 == NULL);
16774 phyi->phyint_illv4 = ill;
16775 }
16776
16777 /*
16778 * Delete the old phyint and make its ipsq eligible
16779 * to be freed in ipsq_exit().
16780 */
16781 phyi_old->phyint_illv4 = NULL;
16782 phyi_old->phyint_illv6 = NULL;
16783 phyi_old->phyint_ipsq->ipsq_phyint = NULL;
16784 phyi_old->phyint_name[0] = '\0';
16785 mi_free(phyi_old);
16786 } else {
16787 mutex_enter(&ill->ill_lock);
16788 /*
16789 * We don't need to acquire any lock, since
16790 * the ill is not yet visible globally and we
16791 * have not yet released the ill_g_lock.
16792 */
16793 phyi = phyi_old;
16794 mutex_enter(&phyi->phyint_lock);
16795 /* XXX We need a recovery strategy here. */
16796 if (!phyint_assign_ifindex(phyi, ipst))
16797 cmn_err(CE_PANIC, "phyint_assign_ifindex() failed");
16798
16799 avl_insert(&ipst->ips_phyint_g_list->phyint_list_avl_by_name,
16800 (void *)phyi, where);
16801
16802 (void) avl_find(&ipst->ips_phyint_g_list->
16803 phyint_list_avl_by_index,
16804 &phyi->phyint_ifindex, &where);
16805 avl_insert(&ipst->ips_phyint_g_list->phyint_list_avl_by_index,
16806 (void *)phyi, where);
16807 }
16808
16809 /*
16810 * Reassigning ill_phyint automatically reassigns the ipsq also.
16811 	 * The pending mp is not affected because it is kept on a per-ill basis.
16812 */
16813 ill->ill_phyint = phyi;
16814
16815 /*
16816 * Now that the phyint's ifindex has been assigned, complete the
16817 	 * remaining ifindex-dependent initialization.
16818 */
16819 ill->ill_ip_mib->ipIfStatsIfIndex = ill->ill_phyint->phyint_ifindex;
16820 if (ill->ill_isv6) {
16821 ill->ill_icmp6_mib->ipv6IfIcmpIfIndex =
16822 ill->ill_phyint->phyint_ifindex;
16823 ill->ill_mcast_type = ipst->ips_mld_max_version;
16824 } else {
16825 ill->ill_mcast_type = ipst->ips_igmp_max_version;
16826 }
16827
16828 /*
16829 * Generate an event within the hooks framework to indicate that
16830 * a new interface has just been added to IP. For this event to
16831 * be generated, the network interface must, at least, have an
16832 * ifindex assigned to it. (We don't generate the event for
16833 * loopback since ill_lookup_on_name() has its own NE_PLUMB event.)
16834 *
16835 * This needs to be run inside the ill_g_lock perimeter to ensure
16836 * that the ordering of delivered events to listeners matches the
16837 * order of them in the kernel.
16838 */
16839 if (!IS_LOOPBACK(ill)) {
16840 ill_nic_event_dispatch(ill, 0, NE_PLUMB, ill->ill_name,
16841 ill->ill_name_length);
16842 }
16843 RELEASE_ILL_LOCKS(ill, ill_other);
16844 mutex_exit(&phyi->phyint_lock);
16845 }
16846
16847 /*
16848 * Notify any downstream modules of the name of this interface.
16849 * An M_IOCTL is used even though we don't expect a successful reply.
16850 * Any reply message from the driver (presumably an M_IOCNAK) will
16851 * eventually get discarded somewhere upstream. The message format is
16852 * simply an SIOCSLIFNAME ioctl just as might be sent from ifconfig
16853 * to IP.
16854 */
16855 static void
16856 ip_ifname_notify(ill_t *ill, queue_t *q)
16857 {
16858 mblk_t *mp1, *mp2;
16859 struct iocblk *iocp;
16860 struct lifreq *lifr;
16861
16862 mp1 = mkiocb(SIOCSLIFNAME);
16863 if (mp1 == NULL)
16864 return;
16865 mp2 = allocb(sizeof (struct lifreq), BPRI_HI);
16866 if (mp2 == NULL) {
16867 freeb(mp1);
16868 return;
16869 }
16870
16871 mp1->b_cont = mp2;
16872 iocp = (struct iocblk *)mp1->b_rptr;
16873 iocp->ioc_count = sizeof (struct lifreq);
16874
16875 lifr = (struct lifreq *)mp2->b_rptr;
16876 mp2->b_wptr += sizeof (struct lifreq);
16877 bzero(lifr, sizeof (struct lifreq));
16878
16879 (void) strncpy(lifr->lifr_name, ill->ill_name, LIFNAMSIZ);
16880 lifr->lifr_ppa = ill->ill_ppa;
16881 lifr->lifr_flags = (ill->ill_flags & (ILLF_IPV4|ILLF_IPV6));
16882
16883 DTRACE_PROBE3(ill__dlpi, char *, "ip_ifname_notify",
16884 char *, "SIOCSLIFNAME", ill_t *, ill);
16885 putnext(q, mp1);
16886 }
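
/*
 * The message built above, sketched:
 *
 *	mp1: M_IOCTL, iocblk with ioc_cmd == SIOCSLIFNAME and
 *	     ioc_count == sizeof (struct lifreq)
 *	  b_cont -> mp2: struct lifreq carrying lifr_name, lifr_ppa and
 *	     lifr_flags (ILLF_IPV4/ILLF_IPV6 only)
 */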
16887
16888 static int
16889 ipif_set_values_tail(ill_t *ill, ipif_t *ipif, mblk_t *mp, queue_t *q)
16890 {
16891 int err;
16892 ip_stack_t *ipst = ill->ill_ipst;
16893 phyint_t *phyi = ill->ill_phyint;
16894
16895 /*
16896 * Now that ill_name is set, the configuration for the IPMP
16897 * meta-interface can be performed.
16898 */
16899 if (IS_IPMP(ill)) {
16900 rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
16901 /*
16902 * If phyi->phyint_grp is NULL, then this is the first IPMP
16903 * meta-interface and we need to create the IPMP group.
16904 */
16905 if (phyi->phyint_grp == NULL) {
16906 /*
16907 * If someone has renamed another IPMP group to have
16908 * the same name as our interface, bail.
16909 */
16910 if (ipmp_grp_lookup(ill->ill_name, ipst) != NULL) {
16911 rw_exit(&ipst->ips_ipmp_lock);
16912 return (EEXIST);
16913 }
16914 phyi->phyint_grp = ipmp_grp_create(ill->ill_name, phyi);
16915 if (phyi->phyint_grp == NULL) {
16916 rw_exit(&ipst->ips_ipmp_lock);
16917 return (ENOMEM);
16918 }
16919 }
16920 rw_exit(&ipst->ips_ipmp_lock);
16921 }
16922
16923 /* Tell downstream modules where they are. */
16924 ip_ifname_notify(ill, q);
16925
16926 /*
16927 * ill_dl_phys returns EINPROGRESS in the usual case.
16928 * Error cases are ENOMEM ...
16929 */
16930 err = ill_dl_phys(ill, ipif, mp, q);
16931
16932 if (ill->ill_isv6) {
16933 mutex_enter(&ipst->ips_mld_slowtimeout_lock);
16934 if (ipst->ips_mld_slowtimeout_id == 0) {
16935 ipst->ips_mld_slowtimeout_id = timeout(mld_slowtimo,
16936 (void *)ipst,
16937 MSEC_TO_TICK(MCAST_SLOWTIMO_INTERVAL));
16938 }
16939 mutex_exit(&ipst->ips_mld_slowtimeout_lock);
16940 } else {
16941 mutex_enter(&ipst->ips_igmp_slowtimeout_lock);
16942 if (ipst->ips_igmp_slowtimeout_id == 0) {
16943 ipst->ips_igmp_slowtimeout_id = timeout(igmp_slowtimo,
16944 (void *)ipst,
16945 MSEC_TO_TICK(MCAST_SLOWTIMO_INTERVAL));
16946 }
16947 mutex_exit(&ipst->ips_igmp_slowtimeout_lock);
16948 }
16949
16950 return (err);
16951 }
16952
16953 /*
16954  * Common routine for ppa and ifname setting. Must be called as writer
 * (exclusive).
16955 *
16956 * Returns EINPROGRESS when mp has been consumed by queueing it on
16957 * ipx_pending_mp and the ioctl will complete in ip_rput.
16958 *
16959  * NOTE : If ppa is UINT_MAX, we assign the next valid ppa and return
16960 * the new name and new ppa in lifr_name and lifr_ppa respectively.
16961  * For SLIFNAME, we pass these values back to userland.
16962 */
16963 static int
16964 ipif_set_values(queue_t *q, mblk_t *mp, char *interf_name, uint_t *new_ppa_ptr)
16965 {
16966 ill_t *ill;
16967 ipif_t *ipif;
16968 ipsq_t *ipsq;
16969 char *ppa_ptr;
16970 char *old_ptr;
16971 char old_char;
16972 int error;
16973 ip_stack_t *ipst;
16974
16975 ip1dbg(("ipif_set_values: interface %s\n", interf_name));
16976 ASSERT(q->q_next != NULL);
16977 ASSERT(interf_name != NULL);
16978
16979 ill = (ill_t *)q->q_ptr;
16980 ipst = ill->ill_ipst;
16981
16982 ASSERT(ill->ill_ipst != NULL);
16983 ASSERT(ill->ill_name[0] == '\0');
16984 ASSERT(IAM_WRITER_ILL(ill));
16985 ASSERT((mi_strlen(interf_name) + 1) <= LIFNAMSIZ);
16986 ASSERT(ill->ill_ppa == UINT_MAX);
16987
16988 ill->ill_defend_start = ill->ill_defend_count = 0;
16989 /* The ppa is sent down by ifconfig or is chosen */
16990 if ((ppa_ptr = ill_get_ppa_ptr(interf_name)) == NULL) {
16991 return (EINVAL);
16992 }
16993
16994 /*
16995 	 * Make sure the ppa passed in is the same as the ppa in the name.
16996 	 * This check is not made when ppa == UINT_MAX; in that case the ppa
16997 	 * in the name could be anything. The system will choose a ppa and
16998 	 * update new_ppa_ptr and interf_name to contain the chosen ppa.
16999 */
17000 if (*new_ppa_ptr != UINT_MAX) {
17001 /* stoi changes the pointer */
17002 old_ptr = ppa_ptr;
17003 /*
17004 * ifconfig passed in 0 for the ppa for DLPI 1 style devices
17005 * (they don't have an externally visible ppa). We assign one
17006 * here so that we can manage the interface. Note that in
17007 * the past this value was always 0 for DLPI 1 drivers.
17008 */
17009 if (*new_ppa_ptr == 0)
17010 *new_ppa_ptr = stoi(&old_ptr);
17011 else if (*new_ppa_ptr != (uint_t)stoi(&old_ptr))
17012 return (EINVAL);
17013 }
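	/*
	 * A sketch: for interf_name "hme3", ppa_ptr points at the trailing
	 * "3", stoi() yields 3, and the check above requires
	 * *new_ppa_ptr == 3 unless UINT_MAX was passed in.
	 */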
17014 /*
17015 	 * Terminate the string before the ppa,
17016 	 * saving the character at that location.
17017 */
17018 old_char = ppa_ptr[0];
17019 ppa_ptr[0] = '\0';
17020
17021 ill->ill_ppa = *new_ppa_ptr;
17022 /*
17023 * Finish as much work now as possible before calling ill_glist_insert
17024 * which makes the ill globally visible and also merges it with the
17025 * other protocol instance of this phyint. The remaining work is
17026 * done after entering the ipsq which may happen sometime later.
17027 */
17028 ipif = ill->ill_ipif;
17029
17030 /* We didn't do this when we allocated ipif in ip_ll_subnet_defaults */
17031 ipif_assign_seqid(ipif);
17032
17033 if (!(ill->ill_flags & (ILLF_IPV4|ILLF_IPV6)))
17034 ill->ill_flags |= ILLF_IPV4;
17035
17036 ASSERT(ipif->ipif_next == NULL); /* Only one ipif on ill */
17037 ASSERT((ipif->ipif_flags & IPIF_UP) == 0);
17038
17039 if (ill->ill_flags & ILLF_IPV6) {
17040
17041 ill->ill_isv6 = B_TRUE;
17042 ill_set_inputfn(ill);
17043 if (ill->ill_rq != NULL) {
17044 ill->ill_rq->q_qinfo = &iprinitv6;
17045 }
17046
17047 /* Keep the !IN6_IS_ADDR_V4MAPPED assertions happy */
17048 ipif->ipif_v6lcl_addr = ipv6_all_zeros;
17049 ipif->ipif_v6subnet = ipv6_all_zeros;
17050 ipif->ipif_v6net_mask = ipv6_all_zeros;
17051 ipif->ipif_v6brd_addr = ipv6_all_zeros;
17052 ipif->ipif_v6pp_dst_addr = ipv6_all_zeros;
17053 ill->ill_reachable_retrans_time = ND_RETRANS_TIMER;
17054 /*
17055 		 * Point-to-point or non-multicast-capable
17056 * interfaces won't do NUD unless explicitly
17057 * configured to do so.
17058 */
17059 if (ipif->ipif_flags & IPIF_POINTOPOINT ||
17060 !(ill->ill_flags & ILLF_MULTICAST)) {
17061 ill->ill_flags |= ILLF_NONUD;
17062 }
17063 		/* Make sure the IPv4-specific flag is not set on an IPv6 ill */
17064 if (ill->ill_flags & ILLF_NOARP) {
17065 /*
17066 * Note: xresolv interfaces will eventually need
17067 * NOARP set here as well, but that will require
17068 * those external resolvers to have some
17069 * knowledge of that flag and act appropriately.
17070 * Not to be changed at present.
17071 */
17072 ill->ill_flags &= ~ILLF_NOARP;
17073 }
17074 /*
17075 * Set the ILLF_ROUTER flag according to the global
17076 * IPv6 forwarding policy.
17077 */
17078 if (ipst->ips_ipv6_forwarding != 0)
17079 ill->ill_flags |= ILLF_ROUTER;
17080 } else if (ill->ill_flags & ILLF_IPV4) {
17081 ill->ill_isv6 = B_FALSE;
17082 ill_set_inputfn(ill);
17083 ill->ill_reachable_retrans_time = ARP_RETRANS_TIMER;
17084 IN6_IPADDR_TO_V4MAPPED(INADDR_ANY, &ipif->ipif_v6lcl_addr);
17085 IN6_IPADDR_TO_V4MAPPED(INADDR_ANY, &ipif->ipif_v6subnet);
17086 IN6_IPADDR_TO_V4MAPPED(INADDR_ANY, &ipif->ipif_v6net_mask);
17087 IN6_IPADDR_TO_V4MAPPED(INADDR_ANY, &ipif->ipif_v6brd_addr);
17088 IN6_IPADDR_TO_V4MAPPED(INADDR_ANY, &ipif->ipif_v6pp_dst_addr);
17089 /*
17090 * Set the ILLF_ROUTER flag according to the global
17091 * IPv4 forwarding policy.
17092 */
17093 if (ipst->ips_ip_forwarding != 0)
17094 ill->ill_flags |= ILLF_ROUTER;
17095 }
17096
17097 ASSERT(ill->ill_phyint != NULL);
17098
17099 /*
17100 	 * The ipIfStatsIfIndex and ipv6IfIcmpIfIndex assignments will
17101 	 * be completed in ill_glist_insert -> ill_phyint_reinit.
17102 */
17103 if (!ill_allocate_mibs(ill))
17104 return (ENOMEM);
17105
17106 /*
17107 * Pick a default sap until we get the DL_INFO_ACK back from
17108 * the driver.
17109 */
17110 ill->ill_sap = (ill->ill_isv6) ? ill->ill_media->ip_m_ipv6sap :
17111 ill->ill_media->ip_m_ipv4sap;
17112
17113 ill->ill_ifname_pending = 1;
17114 ill->ill_ifname_pending_err = 0;
17115
17116 /*
17117 * When the first ipif comes up in ipif_up_done(), multicast groups
17118 * that were joined while this ill was not bound to the DLPI link need
17119 * to be recovered by ill_recover_multicast().
17120 */
17121 ill->ill_need_recover_multicast = 1;
17122
17123 ill_refhold(ill);
17124 rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
17125 if ((error = ill_glist_insert(ill, interf_name,
17126 (ill->ill_flags & ILLF_IPV6) == ILLF_IPV6)) > 0) {
17127 ill->ill_ppa = UINT_MAX;
17128 ill->ill_name[0] = '\0';
17129 /*
17130 * undo null termination done above.
17131 */
17132 ppa_ptr[0] = old_char;
17133 rw_exit(&ipst->ips_ill_g_lock);
17134 ill_refrele(ill);
17135 return (error);
17136 }
17137
17138 ASSERT(ill->ill_name_length <= LIFNAMSIZ);
17139
17140 /*
17141 	 * When we return, the buffer pointed to by interf_name should contain
17142 	 * the same name as in ill_name.
17143 	 * If a ppa was chosen by the system (the ppa passed in was UINT_MAX),
17144 	 * the buffer pointed to by new_ppa_ptr would not contain the right ppa,
17145 	 * so copy the full name and update the ppa ptr.
17146 	 * When the ppa passed in != UINT_MAX all values are correct; just undo
17147 	 * the null termination. This saves a bcopy.
17148 */
17149 if (*new_ppa_ptr == UINT_MAX) {
17150 bcopy(ill->ill_name, interf_name, ill->ill_name_length);
17151 *new_ppa_ptr = ill->ill_ppa;
17152 } else {
17153 /*
17154 * undo null termination done above.
17155 */
17156 ppa_ptr[0] = old_char;
17157 }
17158
17159 /* Let SCTP know about this ILL */
17160 sctp_update_ill(ill, SCTP_ILL_INSERT);
17161
17162 /*
17163 * ill_glist_insert has made the ill visible globally, and
17164 * ill_phyint_reinit could have changed the ipsq. At this point,
17165 * we need to hold the ips_ill_g_lock across the call to enter the
17166 * ipsq to enforce atomicity and prevent reordering. In the event
17167 * the ipsq has changed, and if the new ipsq is currently busy,
17168 * we need to make sure that this half-completed ioctl is ahead of
17169 * any subsequent ioctl. We achieve this by not dropping the
17170 * ips_ill_g_lock which prevents any ill lookup itself thereby
17171 * ensuring that new ioctls can't start.
17172 */
17173 ipsq = ipsq_try_enter_internal(ill, q, mp, ip_reprocess_ioctl, NEW_OP,
17174 B_TRUE);
17175
17176 rw_exit(&ipst->ips_ill_g_lock);
17177 ill_refrele(ill);
17178 if (ipsq == NULL)
17179 return (EINPROGRESS);
17180
17181 /*
17182 * If ill_phyint_reinit() changed our ipsq, then start on the new ipsq.
17183 */
17184 if (ipsq->ipsq_xop->ipx_current_ipif == NULL)
17185 ipsq_current_start(ipsq, ipif, SIOCSLIFNAME);
17186 else
17187 ASSERT(ipsq->ipsq_xop->ipx_current_ipif == ipif);
17188
17189 error = ipif_set_values_tail(ill, ipif, mp, q);
17190 ipsq_exit(ipsq);
17191 if (error != 0 && error != EINPROGRESS) {
17192 /*
17193 * restore previous values
17194 */
17195 ill->ill_isv6 = B_FALSE;
17196 ill_set_inputfn(ill);
17197 }
17198 return (error);
17199 }
17200
17201 void
17202 ipif_init(ip_stack_t *ipst)
17203 {
17204 int i;
17205
17206 for (i = 0; i < MAX_G_HEADS; i++) {
17207 ipst->ips_ill_g_heads[i].ill_g_list_head =
17208 (ill_if_t *)&ipst->ips_ill_g_heads[i];
17209 ipst->ips_ill_g_heads[i].ill_g_list_tail =
17210 (ill_if_t *)&ipst->ips_ill_g_heads[i];
17211 }
17212
17213 avl_create(&ipst->ips_phyint_g_list->phyint_list_avl_by_index,
17214 ill_phyint_compare_index,
17215 sizeof (phyint_t),
17216 offsetof(struct phyint, phyint_avl_by_index));
17217 avl_create(&ipst->ips_phyint_g_list->phyint_list_avl_by_name,
17218 ill_phyint_compare_name,
17219 sizeof (phyint_t),
17220 offsetof(struct phyint, phyint_avl_by_name));
17221 }
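
/*
 * Note that both AVL trees are keyed directly by the search value rather
 * than by an element, e.g. (a sketch):
 *
 *	uint_t ifindex = 2;
 *	phyint_t *phyi;
 *
 *	phyi = avl_find(&ipst->ips_phyint_g_list->phyint_list_avl_by_index,
 *	    &ifindex, NULL);
 */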
17222
17223 /*
17224 * Save enough information so that we can recreate the IRE if
17225 * the interface goes down and then up.
17226 */
17227 void
17228 ill_save_ire(ill_t *ill, ire_t *ire)
17229 {
17230 mblk_t *save_mp;
17231
17232 save_mp = allocb(sizeof (ifrt_t), BPRI_MED);
17233 if (save_mp != NULL) {
17234 ifrt_t *ifrt;
17235
17236 save_mp->b_wptr += sizeof (ifrt_t);
17237 ifrt = (ifrt_t *)save_mp->b_rptr;
17238 bzero(ifrt, sizeof (ifrt_t));
17239 ifrt->ifrt_type = ire->ire_type;
17240 if (ire->ire_ipversion == IPV4_VERSION) {
17241 ASSERT(!ill->ill_isv6);
17242 ifrt->ifrt_addr = ire->ire_addr;
17243 ifrt->ifrt_gateway_addr = ire->ire_gateway_addr;
17244 ifrt->ifrt_setsrc_addr = ire->ire_setsrc_addr;
17245 ifrt->ifrt_mask = ire->ire_mask;
17246 } else {
17247 ASSERT(ill->ill_isv6);
17248 ifrt->ifrt_v6addr = ire->ire_addr_v6;
17249 /* ire_gateway_addr_v6 can change due to RTM_CHANGE */
17250 mutex_enter(&ire->ire_lock);
17251 ifrt->ifrt_v6gateway_addr = ire->ire_gateway_addr_v6;
17252 mutex_exit(&ire->ire_lock);
17253 ifrt->ifrt_v6setsrc_addr = ire->ire_setsrc_addr_v6;
17254 ifrt->ifrt_v6mask = ire->ire_mask_v6;
17255 }
17256 ifrt->ifrt_flags = ire->ire_flags;
17257 ifrt->ifrt_zoneid = ire->ire_zoneid;
17258 mutex_enter(&ill->ill_saved_ire_lock);
17259 save_mp->b_cont = ill->ill_saved_ire_mp;
17260 ill->ill_saved_ire_mp = save_mp;
17261 ill->ill_saved_ire_cnt++;
17262 mutex_exit(&ill->ill_saved_ire_lock);
17263 }
17264 }
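
/*
 * The saved routes form a b_cont-linked stack of ifrt_t blocks, newest
 * first (a sketch):
 *
 *	ill->ill_saved_ire_mp -> [ifrt_t] -> [ifrt_t] -> ... -> NULL
 *
 * with ill_saved_ire_cnt tracking its length. The recovery code walks this
 * chain to recreate the IREs when the interface comes back up.
 */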
17265
17266 /*
17267 * Remove one entry from ill_saved_ire_mp.
17268 */
17269 void
17270 ill_remove_saved_ire(ill_t *ill, ire_t *ire)
17271 {
17272 mblk_t **mpp;
17273 mblk_t *mp;
17274 ifrt_t *ifrt;
17275
17276 /* Remove from ill_saved_ire_mp list if it is there */
17277 mutex_enter(&ill->ill_saved_ire_lock);
17278 for (mpp = &ill->ill_saved_ire_mp; *mpp != NULL;
17279 mpp = &(*mpp)->b_cont) {
17280 in6_addr_t gw_addr_v6;
17281
17282 /*
17283 * On a given ill, the tuple of address, gateway, mask,
17284 * ire_type, and zoneid is unique for each saved IRE.
17285 */
17286 mp = *mpp;
17287 ifrt = (ifrt_t *)mp->b_rptr;
17288 /* ire_gateway_addr_v6 can change - need lock */
17289 mutex_enter(&ire->ire_lock);
17290 gw_addr_v6 = ire->ire_gateway_addr_v6;
17291 mutex_exit(&ire->ire_lock);
17292
17293 if (ifrt->ifrt_zoneid != ire->ire_zoneid ||
17294 ifrt->ifrt_type != ire->ire_type)
17295 continue;
17296
17297 if (ill->ill_isv6 ?
17298 (IN6_ARE_ADDR_EQUAL(&ifrt->ifrt_v6addr,
17299 &ire->ire_addr_v6) &&
17300 IN6_ARE_ADDR_EQUAL(&ifrt->ifrt_v6gateway_addr,
17301 &gw_addr_v6) &&
17302 IN6_ARE_ADDR_EQUAL(&ifrt->ifrt_v6mask,
17303 &ire->ire_mask_v6)) :
17304 (ifrt->ifrt_addr == ire->ire_addr &&
17305 ifrt->ifrt_gateway_addr == ire->ire_gateway_addr &&
17306 ifrt->ifrt_mask == ire->ire_mask)) {
17307 *mpp = mp->b_cont;
17308 ill->ill_saved_ire_cnt--;
17309 freeb(mp);
17310 break;
17311 }
17312 }
17313 mutex_exit(&ill->ill_saved_ire_lock);
17314 }
17315
17316 /*
17317 * IP multirouting broadcast routes handling
17318 * Append CGTP broadcast IREs to regular ones created
17319 * at ifconfig time.
17320 * The usage is a route add <cgtp_bc> <nic_bc> -multirt i.e., both
17321 * the destination and the gateway are broadcast addresses.
17322 * The caller has verified that the destination is an IRE_BROADCAST and that
17323 * RTF_MULTIRT was set. Here if the gateway is a broadcast address, then
17324 * we create a MULTIRT IRE_BROADCAST.
17325 * Note that the IRE_HOST created by ire_rt_add doesn't get found by anything
17326 * since the IRE_BROADCAST takes precedence; ire_add_v4 does head insertion.
17327 */
17328 static void
17329 ip_cgtp_bcast_add(ire_t *ire, ip_stack_t *ipst)
17330 {
17331 ire_t *ire_prim;
17332
17333 ASSERT(ire != NULL);
17334
17335 ire_prim = ire_ftable_lookup_v4(ire->ire_gateway_addr, 0, 0,
17336 IRE_BROADCAST, NULL, ALL_ZONES, NULL, MATCH_IRE_TYPE, 0, ipst,
17337 NULL);
17338 if (ire_prim != NULL) {
17339 /*
17340 * We are in the special case of broadcasts for
17341 * CGTP. We add an IRE_BROADCAST that holds
17342 * the RTF_MULTIRT flag, the destination
17343 * address and the low level
17344 * info of ire_prim. In other words, CGTP
17345 * broadcast is added to the redundant ipif.
17346 */
17347 ill_t *ill_prim;
17348 ire_t *bcast_ire;
17349
17350 ill_prim = ire_prim->ire_ill;
17351
17352 ip2dbg(("ip_cgtp_filter_bcast_add: ire_prim %p, ill_prim %p\n",
17353 (void *)ire_prim, (void *)ill_prim));
17354
17355 bcast_ire = ire_create(
17356 (uchar_t *)&ire->ire_addr,
17357 (uchar_t *)&ip_g_all_ones,
17358 (uchar_t *)&ire->ire_gateway_addr,
17359 IRE_BROADCAST,
17360 ill_prim,
17361 GLOBAL_ZONEID, /* CGTP is only for the global zone */
17362 ire->ire_flags | RTF_KERNEL,
17363 NULL,
17364 ipst);
17365
17366 /*
17367 * Here we assume that ire_add does head insertion so that
17368 * the added IRE_BROADCAST comes before the existing IRE_HOST.
17369 */
17370 if (bcast_ire != NULL) {
17371 if (ire->ire_flags & RTF_SETSRC) {
17372 bcast_ire->ire_setsrc_addr =
17373 ire->ire_setsrc_addr;
17374 }
17375 bcast_ire = ire_add(bcast_ire);
17376 if (bcast_ire != NULL) {
17377 ip2dbg(("ip_cgtp_filter_bcast_add: "
17378 "added bcast_ire %p\n",
17379 (void *)bcast_ire));
17380
17381 ill_save_ire(ill_prim, bcast_ire);
17382 ire_refrele(bcast_ire);
17383 }
17384 }
17385 ire_refrele(ire_prim);
17386 }
17387 }
17388
17389 /*
17390 * IP multirouting broadcast routes handling
17391 * Remove the broadcast ire.
17392 * The usage is a route delete <cgtp_bc> <nic_bc> -multirt i.e., both
17393 * the destination and the gateway are broadcast addresses.
17394 * The caller has only verified that RTF_MULTIRT was set. We check
17395 * that the destination is broadcast and that the gateway is a broadcast
17396 * address, and if so delete the IRE added by ip_cgtp_bcast_add().
17397 */
17398 static void
17399 ip_cgtp_bcast_delete(ire_t *ire, ip_stack_t *ipst)
17400 {
17401 ASSERT(ire != NULL);
17402
17403 if (ip_type_v4(ire->ire_addr, ipst) == IRE_BROADCAST) {
17404 ire_t *ire_prim;
17405
17406 ire_prim = ire_ftable_lookup_v4(ire->ire_gateway_addr, 0, 0,
17407 IRE_BROADCAST, NULL, ALL_ZONES, NULL, MATCH_IRE_TYPE, 0,
17408 ipst, NULL);
17409 if (ire_prim != NULL) {
17410 ill_t *ill_prim;
17411 ire_t *bcast_ire;
17412
17413 ill_prim = ire_prim->ire_ill;
17414
17415 ip2dbg(("ip_cgtp_filter_bcast_delete: "
17416 "ire_prim %p, ill_prim %p\n",
17417 (void *)ire_prim, (void *)ill_prim));
17418
17419 bcast_ire = ire_ftable_lookup_v4(ire->ire_addr, 0,
17420 ire->ire_gateway_addr, IRE_BROADCAST,
17421 ill_prim, ALL_ZONES, NULL,
17422 MATCH_IRE_TYPE | MATCH_IRE_GW | MATCH_IRE_ILL |
17423 MATCH_IRE_MASK, 0, ipst, NULL);
17424
17425 if (bcast_ire != NULL) {
17426 ip2dbg(("ip_cgtp_filter_bcast_delete: "
17427 "looked up bcast_ire %p\n",
17428 (void *)bcast_ire));
17429 ill_remove_saved_ire(bcast_ire->ire_ill,
17430 bcast_ire);
17431 ire_delete(bcast_ire);
17432 ire_refrele(bcast_ire);
17433 }
17434 ire_refrele(ire_prim);
17435 }
17436 }
17437 }
17438
17439 /*
17440 * Derive an interface id from the link layer address.
17441 * Knows about IEEE 802 and IEEE EUI-64 mappings.
17442 */
17443 static void
17444 ip_ether_v6intfid(ill_t *ill, in6_addr_t *v6addr)
17445 {
17446 char *addr;
17447
17448 /*
17449 * Note that some IPv6 interfaces get plumbed over links that claim to
17450 * be DL_ETHER, but don't actually have Ethernet MAC addresses (e.g.
17451 * PPP links). The ETHERADDRL check here ensures that we only set the
17452 * interface ID on IPv6 interfaces above links that actually have real
17453 * Ethernet addresses.
17454 */
17455 if (ill->ill_phys_addr_length == ETHERADDRL) {
17456 /* Form EUI-64 like address */
17457 addr = (char *)&v6addr->s6_addr32[2];
17458 bcopy(ill->ill_phys_addr, addr, 3);
17459 addr[0] ^= 0x2; /* Toggle Universal/Local bit */
17460 addr[3] = (char)0xff;
17461 addr[4] = (char)0xfe;
17462 bcopy(ill->ill_phys_addr + 3, addr + 5, 3);
17463 }
17464 }
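
/*
 * A worked example: the MAC address 00:14:4f:12:34:56 yields the EUI-64
 * style interface ID 02:14:4f:ff:fe:12:34:56 (u/l bit toggled, ff:fe
 * inserted in the middle), i.e. an IID of 0214:4fff:fe12:3456.
 */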
17465
17466 /* ARGSUSED */
17467 static void
17468 ip_nodef_v6intfid(ill_t *ill, in6_addr_t *v6addr)
17469 {
17470 }
17471
17472 typedef struct ipmp_ifcookie {
17473 uint32_t ic_hostid;
17474 char ic_ifname[LIFNAMSIZ];
17475 char ic_zonename[ZONENAME_MAX];
17476 } ipmp_ifcookie_t;
17477
17478 /*
17479 * Construct a pseudo-random interface ID for the IPMP interface that's both
17480 * predictable and (almost) guaranteed to be unique.
17481 */
17482 static void
17483 ip_ipmp_v6intfid(ill_t *ill, in6_addr_t *v6addr)
17484 {
17485 zone_t *zp;
17486 uint8_t *addr;
17487 uchar_t hash[16];
17488 ulong_t hostid;
17489 MD5_CTX ctx;
17490 ipmp_ifcookie_t ic = { 0 };
17491
17492 ASSERT(IS_IPMP(ill));
17493
17494 (void) ddi_strtoul(hw_serial, NULL, 10, &hostid);
17495 ic.ic_hostid = htonl((uint32_t)hostid);
17496
17497 (void) strlcpy(ic.ic_ifname, ill->ill_name, LIFNAMSIZ);
17498
17499 if ((zp = zone_find_by_id(ill->ill_zoneid)) != NULL) {
17500 (void) strlcpy(ic.ic_zonename, zp->zone_name, ZONENAME_MAX);
17501 zone_rele(zp);
17502 }
17503
17504 MD5Init(&ctx);
17505 MD5Update(&ctx, &ic, sizeof (ic));
17506 MD5Final(hash, &ctx);
17507
17508 /*
17509 * Map the hash to an interface ID per the basic approach in RFC3041.
17510 */
17511 addr = &v6addr->s6_addr8[8];
17512 bcopy(hash + 8, addr, sizeof (uint64_t));
17513 	addr[0] &= ~0x2;		/* clear universal/local bit: locally assigned */
17514 }
17515
17516 /*
17517 * Map the multicast in6_addr_t in m_ip6addr to the physaddr for ethernet.
17518 */
17519 static void
17520 ip_ether_v6_mapping(ill_t *ill, uchar_t *m_ip6addr, uchar_t *m_physaddr)
17521 {
17522 phyint_t *phyi = ill->ill_phyint;
17523
17524 /*
17525 * Check PHYI_MULTI_BCAST and length of physical
17526 * address to determine if we use the mapping or the
17527 * broadcast address.
17528 */
17529 if ((phyi->phyint_flags & PHYI_MULTI_BCAST) != 0 ||
17530 ill->ill_phys_addr_length != ETHERADDRL) {
17531 ip_mbcast_mapping(ill, m_ip6addr, m_physaddr);
17532 return;
17533 }
17534 m_physaddr[0] = 0x33;
17535 m_physaddr[1] = 0x33;
17536 m_physaddr[2] = m_ip6addr[12];
17537 m_physaddr[3] = m_ip6addr[13];
17538 m_physaddr[4] = m_ip6addr[14];
17539 m_physaddr[5] = m_ip6addr[15];
17540 }
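
/*
 * A worked example: the solicited-node address ff02::1:ff12:3456 maps to
 * the Ethernet multicast address 33:33:ff:12:34:56, i.e. 33:33 followed
 * by the last four bytes of the IPv6 group address.
 */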
17541
17542 /*
17543 * Map the multicast ipaddr_t in m_ipaddr to the physaddr for ethernet.
17544 */
17545 static void
17546 ip_ether_v4_mapping(ill_t *ill, uchar_t *m_ipaddr, uchar_t *m_physaddr)
17547 {
17548 phyint_t *phyi = ill->ill_phyint;
17549
17550 /*
17551 * Check PHYI_MULTI_BCAST and length of physical
17552 * address to determine if we use the mapping or the
17553 * broadcast address.
17554 */
17555 if ((phyi->phyint_flags & PHYI_MULTI_BCAST) != 0 ||
17556 ill->ill_phys_addr_length != ETHERADDRL) {
17557 ip_mbcast_mapping(ill, m_ipaddr, m_physaddr);
17558 return;
17559 }
17560 m_physaddr[0] = 0x01;
17561 m_physaddr[1] = 0x00;
17562 m_physaddr[2] = 0x5e;
17563 m_physaddr[3] = m_ipaddr[1] & 0x7f;
17564 m_physaddr[4] = m_ipaddr[2];
17565 m_physaddr[5] = m_ipaddr[3];
17566 }
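
/*
 * A worked example: 224.10.8.5 maps to 01:00:5e:0a:08:05. Only the low
 * 23 bits of the group address survive (note the 0x7f mask), so e.g.
 * 224.138.8.5 maps to the same Ethernet address.
 */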
17567
17568 /* ARGSUSED */
17569 static void
17570 ip_mbcast_mapping(ill_t *ill, uchar_t *m_ipaddr, uchar_t *m_physaddr)
17571 {
17572 /*
17573 	 * Used for the MULTI_BCAST case and other cases where we want to
17574 * use the link-layer broadcast address for multicast.
17575 */
17576 uint8_t *bphys_addr;
17577 dl_unitdata_req_t *dlur;
17578
17579 dlur = (dl_unitdata_req_t *)ill->ill_bcast_mp->b_rptr;
17580 if (ill->ill_sap_length < 0) {
17581 bphys_addr = (uchar_t *)dlur +
17582 dlur->dl_dest_addr_offset;
17583 } else {
17584 bphys_addr = (uchar_t *)dlur +
17585 dlur->dl_dest_addr_offset + ill->ill_sap_length;
17586 }
17587
17588 bcopy(bphys_addr, m_physaddr, ill->ill_phys_addr_length);
17589 }
17590
17591 /*
17592 * Derive IPoIB interface id from the link layer address.
17593 */
17594 static void
17595 ip_ib_v6intfid(ill_t *ill, in6_addr_t *v6addr)
17596 {
17597 char *addr;
17598
17599 ASSERT(ill->ill_phys_addr_length == 20);
17600 addr = (char *)&v6addr->s6_addr32[2];
17601 bcopy(ill->ill_phys_addr + 12, addr, 8);
17602 /*
17603 * In IBA 1.1 timeframe, some vendors erroneously set the u/l bit
17604 * in the globally assigned EUI-64 GUID to 1, in violation of IEEE
17605 * rules. In these cases, the IBA considers these GUIDs to be in
17606 * "Modified EUI-64" format, and thus toggling the u/l bit is not
17607 * required; vendors are required not to assign global EUI-64's
17608 * that differ only in u/l bit values, thus guaranteeing uniqueness
17609 * of the interface identifier. Whether the GUID is in modified
17610 * or proper EUI-64 format, the ipv6 identifier must have the u/l
17611 * bit set to 1.
17612 */
17613 addr[0] |= 2; /* Set Universal/Local bit to 1 */
17614 }
17615
17616 /*
17617 * Map the multicast ipaddr_t in m_ipaddr to the physaddr for InfiniBand.
17618 * Note on mapping from multicast IP addresses to IPoIB multicast link
17619 * addresses. IPoIB multicast link addresses are based on IBA link addresses.
17620 * The format of an IPoIB multicast address is:
17621 *
17622 * 4 byte QPN Scope Sign. Pkey
17623 * +--------------------------------------------+
17624 * | 00FFFFFF | FF | 1X | X01B | Pkey | GroupID |
17625 * +--------------------------------------------+
17626 *
17627 * The Scope and Pkey components are properties of the IBA port and
17628 * network interface. They can be ascertained from the broadcast address.
17629 * The Sign. part is the signature, and is 401B for IPv4 and 601B for IPv6.
17630 */
17631 static void
17632 ip_ib_v4_mapping(ill_t *ill, uchar_t *m_ipaddr, uchar_t *m_physaddr)
17633 {
17634 static uint8_t ipv4_g_phys_ibmulti_addr[] = { 0x00, 0xff, 0xff, 0xff,
17635 0xff, 0x10, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
17636 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
17637 uint8_t *bphys_addr;
17638 dl_unitdata_req_t *dlur;
17639
17640 bcopy(ipv4_g_phys_ibmulti_addr, m_physaddr, ill->ill_phys_addr_length);
17641
17642 /*
17643 	 * RFC 4391: the IPv4 MGID is 28 bits long.
17644 */
17645 m_physaddr[16] = m_ipaddr[0] & 0x0f;
17646 m_physaddr[17] = m_ipaddr[1];
17647 m_physaddr[18] = m_ipaddr[2];
17648 m_physaddr[19] = m_ipaddr[3];
17649 
17651 dlur = (dl_unitdata_req_t *)ill->ill_bcast_mp->b_rptr;
17652 if (ill->ill_sap_length < 0) {
17653 bphys_addr = (uchar_t *)dlur + dlur->dl_dest_addr_offset;
17654 } else {
17655 bphys_addr = (uchar_t *)dlur + dlur->dl_dest_addr_offset +
17656 ill->ill_sap_length;
17657 }
17658 /*
17659 * Now fill in the IBA scope/Pkey values from the broadcast address.
17660 */
17661 m_physaddr[5] = bphys_addr[5];
17662 m_physaddr[8] = bphys_addr[8];
17663 m_physaddr[9] = bphys_addr[9];
17664 }
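
/*
 * A worked example: 224.10.8.5 (0xe0, 0x0a, 0x08, 0x05) yields MGID bytes
 * 16..19 of 00:0a:08:05; the leading 0xe0 is reduced to 0x00 by the 0x0f
 * mask, since only the low 28 bits of the group address are kept.
 */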
17665
17666 static void
17667 ip_ib_v6_mapping(ill_t *ill, uchar_t *m_ipaddr, uchar_t *m_physaddr)
17668 {
17669 	static uint8_t ipv6_g_phys_ibmulti_addr[] = { 0x00, 0xff, 0xff, 0xff,
17670 0xff, 0x10, 0x60, 0x1b, 0x00, 0x00, 0x00, 0x00,
17671 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
17672 uint8_t *bphys_addr;
17673 dl_unitdata_req_t *dlur;
17674
17675 	bcopy(ipv6_g_phys_ibmulti_addr, m_physaddr, ill->ill_phys_addr_length);
17676
17677 /*
17678 	 * RFC 4391: the IPv6 MGID is 80 bits long.
17679 */
17680 bcopy(&m_ipaddr[6], &m_physaddr[10], 10);
17681
17682 dlur = (dl_unitdata_req_t *)ill->ill_bcast_mp->b_rptr;
17683 if (ill->ill_sap_length < 0) {
17684 bphys_addr = (uchar_t *)dlur + dlur->dl_dest_addr_offset;
17685 } else {
17686 bphys_addr = (uchar_t *)dlur + dlur->dl_dest_addr_offset +
17687 ill->ill_sap_length;
17688 }
17689 /*
17690 * Now fill in the IBA scope/Pkey values from the broadcast address.
17691 */
17692 m_physaddr[5] = bphys_addr[5];
17693 m_physaddr[8] = bphys_addr[8];
17694 m_physaddr[9] = bphys_addr[9];
17695 }
17696
17697 /*
17698 * Derive IPv6 interface id from an IPv4 link-layer address (e.g. from an IPv4
17699  * tunnel). The IPv4 address simply gets placed in the lower 4 bytes of the
17700  * IPv6 interface id. This is a suggested mechanism described in section 3.7
17701  * of RFC 4213.
17702 */
17703 static void
17704 ip_ipv4_genv6intfid(ill_t *ill, uint8_t *physaddr, in6_addr_t *v6addr)
17705 {
17706 ASSERT(ill->ill_phys_addr_length == sizeof (ipaddr_t));
17707 v6addr->s6_addr32[2] = 0;
17708 bcopy(physaddr, &v6addr->s6_addr32[3], sizeof (ipaddr_t));
17709 }
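
/*
 * A worked example: a tunnel source address of 192.0.2.1 (0xc0000201)
 * yields an interface ID whose low 4 bytes are c0:00:02:01, i.e. a
 * suffix of ::c000:201.
 */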
17710
17711 /*
17712 * Derive IPv6 interface id from an IPv6 link-layer address (e.g. from an IPv6
17713 * tunnel). The lower 8 bytes of the IPv6 address simply become the interface
17714 * id.
17715 */
17716 static void
17717 ip_ipv6_genv6intfid(ill_t *ill, uint8_t *physaddr, in6_addr_t *v6addr)
17718 {
17719 in6_addr_t *v6lladdr = (in6_addr_t *)physaddr;
17720
17721 ASSERT(ill->ill_phys_addr_length == sizeof (in6_addr_t));
17722 bcopy(&v6lladdr->s6_addr32[2], &v6addr->s6_addr32[2], 8);
17723 }
17724
17725 static void
17726 ip_ipv6_v6intfid(ill_t *ill, in6_addr_t *v6addr)
17727 {
17728 ip_ipv6_genv6intfid(ill, ill->ill_phys_addr, v6addr);
17729 }
17730
17731 static void
17732 ip_ipv6_v6destintfid(ill_t *ill, in6_addr_t *v6addr)
17733 {
17734 ip_ipv6_genv6intfid(ill, ill->ill_dest_addr, v6addr);
17735 }
17736
17737 static void
17738 ip_ipv4_v6intfid(ill_t *ill, in6_addr_t *v6addr)
17739 {
17740 ip_ipv4_genv6intfid(ill, ill->ill_phys_addr, v6addr);
17741 }
17742
17743 static void
17744 ip_ipv4_v6destintfid(ill_t *ill, in6_addr_t *v6addr)
17745 {
17746 ip_ipv4_genv6intfid(ill, ill->ill_dest_addr, v6addr);
17747 }
17748
17749 /*
17750  * Look up an ill and verify that the zoneid has an ipif on that ill.
17751  * Returns a held ill, or NULL.
17752 */
17753 ill_t *
17754 ill_lookup_on_ifindex_zoneid(uint_t index, zoneid_t zoneid, boolean_t isv6,
17755 ip_stack_t *ipst)
17756 {
17757 ill_t *ill;
17758 ipif_t *ipif;
17759
17760 ill = ill_lookup_on_ifindex(index, isv6, ipst);
17761 if (ill == NULL)
17762 return (NULL);
17763
17764 mutex_enter(&ill->ill_lock);
17765 for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
17766 if (IPIF_IS_CONDEMNED(ipif))
17767 continue;
17768 if (zoneid != ALL_ZONES && ipif->ipif_zoneid != zoneid &&
17769 ipif->ipif_zoneid != ALL_ZONES)
17770 continue;
17771
17772 mutex_exit(&ill->ill_lock);
17773 return (ill);
17774 }
17775 mutex_exit(&ill->ill_lock);
17776 ill_refrele(ill);
17777 return (NULL);
17778 }
17779
17780 /*
17781  * Return a pointer to an ipif_t given a combination of (ill_idx, ipif_id).
17782  * If a pointer to an ipif_t is returned then the caller will need to do
17783  * an ipif_refrele(); the ill itself is released here before returning.
17784 */
17785 ipif_t *
17786 ipif_getby_indexes(uint_t ifindex, uint_t lifidx, boolean_t isv6,
17787 ip_stack_t *ipst)
17788 {
17789 ipif_t *ipif;
17790 ill_t *ill;
17791
17792 ill = ill_lookup_on_ifindex(ifindex, isv6, ipst);
17793 if (ill == NULL)
17794 return (NULL);
17795
17796 mutex_enter(&ill->ill_lock);
17797 if (ill->ill_state_flags & ILL_CONDEMNED) {
17798 mutex_exit(&ill->ill_lock);
17799 ill_refrele(ill);
17800 return (NULL);
17801 }
17802
17803 for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next) {
17804 if (!IPIF_CAN_LOOKUP(ipif))
17805 continue;
17806 if (lifidx == ipif->ipif_id) {
17807 ipif_refhold_locked(ipif);
17808 break;
17809 }
17810 }
17811
17812 mutex_exit(&ill->ill_lock);
17813 ill_refrele(ill);
17814 return (ipif);
17815 }
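
/*
 * For example, the logical interface "hme0:2" corresponds to the pair
 * (ifindex of hme0, lifidx 2); lifidx 0 names the first ipif on the ill.
 */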
17816
17817 /*
17818  * Set ill_inputfn based on the currently known state.
17819 * This needs to be called when any of the factors taken into
17820 * account changes.
17821 */
17822 void
17823 ill_set_inputfn(ill_t *ill)
17824 {
17825 ip_stack_t *ipst = ill->ill_ipst;
17826
17827 if (ill->ill_isv6) {
17828 if (is_system_labeled())
17829 ill->ill_inputfn = ill_input_full_v6;
17830 else
17831 ill->ill_inputfn = ill_input_short_v6;
17832 } else {
17833 if (is_system_labeled())
17834 ill->ill_inputfn = ill_input_full_v4;
17835 else if (ill->ill_dhcpinit != 0)
17836 ill->ill_inputfn = ill_input_full_v4;
17837 else if (ipst->ips_ipcl_proto_fanout_v4[IPPROTO_RSVP].connf_head
17838 != NULL)
17839 ill->ill_inputfn = ill_input_full_v4;
17840 else if (ipst->ips_ip_cgtp_filter &&
17841 ipst->ips_ip_cgtp_filter_ops != NULL)
17842 ill->ill_inputfn = ill_input_full_v4;
17843 else
17844 ill->ill_inputfn = ill_input_short_v4;
17845 }
17846 }
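
/*
 * In summary, for IPv4 the "full" input function is chosen whenever any of
 * labeling, a pending dhcpinit, RSVP listeners, or CGTP filtering is in
 * effect; otherwise the "short" fast path is used. For IPv6 only labeling
 * selects the full path.
 */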
17847
17848 /*
17849 * Re-evaluate ill_inputfn for all the IPv4 ills.
17850  * Used when RSVP or CGTP comes and goes.
17851 */
17852 void
17853 ill_set_inputfn_all(ip_stack_t *ipst)
17854 {
17855 ill_walk_context_t ctx;
17856 ill_t *ill;
17857
17858 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
17859 ill = ILL_START_WALK_V4(&ctx, ipst);
17860 for (; ill != NULL; ill = ill_next(&ctx, ill))
17861 ill_set_inputfn(ill);
17862
17863 rw_exit(&ipst->ips_ill_g_lock);
17864 }
17865
17866 /*
17867 * Set the physical address information for `ill' to the contents of the
17868 * dl_notify_ind_t pointed to by `mp'. Must be called as writer, and will be
17869 * asynchronous if `ill' cannot immediately be quiesced -- in which case
17870 * EINPROGRESS will be returned.
17871 */
17872 int
17873 ill_set_phys_addr(ill_t *ill, mblk_t *mp)
17874 {
17875 ipsq_t *ipsq = ill->ill_phyint->phyint_ipsq;
17876 dl_notify_ind_t *dlindp = (dl_notify_ind_t *)mp->b_rptr;
17877
17878 ASSERT(IAM_WRITER_IPSQ(ipsq));
17879
17880 if (dlindp->dl_data != DL_IPV6_LINK_LAYER_ADDR &&
17881 dlindp->dl_data != DL_CURR_DEST_ADDR &&
17882 dlindp->dl_data != DL_CURR_PHYS_ADDR) {
17883 /* Changing DL_IPV6_TOKEN is not yet supported */
17884 return (0);
17885 }
17886
17887 /*
17888 * We need to store up to two copies of `mp' in `ill'. Due to the
17889 * design of ipsq_pending_mp_add(), we can't pass them as separate
17890 * arguments to ill_set_phys_addr_tail(). Instead, chain them
17891 * together here, then pull 'em apart in ill_set_phys_addr_tail().
17892 */
17893 if ((mp = copyb(mp)) == NULL || (mp->b_cont = copyb(mp)) == NULL) {
17894 freemsg(mp);
17895 return (ENOMEM);
17896 }
17897
17898 ipsq_current_start(ipsq, ill->ill_ipif, 0);
17899
17900 /*
17901 * Since we'll only do a logical down, we can't rely on ipif_down
17902 * to turn on ILL_DOWN_IN_PROGRESS, or for the DL_BIND_ACK to reset
17903 * ILL_DOWN_IN_PROGRESS. We instead manage this separately for this
17904 * case, to quiesce ire's and nce's for ill_is_quiescent.
17905 */
17906 mutex_enter(&ill->ill_lock);
17907 ill->ill_state_flags |= ILL_DOWN_IN_PROGRESS;
17908 /* no more ire/nce addition allowed */
17909 mutex_exit(&ill->ill_lock);
17910
17911 /*
17912 * If we can quiesce the ill, then set the address. If not, then
17913 * ill_set_phys_addr_tail() will be called from ipif_ill_refrele_tail().
17914 */
17915 ill_down_ipifs(ill, B_TRUE);
17916 mutex_enter(&ill->ill_lock);
17917 if (!ill_is_quiescent(ill)) {
17918 /* call cannot fail since `conn_t *' argument is NULL */
17919 (void) ipsq_pending_mp_add(NULL, ill->ill_ipif, ill->ill_rq,
17920 mp, ILL_DOWN);
17921 mutex_exit(&ill->ill_lock);
17922 return (EINPROGRESS);
17923 }
17924 mutex_exit(&ill->ill_lock);
17925
17926 ill_set_phys_addr_tail(ipsq, ill->ill_rq, mp, NULL);
17927 return (0);
17928 }
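
/*
 * A sketch of the pending message built above:
 *
 *	mp (copy #1 of the DL_NOTIFY_IND)
 *	  b_cont -> copy #2
 *
 * ill_set_phys_addr_tail() pulls the two apart; the second copy is only
 * needed for DL_CURR_PHYS_ADDR on an IPv6 ill (to set the nd_lla info)
 * and is freed otherwise.
 */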
17929
17930 /*
17931 * When the allowed-ips link property is set on the datalink, IP receives a
17932 * DL_NOTE_ALLOWED_IPS notification that is processed in ill_set_allowed_ips()
17933 * to initialize the ill_allowed_ips[] array in the ill_t. This array is then
17934 * used to vet addresses passed to ip_sioctl_addr() and to ensure that the
17935 * only IP addresses configured on the ill_t are those in the ill_allowed_ips[]
17936 * array.
17937 */
17938 void
17939 ill_set_allowed_ips(ill_t *ill, mblk_t *mp)
17940 {
17941 ipsq_t *ipsq = ill->ill_phyint->phyint_ipsq;
17942 dl_notify_ind_t *dlip = (dl_notify_ind_t *)mp->b_rptr;
17943 mac_protect_t *mrp;
17944 int i;
17945
17946 ASSERT(IAM_WRITER_IPSQ(ipsq));
17947 mrp = (mac_protect_t *)&dlip[1];
17948
17949 if (mrp->mp_ipaddrcnt == 0) { /* reset allowed-ips */
17950 kmem_free(ill->ill_allowed_ips,
17951 ill->ill_allowed_ips_cnt * sizeof (in6_addr_t));
17952 ill->ill_allowed_ips_cnt = 0;
17953 ill->ill_allowed_ips = NULL;
17954 mutex_enter(&ill->ill_phyint->phyint_lock);
17955 ill->ill_phyint->phyint_flags &= ~PHYI_L3PROTECT;
17956 mutex_exit(&ill->ill_phyint->phyint_lock);
17957 return;
17958 }
17959
17960 if (ill->ill_allowed_ips != NULL) {
17961 kmem_free(ill->ill_allowed_ips,
17962 ill->ill_allowed_ips_cnt * sizeof (in6_addr_t));
17963 }
17964 ill->ill_allowed_ips_cnt = mrp->mp_ipaddrcnt;
17965 ill->ill_allowed_ips = kmem_alloc(
17966 ill->ill_allowed_ips_cnt * sizeof (in6_addr_t), KM_SLEEP);
17967 for (i = 0; i < mrp->mp_ipaddrcnt; i++)
17968 ill->ill_allowed_ips[i] = mrp->mp_ipaddrs[i].ip_addr;
17969
17970 mutex_enter(&ill->ill_phyint->phyint_lock);
17971 ill->ill_phyint->phyint_flags |= PHYI_L3PROTECT;
17972 mutex_exit(&ill->ill_phyint->phyint_lock);
17973 }
17974
17975 /*
17976 * Once the ill associated with `q' has quiesced, set its physical address
17977 * information to the values in `addrmp'. Note that two copies of `addrmp'
17978 * are passed (linked by b_cont), since we sometimes need to save two distinct
17979 * copies in the ill_t, and our context doesn't permit sleeping or allocation
17980 * failure (we'll free the other copy if it's not needed). Since the ill_t
17981 * is quiesced, we know any stale nce's with the old address information have
17982 * already been removed, so we don't need to call nce_flush().
17983 */
17984 /* ARGSUSED */
17985 static void
17986 ill_set_phys_addr_tail(ipsq_t *ipsq, queue_t *q, mblk_t *addrmp, void *dummy)
17987 {
17988 ill_t *ill = q->q_ptr;
17989 mblk_t *addrmp2 = unlinkb(addrmp);
17990 dl_notify_ind_t *dlindp = (dl_notify_ind_t *)addrmp->b_rptr;
17991 uint_t addrlen, addroff;
17992 int status;
17993
17994 ASSERT(IAM_WRITER_IPSQ(ipsq));
17995
17996 addroff = dlindp->dl_addr_offset;
17997 addrlen = dlindp->dl_addr_length - ABS(ill->ill_sap_length);
17998
17999 switch (dlindp->dl_data) {
18000 case DL_IPV6_LINK_LAYER_ADDR:
18001 ill_set_ndmp(ill, addrmp, addroff, addrlen);
18002 freemsg(addrmp2);
18003 break;
18004
18005 case DL_CURR_DEST_ADDR:
18006 freemsg(ill->ill_dest_addr_mp);
18007 ill->ill_dest_addr = addrmp->b_rptr + addroff;
18008 ill->ill_dest_addr_mp = addrmp;
18009 if (ill->ill_isv6) {
18010 ill_setdesttoken(ill);
18011 ipif_setdestlinklocal(ill->ill_ipif);
18012 }
18013 freemsg(addrmp2);
18014 break;
18015
18016 case DL_CURR_PHYS_ADDR:
18017 freemsg(ill->ill_phys_addr_mp);
18018 ill->ill_phys_addr = addrmp->b_rptr + addroff;
18019 ill->ill_phys_addr_mp = addrmp;
18020 ill->ill_phys_addr_length = addrlen;
18021 if (ill->ill_isv6)
18022 ill_set_ndmp(ill, addrmp2, addroff, addrlen);
18023 else
18024 freemsg(addrmp2);
18025 if (ill->ill_isv6) {
18026 ill_setdefaulttoken(ill);
18027 ipif_setlinklocal(ill->ill_ipif);
18028 }
18029 break;
18030 default:
18031 ASSERT(0);
18032 }
18033
18034 /*
18035 * reset ILL_DOWN_IN_PROGRESS so that we can successfully add ires
18036 * as we bring the ipifs up again.
18037 */
18038 mutex_enter(&ill->ill_lock);
18039 ill->ill_state_flags &= ~ILL_DOWN_IN_PROGRESS;
18040 mutex_exit(&ill->ill_lock);
18041 /*
18042 * If there are ipifs to bring up, ill_up_ipifs() will return
18043 * EINPROGRESS, and ipsq_current_finish() will be called by
18044 * ip_rput_dlpi_writer() or arp_bringup_done() when the last ipif is
18045 * brought up.
18046 */
18047 status = ill_up_ipifs(ill, q, addrmp);
18048 if (status != EINPROGRESS)
18049 ipsq_current_finish(ipsq);
18050 }
18051
18052 /*
18053 * Helper routine for setting the ill_nd_lla fields.
18054 */
18055 void
18056 ill_set_ndmp(ill_t *ill, mblk_t *ndmp, uint_t addroff, uint_t addrlen)
18057 {
18058 freemsg(ill->ill_nd_lla_mp);
18059 ill->ill_nd_lla = ndmp->b_rptr + addroff;
18060 ill->ill_nd_lla_mp = ndmp;
18061 ill->ill_nd_lla_len = addrlen;
18062 }
18063
18064 /*
18065 * Replumb the ill.
18066 */
18067 int
18068 ill_replumb(ill_t *ill, mblk_t *mp)
18069 {
18070 ipsq_t *ipsq = ill->ill_phyint->phyint_ipsq;
18071
18072 ASSERT(IAM_WRITER_IPSQ(ipsq));
18073
18074 ipsq_current_start(ipsq, ill->ill_ipif, 0);
18075
18076 /*
18077 * If we can quiesce the ill, then continue. If not, then
18078 * ill_replumb_tail() will be called from ipif_ill_refrele_tail().
18079 */
18080 ill_down_ipifs(ill, B_FALSE);
18081
18082 mutex_enter(&ill->ill_lock);
18083 if (!ill_is_quiescent(ill)) {
18084 /* call cannot fail since `conn_t *' argument is NULL */
18085 (void) ipsq_pending_mp_add(NULL, ill->ill_ipif, ill->ill_rq,
18086 mp, ILL_DOWN);
18087 mutex_exit(&ill->ill_lock);
18088 return (EINPROGRESS);
18089 }
18090 mutex_exit(&ill->ill_lock);
18091
18092 ill_replumb_tail(ipsq, ill->ill_rq, mp, NULL);
18093 return (0);
18094 }
18095
18096 /* ARGSUSED */
18097 static void
18098 ill_replumb_tail(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy)
18099 {
18100 ill_t *ill = q->q_ptr;
18101 int err;
18102 conn_t *connp = NULL;
18103
18104 ASSERT(IAM_WRITER_IPSQ(ipsq));
18105 freemsg(ill->ill_replumb_mp);
18106 ill->ill_replumb_mp = copyb(mp);
18107
18108 if (ill->ill_replumb_mp == NULL) {
18109 /* out of memory */
18110 ipsq_current_finish(ipsq);
18111 return;
18112 }
18113
18114 mutex_enter(&ill->ill_lock);
18115 ill->ill_up_ipifs = ipsq_pending_mp_add(NULL, ill->ill_ipif,
18116 ill->ill_rq, ill->ill_replumb_mp, 0);
18117 mutex_exit(&ill->ill_lock);
18118
18119 if (!ill->ill_up_ipifs) {
18120 /* already closing */
18121 ipsq_current_finish(ipsq);
18122 return;
18123 }
18124 ill->ill_replumbing = 1;
18125 err = ill_down_ipifs_tail(ill);
18126
18127 /*
18128 * Successfully quiesced and brought down the interface, now we send
18129 * the DL_NOTE_REPLUMB_DONE message down to the driver. Reuse the
18130 * DL_NOTE_REPLUMB message.
18131 */
18132 mp = mexchange(NULL, mp, sizeof (dl_notify_conf_t), M_PROTO,
18133 DL_NOTIFY_CONF);
18134 ASSERT(mp != NULL);
18135 ((dl_notify_conf_t *)mp->b_rptr)->dl_notification =
18136 DL_NOTE_REPLUMB_DONE;
18137 ill_dlpi_send(ill, mp);
18138
18139 /*
18140 * For IPv4, we would usually get EINPROGRESS because the ETHERTYPE_ARP
18141 * streams have to be unbound. When all the DLPI exchanges are done,
18142 * ipsq_current_finish() will be called by arp_bringup_done(). The
18143 * remainder of ipif bringup via ill_up_ipifs() will also be done in
18144 * arp_bringup_done().
18145 */
18146 ASSERT(ill->ill_replumb_mp != NULL);
	if (err == EINPROGRESS)
		return;

	ill->ill_replumb_mp = ipsq_pending_mp_get(ipsq, &connp);
18151 ASSERT(connp == NULL);
18152 if (err == 0 && ill->ill_replumb_mp != NULL &&
18153 ill_up_ipifs(ill, q, ill->ill_replumb_mp) == EINPROGRESS) {
18154 return;
18155 }
18156 ipsq_current_finish(ipsq);
18157 }
18158
18159 /*
 * Issue ioctl `cmd' on `lh'; the caller provides the initial payload in
 * `buf', which is `bufsize' bytes.  On success, zero is returned and `buf'
 * is updated as per the ioctl.  On failure, an errno is returned.
18163 */
18164 static int
18165 ip_ioctl(ldi_handle_t lh, int cmd, void *buf, uint_t bufsize, cred_t *cr)
18166 {
18167 int rval;
18168 struct strioctl iocb;
18169
18170 iocb.ic_cmd = cmd;
18171 iocb.ic_timout = 15;
18172 iocb.ic_len = bufsize;
18173 iocb.ic_dp = buf;
18174
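	/*
	 * FKIOCTL marks this as a kernel-internal ioctl: `iocb' and the
	 * payload it points to are kernel addresses, so the stream head
	 * performs no copyin/copyout.
	 */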
18175 return (ldi_ioctl(lh, I_STR, (intptr_t)&iocb, FKIOCTL, cr, &rval));
18176 }
18177
18178 /*
 * Issue an SIOCGLIFCONF for address family `af' and store the result into
 * `lifcp'; on success, its dynamically-allocated lifc_buf will be `*bufsizep'
 * bytes, and the caller is responsible for freeing it.
18181 */
18182 static int
18183 ip_lifconf_ioctl(ldi_handle_t lh, int af, struct lifconf *lifcp,
18184 uint_t *bufsizep, cred_t *cr)
18185 {
18186 int err;
18187 struct lifnum lifn;
18188
18189 bzero(&lifn, sizeof (lifn));
18190 lifn.lifn_family = af;
18191 lifn.lifn_flags = LIFC_UNDER_IPMP;
18192
18193 if ((err = ip_ioctl(lh, SIOCGLIFNUM, &lifn, sizeof (lifn), cr)) != 0)
18194 return (err);
18195
18196 /*
18197 * Pad the interface count to account for additional interfaces that
18198 * may have been configured between the SIOCGLIFNUM and SIOCGLIFCONF.
18199 */
18200 lifn.lifn_count += 4;
18201 bzero(lifcp, sizeof (*lifcp));
18202 lifcp->lifc_flags = LIFC_UNDER_IPMP;
18203 lifcp->lifc_family = af;
18204 lifcp->lifc_len = *bufsizep = lifn.lifn_count * sizeof (struct lifreq);
18205 lifcp->lifc_buf = kmem_zalloc(*bufsizep, KM_SLEEP);
18206
18207 err = ip_ioctl(lh, SIOCGLIFCONF, lifcp, sizeof (*lifcp), cr);
18208 if (err != 0) {
18209 kmem_free(lifcp->lifc_buf, *bufsizep);
18210 return (err);
18211 }
18212
18213 return (0);
18214 }
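/*
 * A minimal usage sketch for the above (cf. ip_ipmp_cleanup() below); the
 * caller owns and must free the returned buffer:
 *
 *	struct lifconf lifc;
 *	struct lifreq *lifrp;
 *	uint_t bufsize;
 *	int i, nifs;
 *
 *	if (ip_lifconf_ioctl(lh, AF_INET, &lifc, &bufsize, cr) == 0) {
 *		nifs = lifc.lifc_len / sizeof (struct lifreq);
 *		for (lifrp = lifc.lifc_req, i = 0; i < nifs; i++, lifrp++)
 *			... examine lifrp->lifr_name ...
 *		kmem_free(lifc.lifc_buf, bufsize);
 *	}
 */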
18215
18216 /*
18217 * Helper for ip_interface_cleanup() that removes the loopback interface.
18218 */
18219 static void
18220 ip_loopback_removeif(ldi_handle_t lh, boolean_t isv6, cred_t *cr)
18221 {
18222 int err;
18223 struct lifreq lifr;
18224
18225 bzero(&lifr, sizeof (lifr));
18226 (void) strcpy(lifr.lifr_name, ipif_loopback_name);
18227
18228 /*
18229 * Attempt to remove the interface. It may legitimately not exist
18230 * (e.g. the zone administrator unplumbed it), so ignore ENXIO.
18231 */
18232 err = ip_ioctl(lh, SIOCLIFREMOVEIF, &lifr, sizeof (lifr), cr);
18233 if (err != 0 && err != ENXIO) {
18234 ip0dbg(("ip_loopback_removeif: IP%s SIOCLIFREMOVEIF failed: "
18235 "error %d\n", isv6 ? "v6" : "v4", err));
18236 }
18237 }
18238
18239 /*
18240 * Helper for ip_interface_cleanup() that ensures no IP interfaces are in IPMP
18241 * groups and that IPMP data addresses are down. These conditions must be met
18242 * so that IPMP interfaces can be I_PUNLINK'd, as per ip_sioctl_plink_ipmp().
18243 */
18244 static void
18245 ip_ipmp_cleanup(ldi_handle_t lh, boolean_t isv6, cred_t *cr)
18246 {
18247 int af = isv6 ? AF_INET6 : AF_INET;
18248 int i, nifs;
18249 int err;
18250 uint_t bufsize;
18251 uint_t lifrsize = sizeof (struct lifreq);
18252 struct lifconf lifc;
18253 struct lifreq *lifrp;
18254
18255 if ((err = ip_lifconf_ioctl(lh, af, &lifc, &bufsize, cr)) != 0) {
18256 cmn_err(CE_WARN, "ip_ipmp_cleanup: cannot get interface list "
18257 "(error %d); any IPMP interfaces cannot be shutdown", err);
18258 return;
18259 }
18260
18261 nifs = lifc.lifc_len / lifrsize;
18262 for (lifrp = lifc.lifc_req, i = 0; i < nifs; i++, lifrp++) {
18263 err = ip_ioctl(lh, SIOCGLIFFLAGS, lifrp, lifrsize, cr);
18264 if (err != 0) {
18265 cmn_err(CE_WARN, "ip_ipmp_cleanup: %s: cannot get "
18266 "flags: error %d", lifrp->lifr_name, err);
18267 continue;
18268 }
18269
18270 if (lifrp->lifr_flags & IFF_IPMP) {
18271 if ((lifrp->lifr_flags & (IFF_UP|IFF_DUPLICATE)) == 0)
18272 continue;
18273
18274 lifrp->lifr_flags &= ~IFF_UP;
18275 err = ip_ioctl(lh, SIOCSLIFFLAGS, lifrp, lifrsize, cr);
18276 if (err != 0) {
18277 cmn_err(CE_WARN, "ip_ipmp_cleanup: %s: cannot "
18278 "bring down (error %d); IPMP interface may "
18279 "not be shutdown", lifrp->lifr_name, err);
18280 }
18281
18282 /*
18283 * Check if IFF_DUPLICATE is still set -- and if so,
18284 * reset the address to clear it.
18285 */
18286 err = ip_ioctl(lh, SIOCGLIFFLAGS, lifrp, lifrsize, cr);
18287 if (err != 0 || !(lifrp->lifr_flags & IFF_DUPLICATE))
18288 continue;
18289
			err = ip_ioctl(lh, SIOCGLIFADDR, lifrp, lifrsize, cr);
			if (err != 0 || (err = ip_ioctl(lh, SIOCSLIFADDR,
			    lifrp, lifrsize, cr)) != 0) {
				cmn_err(CE_WARN, "ip_ipmp_cleanup: %s: cannot "
				    "reset DAD (error %d); IPMP interface may "
				    "not be shut down", lifrp->lifr_name, err);
18296 }
18297 continue;
18298 }
18299
		if (strchr(lifrp->lifr_name, IPIF_SEPARATOR_CHAR) == NULL) {
18301 lifrp->lifr_groupname[0] = '\0';
18302 if ((err = ip_ioctl(lh, SIOCSLIFGROUPNAME, lifrp,
18303 lifrsize, cr)) != 0) {
18304 cmn_err(CE_WARN, "ip_ipmp_cleanup: %s: cannot "
18305 "leave IPMP group (error %d); associated "
18306 "IPMP interface may not be shutdown",
18307 lifrp->lifr_name, err);
18308 continue;
18309 }
18310 }
18311 }
18312
18313 kmem_free(lifc.lifc_buf, bufsize);
18314 }
18315
18316 #define UDPDEV "/devices/pseudo/udp@0:udp"
18317 #define UDP6DEV "/devices/pseudo/udp6@0:udp6"
18318
18319 /*
18320 * Remove the loopback interfaces and prep the IPMP interfaces to be torn down.
18321 * Non-loopback interfaces are either I_LINK'd or I_PLINK'd; the former go away
18322 * when the user-level processes in the zone are killed and the latter are
18323 * cleaned up by str_stack_shutdown().
18324 */
18325 void
18326 ip_interface_cleanup(ip_stack_t *ipst)
18327 {
18328 ldi_handle_t lh;
18329 ldi_ident_t li;
18330 cred_t *cr;
18331 int err;
18332 int i;
18333 char *devs[] = { UDP6DEV, UDPDEV };
18334 netstackid_t stackid = ipst->ips_netstack->netstack_stackid;
18335
18336 if ((err = ldi_ident_from_major(ddi_name_to_major("ip"), &li)) != 0) {
18337 cmn_err(CE_WARN, "ip_interface_cleanup: cannot get ldi ident:"
18338 " error %d", err);
18339 return;
18340 }
18341
18342 cr = zone_get_kcred(netstackid_to_zoneid(stackid));
18343 ASSERT(cr != NULL);
18344
18345 /*
18346 * NOTE: loop executes exactly twice and is hardcoded to know that the
18347 * first iteration is IPv6. (Unrolling yields repetitious code, hence
18348 * the loop.)
18349 */
18350 for (i = 0; i < 2; i++) {
18351 err = ldi_open_by_name(devs[i], FREAD|FWRITE, cr, &lh, li);
18352 if (err != 0) {
18353 cmn_err(CE_WARN, "ip_interface_cleanup: cannot open %s:"
18354 " error %d", devs[i], err);
18355 continue;
18356 }
18357
18358 ip_loopback_removeif(lh, i == 0, cr);
18359 ip_ipmp_cleanup(lh, i == 0, cr);
18360
18361 (void) ldi_close(lh, FREAD|FWRITE, cr);
18362 }
18363
18364 ldi_ident_release(li);
18365 crfree(cr);
18366 }
18367
18368 /*
 * This needs to be kept in sync with the nic_event_t definition.
18370 */
18371 static const char *
18372 ill_hook_event2str(nic_event_t event)
18373 {
18374 switch (event) {
18375 case NE_PLUMB:
18376 return ("PLUMB");
18377 case NE_UNPLUMB:
18378 return ("UNPLUMB");
18379 case NE_UP:
18380 return ("UP");
18381 case NE_DOWN:
18382 return ("DOWN");
18383 case NE_ADDRESS_CHANGE:
18384 return ("ADDRESS_CHANGE");
18385 case NE_LIF_UP:
18386 return ("LIF_UP");
18387 case NE_LIF_DOWN:
18388 return ("LIF_DOWN");
18389 case NE_IFINDEX_CHANGE:
18390 return ("IFINDEX_CHANGE");
18391 default:
18392 return ("UNKNOWN");
18393 }
18394 }
18395
18396 void
18397 ill_nic_event_dispatch(ill_t *ill, lif_if_t lif, nic_event_t event,
18398 nic_event_data_t data, size_t datalen)
18399 {
18400 ip_stack_t *ipst = ill->ill_ipst;
18401 hook_nic_event_int_t *info;
18402 const char *str = NULL;
18403
18404 /* create a new nic event info */
18405 if ((info = kmem_alloc(sizeof (*info), KM_NOSLEEP)) == NULL)
18406 goto fail;
18407
18408 info->hnei_event.hne_nic = ill->ill_phyint->phyint_ifindex;
18409 info->hnei_event.hne_lif = lif;
18410 info->hnei_event.hne_event = event;
18411 info->hnei_event.hne_protocol = ill->ill_isv6 ?
18412 ipst->ips_ipv6_net_data : ipst->ips_ipv4_net_data;
18413 info->hnei_event.hne_data = NULL;
18414 info->hnei_event.hne_datalen = 0;
18415 info->hnei_stackid = ipst->ips_netstack->netstack_stackid;
18416
18417 if (data != NULL && datalen != 0) {
18418 info->hnei_event.hne_data = kmem_alloc(datalen, KM_NOSLEEP);
18419 if (info->hnei_event.hne_data == NULL)
18420 goto fail;
18421 bcopy(data, info->hnei_event.hne_data, datalen);
18422 info->hnei_event.hne_datalen = datalen;
18423 }
18424
18425 if (ddi_taskq_dispatch(eventq_queue_nic, ip_ne_queue_func, info,
18426 DDI_NOSLEEP) == DDI_SUCCESS)
18427 return;
18428
18429 fail:
18430 if (info != NULL) {
18431 if (info->hnei_event.hne_data != NULL) {
18432 kmem_free(info->hnei_event.hne_data,
18433 info->hnei_event.hne_datalen);
18434 }
		kmem_free(info, sizeof (*info)); /* must match alloc above */
18436 }
18437 str = ill_hook_event2str(event);
18438 ip2dbg(("ill_nic_event_dispatch: could not dispatch %s nic event "
18439 "information for %s (ENOMEM)\n", str, ill->ill_name));
18440 }
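/*
 * A representative call site (cf. ipif_up_notify() later in this file):
 *
 *	ill_nic_event_dispatch(ipif->ipif_ill, MAP_IPIF_ID(ipif->ipif_id),
 *	    NE_LIF_UP, NULL, 0);
 */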
18441
18442 static int
18443 ipif_arp_up_done_tail(ipif_t *ipif, enum ip_resolver_action res_act)
18444 {
18445 int err = 0;
18446 const in_addr_t *addr = NULL;
18447 nce_t *nce = NULL;
18448 ill_t *ill = ipif->ipif_ill;
18449 ill_t *bound_ill;
18450 boolean_t added_ipif = B_FALSE;
18451 uint16_t state;
18452 uint16_t flags;
18453
18454 DTRACE_PROBE3(ipif__downup, char *, "ipif_arp_up_done_tail",
18455 ill_t *, ill, ipif_t *, ipif);
18456 if (ipif->ipif_lcl_addr != INADDR_ANY) {
18457 addr = &ipif->ipif_lcl_addr;
18458 }
18459
18460 if ((ipif->ipif_flags & IPIF_UNNUMBERED) || addr == NULL) {
18461 if (res_act != Res_act_initial)
18462 return (EINVAL);
18463 }
18464
18465 if (addr != NULL) {
18466 ipmp_illgrp_t *illg = ill->ill_grp;
18467
18468 /* add unicast nce for the local addr */
18469
18470 if (IS_IPMP(ill)) {
18471 /*
18472 * If we're here via ipif_up(), then the ipif
18473 * won't be bound yet -- add it to the group,
18474 * which will bind it if possible. (We would
18475 * add it in ipif_up(), but deleting on failure
18476 * there is gruesome.) If we're here via
18477 * ipmp_ill_bind_ipif(), then the ipif has
18478 * already been added to the group and we
18479 * just need to use the binding.
18480 */
18481 if ((bound_ill = ipmp_ipif_bound_ill(ipif)) == NULL) {
18482 bound_ill = ipmp_illgrp_add_ipif(illg, ipif);
18483 if (bound_ill == NULL) {
18484 /*
18485 * We couldn't bind the ipif to an ill
18486 * yet, so we have nothing to publish.
18487 * Mark the address as ready and return.
18488 */
18489 ipif->ipif_addr_ready = 1;
18490 return (0);
18491 }
18492 added_ipif = B_TRUE;
18493 }
18494 } else {
18495 bound_ill = ill;
18496 }
18497
18498 flags = (NCE_F_MYADDR | NCE_F_PUBLISH | NCE_F_AUTHORITY |
18499 NCE_F_NONUD);
18500 /*
18501 * If this is an initial bring-up (or the ipif was never
18502 * completely brought up), do DAD. Otherwise, we're here
18503 * because IPMP has rebound an address to this ill: send
18504 * unsolicited advertisements (ARP announcements) to
18505 * inform others.
18506 */
18507 if (res_act == Res_act_initial || !ipif->ipif_addr_ready) {
18508 state = ND_UNCHANGED; /* compute in nce_add_common() */
18509 } else {
18510 state = ND_REACHABLE;
18511 flags |= NCE_F_UNSOL_ADV;
18512 }
18513
18514 retry:
18515 err = nce_lookup_then_add_v4(ill,
18516 bound_ill->ill_phys_addr, bound_ill->ill_phys_addr_length,
18517 addr, flags, state, &nce);
18518
18519 /*
18520 * note that we may encounter EEXIST if we are moving
18521 * the nce as a result of a rebind operation.
18522 */
18523 switch (err) {
18524 case 0:
18525 ipif->ipif_added_nce = 1;
18526 nce->nce_ipif_cnt++;
18527 break;
18528 case EEXIST:
18529 ip1dbg(("ipif_arp_up: NCE already exists for %s\n",
18530 ill->ill_name));
18531 if (!NCE_MYADDR(nce->nce_common)) {
18532 /*
18533 * A leftover nce from before this address
18534 * existed
18535 */
18536 ncec_delete(nce->nce_common);
18537 nce_refrele(nce);
18538 nce = NULL;
18539 goto retry;
18540 }
18541 if ((ipif->ipif_flags & IPIF_POINTOPOINT) == 0) {
18542 nce_refrele(nce);
18543 nce = NULL;
18544 ip1dbg(("ipif_arp_up: NCE already exists "
18545 "for %s:%u\n", ill->ill_name,
18546 ipif->ipif_id));
18547 goto arp_up_done;
18548 }
18549 /*
18550 * Duplicate local addresses are permissible for
18551 * IPIF_POINTOPOINT interfaces which will get marked
18552 * IPIF_UNNUMBERED later in
18553 * ip_addr_availability_check().
18554 *
18555 * The nce_ipif_cnt field tracks the number of
18556 * ipifs that have nce_addr as their local address.
18557 */
18558 ipif->ipif_addr_ready = 1;
18559 ipif->ipif_added_nce = 1;
18560 nce->nce_ipif_cnt++;
18561 err = 0;
18562 break;
18563 default:
18564 ASSERT(nce == NULL);
18565 goto arp_up_done;
18566 }
18567 if (arp_no_defense) {
18568 if ((ipif->ipif_flags & IPIF_UP) &&
18569 !ipif->ipif_addr_ready)
18570 ipif_up_notify(ipif);
18571 ipif->ipif_addr_ready = 1;
18572 }
18573 } else {
18574 /* zero address. nothing to publish */
18575 ipif->ipif_addr_ready = 1;
18576 }
18577 if (nce != NULL)
18578 nce_refrele(nce);
18579 arp_up_done:
18580 if (added_ipif && err != 0)
18581 ipmp_illgrp_del_ipif(ill->ill_grp, ipif);
18582 return (err);
18583 }
18584
18585 int
18586 ipif_arp_up(ipif_t *ipif, enum ip_resolver_action res_act, boolean_t was_dup)
18587 {
18588 int err = 0;
18589 ill_t *ill = ipif->ipif_ill;
18590 boolean_t first_interface, wait_for_dlpi = B_FALSE;
18591
18592 DTRACE_PROBE3(ipif__downup, char *, "ipif_arp_up",
18593 ill_t *, ill, ipif_t *, ipif);
18594
18595 /*
	 * We need to bring up ARP or set up the multicast mapping only
	 * when the first interface is coming UP.
18598 */
18599 first_interface = (ill->ill_ipif_up_count == 0 &&
18600 ill->ill_ipif_dup_count == 0 && !was_dup);
18601
18602 if (res_act == Res_act_initial && first_interface) {
18603 /*
18604 * Send ATTACH + BIND
18605 */
18606 err = arp_ll_up(ill);
18607 if (err != EINPROGRESS && err != 0)
18608 return (err);
18609
18610 /*
	 * Add an NCE for the local address and start DAD; we'll
		 * wait to hear that DAD has finished before using the
		 * interface.
18614 */
18615 if (err == EINPROGRESS)
18616 wait_for_dlpi = B_TRUE;
18617 }
18618
18619 if (!wait_for_dlpi)
18620 (void) ipif_arp_up_done_tail(ipif, res_act);
18621
18622 return (!wait_for_dlpi ? 0 : EINPROGRESS);
18623 }
18624
18625 /*
18626 * Finish processing of "arp_up" after all the DLPI message
18627 * exchanges have completed between arp and the driver.
18628 */
18629 void
18630 arp_bringup_done(ill_t *ill, int err)
18631 {
18632 mblk_t *mp1;
18633 ipif_t *ipif;
18634 conn_t *connp = NULL;
18635 ipsq_t *ipsq;
18636 queue_t *q;
18637
18638 ip1dbg(("arp_bringup_done(%s)\n", ill->ill_name));
18639
18640 ASSERT(IAM_WRITER_ILL(ill));
18641
18642 ipsq = ill->ill_phyint->phyint_ipsq;
18643 ipif = ipsq->ipsq_xop->ipx_pending_ipif;
18644 mp1 = ipsq_pending_mp_get(ipsq, &connp);
18645 ASSERT(!((mp1 != NULL) ^ (ipif != NULL)));
18646 if (mp1 == NULL) /* bringup was aborted by the user */
18647 return;
18648
18649 /*
18650 * If an IOCTL is waiting on this (ipsq_current_ioctl != 0), then we
18651 * must have an associated conn_t. Otherwise, we're bringing this
18652 * interface back up as part of handling an asynchronous event (e.g.,
18653 * physical address change).
18654 */
18655 if (ipsq->ipsq_xop->ipx_current_ioctl != 0) {
18656 ASSERT(connp != NULL);
18657 q = CONNP_TO_WQ(connp);
18658 } else {
18659 ASSERT(connp == NULL);
18660 q = ill->ill_rq;
18661 }
18662 if (err == 0) {
18663 if (ipif->ipif_isv6) {
18664 if ((err = ipif_up_done_v6(ipif)) != 0)
18665 ip0dbg(("arp_bringup_done: init failed\n"));
18666 } else {
18667 err = ipif_arp_up_done_tail(ipif, Res_act_initial);
18668 if (err != 0 ||
18669 (err = ipif_up_done(ipif)) != 0) {
18670 ip0dbg(("arp_bringup_done: "
18671 "init failed err %x\n", err));
18672 (void) ipif_arp_down(ipif);
18673 }
18674
18675 }
18676 } else {
18677 ip0dbg(("arp_bringup_done: DL_BIND_REQ failed\n"));
18678 }
18679
18680 if ((err == 0) && (ill->ill_up_ipifs)) {
18681 err = ill_up_ipifs(ill, q, mp1);
18682 if (err == EINPROGRESS)
18683 return;
18684 }
18685
18686 /*
18687 * If we have a moved ipif to bring up, and everything has succeeded
18688 * to this point, bring it up on the IPMP ill. Otherwise, leave it
18689 * down -- the admin can try to bring it up by hand if need be.
18690 */
18691 if (ill->ill_move_ipif != NULL) {
18692 ipif = ill->ill_move_ipif;
18693 ip1dbg(("bringing up ipif %p on ill %s\n", (void *)ipif,
18694 ipif->ipif_ill->ill_name));
18695 ill->ill_move_ipif = NULL;
18696 if (err == 0) {
18697 err = ipif_up(ipif, q, mp1);
18698 if (err == EINPROGRESS)
18699 return;
18700 }
18701 }
18702
18703 /*
18704 * The operation must complete without EINPROGRESS since
18705 * ipsq_pending_mp_get() has removed the mblk from ipsq_pending_mp.
18706 * Otherwise, the operation will be stuck forever in the ipsq.
18707 */
18708 ASSERT(err != EINPROGRESS);
18709 if (ipsq->ipsq_xop->ipx_current_ioctl != 0) {
18710 DTRACE_PROBE4(ipif__ioctl, char *, "arp_bringup_done finish",
18711 int, ipsq->ipsq_xop->ipx_current_ioctl,
18712 ill_t *, ill, ipif_t *, ipif);
18713 ip_ioctl_finish(q, mp1, err, NO_COPYOUT, ipsq);
18714 } else {
18715 ipsq_current_finish(ipsq);
18716 }
18717 }
18718
18719 /*
18720 * Finish processing of arp replumb after all the DLPI message
18721 * exchanges have completed between arp and the driver.
18722 */
18723 void
18724 arp_replumb_done(ill_t *ill, int err)
18725 {
18726 mblk_t *mp1;
18727 ipif_t *ipif;
18728 conn_t *connp = NULL;
18729 ipsq_t *ipsq;
18730 queue_t *q;
18731
18732 ASSERT(IAM_WRITER_ILL(ill));
18733
18734 ipsq = ill->ill_phyint->phyint_ipsq;
18735 ipif = ipsq->ipsq_xop->ipx_pending_ipif;
18736 mp1 = ipsq_pending_mp_get(ipsq, &connp);
18737 ASSERT(!((mp1 != NULL) ^ (ipif != NULL)));
18738 if (mp1 == NULL) {
18739 ip0dbg(("arp_replumb_done: bringup aborted ioctl %x\n",
18740 ipsq->ipsq_xop->ipx_current_ioctl));
18741 /* bringup was aborted by the user */
18742 return;
18743 }
18744 /*
18745 * If an IOCTL is waiting on this (ipsq_current_ioctl != 0), then we
18746 * must have an associated conn_t. Otherwise, we're bringing this
18747 * interface back up as part of handling an asynchronous event (e.g.,
18748 * physical address change).
18749 */
18750 if (ipsq->ipsq_xop->ipx_current_ioctl != 0) {
18751 ASSERT(connp != NULL);
18752 q = CONNP_TO_WQ(connp);
18753 } else {
18754 ASSERT(connp == NULL);
18755 q = ill->ill_rq;
18756 }
18757 if ((err == 0) && (ill->ill_up_ipifs)) {
18758 err = ill_up_ipifs(ill, q, mp1);
18759 if (err == EINPROGRESS)
18760 return;
18761 }
18762 /*
18763 * The operation must complete without EINPROGRESS since
18764 * ipsq_pending_mp_get() has removed the mblk from ipsq_pending_mp.
18765 * Otherwise, the operation will be stuck forever in the ipsq.
18766 */
18767 ASSERT(err != EINPROGRESS);
18768 if (ipsq->ipsq_xop->ipx_current_ioctl != 0) {
18769 DTRACE_PROBE4(ipif__ioctl, char *,
18770 "arp_replumb_done finish",
18771 int, ipsq->ipsq_xop->ipx_current_ioctl,
18772 ill_t *, ill, ipif_t *, ipif);
18773 ip_ioctl_finish(q, mp1, err, NO_COPYOUT, ipsq);
18774 } else {
18775 ipsq_current_finish(ipsq);
18776 }
18777 }
18778
18779 void
18780 ipif_up_notify(ipif_t *ipif)
18781 {
18782 ip_rts_ifmsg(ipif, RTSQ_DEFAULT);
18783 ip_rts_newaddrmsg(RTM_ADD, 0, ipif, RTSQ_DEFAULT);
18784 sctp_update_ipif(ipif, SCTP_IPIF_UP);
18785 ill_nic_event_dispatch(ipif->ipif_ill, MAP_IPIF_ID(ipif->ipif_id),
18786 NE_LIF_UP, NULL, 0);
18787 }
18788
18789 /*
 * The ILB ioctls use cv_wait (e.g. when deleting a rule or adding a server),
 * so this assumes the context is cv_wait'able.  Hence it shouldn't be used
 * on TPI end points with STREAMS modules pushed above; this is assured by
 * not having the IPI_MODOK flag for the ioctl.  IP also ensures the ILB
 * ioctl never ends up on an ipsq; otherwise we might end up processing the
 * ioctl while unwinding from the ipsq, and that could be a thread from the
 * bottom.
18796 */
18797 /* ARGSUSED */
18798 int
18799 ip_sioctl_ilb_cmd(ipif_t *ipif, sin_t *sin, queue_t *q, mblk_t *mp,
18800 ip_ioctl_cmd_t *ipip, void *arg)
18801 {
18802 mblk_t *cmd_mp = mp->b_cont->b_cont;
18803 ilb_cmd_t command = *((ilb_cmd_t *)cmd_mp->b_rptr);
18804 int ret = 0;
18805 int i;
18806 size_t size;
18807 ip_stack_t *ipst;
18808 zoneid_t zoneid;
18809 ilb_stack_t *ilbs;
18810
18811 ipst = CONNQ_TO_IPST(q);
18812 ilbs = ipst->ips_netstack->netstack_ilb;
18813 zoneid = Q_TO_CONN(q)->conn_zoneid;
18814
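	/*
	 * Commands carrying a variable-length array are validated by
	 * checking that the fixed header plus the array exactly fills the
	 * mblk (see e.g. the ILB_RULE_NAMES case below), i.e.:
	 *
	 *	b_rptr + offsetof(cmd_t, array) + n * sizeof (elem) == b_wptr
	 */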
18815 switch (command) {
18816 case ILB_CREATE_RULE: {
18817 ilb_rule_cmd_t *cmd = (ilb_rule_cmd_t *)cmd_mp->b_rptr;
18818
18819 if (MBLKL(cmd_mp) != sizeof (ilb_rule_cmd_t)) {
18820 ret = EINVAL;
18821 break;
18822 }
18823
18824 ret = ilb_rule_add(ilbs, zoneid, cmd);
18825 break;
18826 }
18827 case ILB_DESTROY_RULE:
18828 case ILB_ENABLE_RULE:
18829 case ILB_DISABLE_RULE: {
18830 ilb_name_cmd_t *cmd = (ilb_name_cmd_t *)cmd_mp->b_rptr;
18831
18832 if (MBLKL(cmd_mp) != sizeof (ilb_name_cmd_t)) {
18833 ret = EINVAL;
18834 break;
18835 }
18836
18837 if (cmd->flags & ILB_RULE_ALLRULES) {
18838 if (command == ILB_DESTROY_RULE) {
18839 ilb_rule_del_all(ilbs, zoneid);
18840 break;
18841 } else if (command == ILB_ENABLE_RULE) {
18842 ilb_rule_enable_all(ilbs, zoneid);
18843 break;
18844 } else if (command == ILB_DISABLE_RULE) {
18845 ilb_rule_disable_all(ilbs, zoneid);
18846 break;
18847 }
18848 } else {
18849 if (command == ILB_DESTROY_RULE) {
18850 ret = ilb_rule_del(ilbs, zoneid, cmd->name);
18851 } else if (command == ILB_ENABLE_RULE) {
18852 ret = ilb_rule_enable(ilbs, zoneid, cmd->name,
18853 NULL);
18854 } else if (command == ILB_DISABLE_RULE) {
18855 ret = ilb_rule_disable(ilbs, zoneid, cmd->name,
18856 NULL);
18857 }
18858 }
18859 break;
18860 }
18861 case ILB_NUM_RULES: {
18862 ilb_num_rules_cmd_t *cmd;
18863
18864 if (MBLKL(cmd_mp) != sizeof (ilb_num_rules_cmd_t)) {
18865 ret = EINVAL;
18866 break;
18867 }
18868 cmd = (ilb_num_rules_cmd_t *)cmd_mp->b_rptr;
18869 ilb_get_num_rules(ilbs, zoneid, &(cmd->num));
18870 break;
18871 }
18872 case ILB_RULE_NAMES: {
18873 ilb_rule_names_cmd_t *cmd;
18874
18875 cmd = (ilb_rule_names_cmd_t *)cmd_mp->b_rptr;
18876 if (MBLKL(cmd_mp) < sizeof (ilb_rule_names_cmd_t) ||
18877 cmd->num_names == 0) {
18878 ret = EINVAL;
18879 break;
18880 }
18881 size = cmd->num_names * ILB_RULE_NAMESZ;
18882 if (cmd_mp->b_rptr + offsetof(ilb_rule_names_cmd_t, buf) +
18883 size != cmd_mp->b_wptr) {
18884 ret = EINVAL;
18885 break;
18886 }
18887 ilb_get_rulenames(ilbs, zoneid, &cmd->num_names, cmd->buf);
18888 break;
18889 }
18890 case ILB_NUM_SERVERS: {
18891 ilb_num_servers_cmd_t *cmd;
18892
18893 if (MBLKL(cmd_mp) != sizeof (ilb_num_servers_cmd_t)) {
18894 ret = EINVAL;
18895 break;
18896 }
18897 cmd = (ilb_num_servers_cmd_t *)cmd_mp->b_rptr;
18898 ret = ilb_get_num_servers(ilbs, zoneid, cmd->name,
18899 &(cmd->num));
18900 break;
18901 }
18902 case ILB_LIST_RULE: {
18903 ilb_rule_cmd_t *cmd = (ilb_rule_cmd_t *)cmd_mp->b_rptr;
18904
18905 if (MBLKL(cmd_mp) != sizeof (ilb_rule_cmd_t)) {
18906 ret = EINVAL;
18907 break;
18908 }
18909 ret = ilb_rule_list(ilbs, zoneid, cmd);
18910 break;
18911 }
18912 case ILB_LIST_SERVERS: {
18913 ilb_servers_info_cmd_t *cmd;
18914
18915 cmd = (ilb_servers_info_cmd_t *)cmd_mp->b_rptr;
18916 if (MBLKL(cmd_mp) < sizeof (ilb_servers_info_cmd_t) ||
18917 cmd->num_servers == 0) {
18918 ret = EINVAL;
18919 break;
18920 }
18921 size = cmd->num_servers * sizeof (ilb_server_info_t);
18922 if (cmd_mp->b_rptr + offsetof(ilb_servers_info_cmd_t, servers) +
18923 size != cmd_mp->b_wptr) {
18924 ret = EINVAL;
18925 break;
18926 }
18927
18928 ret = ilb_get_servers(ilbs, zoneid, cmd->name, cmd->servers,
18929 &cmd->num_servers);
18930 break;
18931 }
18932 case ILB_ADD_SERVERS: {
18933 ilb_servers_info_cmd_t *cmd;
18934 ilb_rule_t *rule;
18935
18936 cmd = (ilb_servers_info_cmd_t *)cmd_mp->b_rptr;
18937 if (MBLKL(cmd_mp) < sizeof (ilb_servers_info_cmd_t)) {
18938 ret = EINVAL;
18939 break;
18940 }
18941 size = cmd->num_servers * sizeof (ilb_server_info_t);
18942 if (cmd_mp->b_rptr + offsetof(ilb_servers_info_cmd_t, servers) +
18943 size != cmd_mp->b_wptr) {
18944 ret = EINVAL;
18945 break;
18946 }
18947 rule = ilb_find_rule(ilbs, zoneid, cmd->name, &ret);
18948 if (rule == NULL) {
18949 ASSERT(ret != 0);
18950 break;
18951 }
18952 for (i = 0; i < cmd->num_servers; i++) {
18953 ilb_server_info_t *s;
18954
18955 s = &cmd->servers[i];
18956 s->err = ilb_server_add(ilbs, rule, s);
18957 }
18958 ILB_RULE_REFRELE(rule);
18959 break;
18960 }
18961 case ILB_DEL_SERVERS:
18962 case ILB_ENABLE_SERVERS:
18963 case ILB_DISABLE_SERVERS: {
18964 ilb_servers_cmd_t *cmd;
18965 ilb_rule_t *rule;
18966 int (*f)();
18967
18968 cmd = (ilb_servers_cmd_t *)cmd_mp->b_rptr;
18969 if (MBLKL(cmd_mp) < sizeof (ilb_servers_cmd_t)) {
18970 ret = EINVAL;
18971 break;
18972 }
18973 size = cmd->num_servers * sizeof (ilb_server_arg_t);
18974 if (cmd_mp->b_rptr + offsetof(ilb_servers_cmd_t, servers) +
18975 size != cmd_mp->b_wptr) {
18976 ret = EINVAL;
18977 break;
18978 }
18979
18980 if (command == ILB_DEL_SERVERS)
18981 f = ilb_server_del;
18982 else if (command == ILB_ENABLE_SERVERS)
18983 f = ilb_server_enable;
18984 else if (command == ILB_DISABLE_SERVERS)
18985 f = ilb_server_disable;
18986
18987 rule = ilb_find_rule(ilbs, zoneid, cmd->name, &ret);
18988 if (rule == NULL) {
18989 ASSERT(ret != 0);
18990 break;
18991 }
18992
18993 for (i = 0; i < cmd->num_servers; i++) {
18994 ilb_server_arg_t *s;
18995
18996 s = &cmd->servers[i];
18997 s->err = f(ilbs, zoneid, NULL, rule, &s->addr);
18998 }
18999 ILB_RULE_REFRELE(rule);
19000 break;
19001 }
19002 case ILB_LIST_NAT_TABLE: {
19003 ilb_list_nat_cmd_t *cmd;
19004
19005 cmd = (ilb_list_nat_cmd_t *)cmd_mp->b_rptr;
19006 if (MBLKL(cmd_mp) < sizeof (ilb_list_nat_cmd_t)) {
19007 ret = EINVAL;
19008 break;
19009 }
19010 size = cmd->num_nat * sizeof (ilb_nat_entry_t);
19011 if (cmd_mp->b_rptr + offsetof(ilb_list_nat_cmd_t, entries) +
19012 size != cmd_mp->b_wptr) {
19013 ret = EINVAL;
19014 break;
19015 }
19016
19017 ret = ilb_list_nat(ilbs, zoneid, cmd->entries, &cmd->num_nat,
19018 &cmd->flags);
19019 break;
19020 }
19021 case ILB_LIST_STICKY_TABLE: {
19022 ilb_list_sticky_cmd_t *cmd;
19023
19024 cmd = (ilb_list_sticky_cmd_t *)cmd_mp->b_rptr;
19025 if (MBLKL(cmd_mp) < sizeof (ilb_list_sticky_cmd_t)) {
19026 ret = EINVAL;
19027 break;
19028 }
19029 size = cmd->num_sticky * sizeof (ilb_sticky_entry_t);
19030 if (cmd_mp->b_rptr + offsetof(ilb_list_sticky_cmd_t, entries) +
19031 size != cmd_mp->b_wptr) {
19032 ret = EINVAL;
19033 break;
19034 }
19035
19036 ret = ilb_list_sticky(ilbs, zoneid, cmd->entries,
19037 &cmd->num_sticky, &cmd->flags);
19038 break;
19039 }
19040 default:
19041 ret = EINVAL;
19042 break;
19043 }
19045 return (ret);
19046 }
19047
19048 /* Remove all cache entries for this logical interface */
19049 void
19050 ipif_nce_down(ipif_t *ipif)
19051 {
19052 ill_t *ill = ipif->ipif_ill;
19053 nce_t *nce;
19054
19055 DTRACE_PROBE3(ipif__downup, char *, "ipif_nce_down",
19056 ill_t *, ill, ipif_t *, ipif);
19057 if (ipif->ipif_added_nce) {
19058 if (ipif->ipif_isv6)
19059 nce = nce_lookup_v6(ill, &ipif->ipif_v6lcl_addr);
19060 else
19061 nce = nce_lookup_v4(ill, &ipif->ipif_lcl_addr);
19062 if (nce != NULL) {
19063 if (--nce->nce_ipif_cnt == 0)
19064 ncec_delete(nce->nce_common);
19065 ipif->ipif_added_nce = 0;
19066 nce_refrele(nce);
19067 } else {
19068 /*
19069 * nce may already be NULL because it was already
19070 * flushed, e.g., due to a call to nce_flush
19071 */
19072 ipif->ipif_added_nce = 0;
19073 }
19074 }
19075 /*
19076 * Make IPMP aware of the deleted data address.
19077 */
19078 if (IS_IPMP(ill))
19079 ipmp_illgrp_del_ipif(ill->ill_grp, ipif);
19080
19081 /*
19082 * Remove all other nces dependent on this ill when the last ipif
19083 * is going away.
19084 */
19085 if (ill->ill_ipif_up_count == 0) {
19086 ncec_walk(ill, ncec_delete_per_ill, ill, ill->ill_ipst);
19087 if (IS_UNDER_IPMP(ill))
19088 nce_flush(ill, B_TRUE);
19089 }
19090 }
19091
19092 /*
19093 * find the first interface that uses usill for its source address.
19094 */
19095 ill_t *
19096 ill_lookup_usesrc(ill_t *usill)
19097 {
19098 ip_stack_t *ipst = usill->ill_ipst;
19099 ill_t *ill;
19100
19101 ASSERT(usill != NULL);
19102
19103 /* ill_g_usesrc_lock protects ill_usesrc_grp_next */
19104 rw_enter(&ipst->ips_ill_g_usesrc_lock, RW_WRITER);
19105 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
19106 for (ill = usill->ill_usesrc_grp_next; ill != NULL && ill != usill;
19107 ill = ill->ill_usesrc_grp_next) {
19108 if (!IS_UNDER_IPMP(ill) && (ill->ill_flags & ILLF_MULTICAST) &&
19109 !ILL_IS_CONDEMNED(ill)) {
19110 ill_refhold(ill);
19111 break;
19112 }
19113 }
19114 rw_exit(&ipst->ips_ill_g_lock);
19115 rw_exit(&ipst->ips_ill_g_usesrc_lock);
19116 return (ill);
19117 }
19118
19119 /*
19120 * This comment applies to both ip_sioctl_get_ifhwaddr and
19121 * ip_sioctl_get_lifhwaddr as the basic function of these two functions
19122 * is the same.
19123 *
19124 * The goal here is to find an IP interface that corresponds to the name
19125 * provided by the caller in the ifreq/lifreq structure held in the mblk_t
19126 * chain and to fill out a sockaddr/sockaddr_storage structure with the
19127 * mac address.
19128 *
19129 * The SIOCGIFHWADDR/SIOCGLIFHWADDR ioctl may return an error for a number
19130 * of different reasons:
19131 * ENXIO - the device name is not known to IP.
19132 * EADDRNOTAVAIL - the device has no hardware address. This is indicated
19133 * by ill_phys_addr not pointing to an actual address.
 * EPFNOSUPPORT - the request is for a mac address that will not fit in the
 * data structure supplied (struct sockaddr).
19137 *
19138 */
19139 /* ARGSUSED */
19140 int
19141 ip_sioctl_get_ifhwaddr(ipif_t *ipif, sin_t *dummy_sin, queue_t *q, mblk_t *mp,
19142 ip_ioctl_cmd_t *ipip, void *if_req)
19143 {
19144 struct sockaddr *sock;
19145 struct ifreq *ifr;
19146 mblk_t *mp1;
19147 ill_t *ill;
19148
19149 ASSERT(ipif != NULL);
19150 ill = ipif->ipif_ill;
19151
19152 if (ill->ill_phys_addr == NULL) {
19153 return (EADDRNOTAVAIL);
19154 }
19155 if (ill->ill_phys_addr_length > sizeof (sock->sa_data)) {
19156 return (EPFNOSUPPORT);
19157 }
19158
19159 ip1dbg(("ip_sioctl_get_hwaddr(%s)\n", ill->ill_name));
19160
19161 /* Existence of mp1 has been checked in ip_wput_nondata */
19162 mp1 = mp->b_cont->b_cont;
19163 ifr = (struct ifreq *)mp1->b_rptr;
19164
19165 sock = &ifr->ifr_addr;
19166 /*
19167 * The "family" field in the returned structure is set to a value
19168 * that represents the type of device to which the address belongs.
	 * The value returned may differ from that on Linux, but it will
	 * still represent the correct symbol on Solaris.
19171 */
19172 sock->sa_family = arp_hw_type(ill->ill_mactype);
19173 bcopy(ill->ill_phys_addr, &sock->sa_data, ill->ill_phys_addr_length);
19174
19175 return (0);
19176 }
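/*
 * From userland the ioctl is used along these lines (a sketch; `s' is an
 * open socket and "net0" a hypothetical interface name):
 *
 *	struct ifreq ifr;
 *
 *	(void) strlcpy(ifr.ifr_name, "net0", sizeof (ifr.ifr_name));
 *	if (ioctl(s, SIOCGIFHWADDR, &ifr) == 0) {
 *		... ifr.ifr_addr.sa_family holds the ARP hardware type and
 *		    ifr.ifr_addr.sa_data the MAC address ...
 *	}
 */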
19177
19178 /*
 * The expectation of applications using SIOCGIFHWADDR is that data will
 * be returned in the sa_data field of the sockaddr structure.  With
 * SIOCGLIFHWADDR, we're breaking new ground as there is no Linux
 * equivalent.  In light of this, struct sockaddr_dl is used as it
 * offers more space for address storage in sdl_data.
19184 */
19185 /* ARGSUSED */
19186 int
19187 ip_sioctl_get_lifhwaddr(ipif_t *ipif, sin_t *dummy_sin, queue_t *q, mblk_t *mp,
19188 ip_ioctl_cmd_t *ipip, void *if_req)
19189 {
19190 struct sockaddr_dl *sock;
19191 struct lifreq *lifr;
19192 mblk_t *mp1;
19193 ill_t *ill;
19194
19195 ASSERT(ipif != NULL);
19196 ill = ipif->ipif_ill;
19197
19198 if (ill->ill_phys_addr == NULL) {
19199 return (EADDRNOTAVAIL);
19200 }
19201 if (ill->ill_phys_addr_length > sizeof (sock->sdl_data)) {
19202 return (EPFNOSUPPORT);
19203 }
19204
19205 ip1dbg(("ip_sioctl_get_lifhwaddr(%s)\n", ill->ill_name));
19206
19207 /* Existence of mp1 has been checked in ip_wput_nondata */
19208 mp1 = mp->b_cont->b_cont;
19209 lifr = (struct lifreq *)mp1->b_rptr;
19210
19211 /*
	 * struct sockaddr_dl is used here for the extra room it offers
	 * for address storage in sdl_data; sockpfp responds to the same
	 * ioctl using the analogous sockaddr_ll.
19216 */
19217 lifr->lifr_type = ill->ill_type;
19218 sock = (struct sockaddr_dl *)&lifr->lifr_addr;
19219 sock->sdl_family = AF_LINK;
19220 sock->sdl_index = ill->ill_phyint->phyint_ifindex;
19221 sock->sdl_type = ill->ill_mactype;
19222 sock->sdl_nlen = 0;
19223 sock->sdl_slen = 0;
19224 sock->sdl_alen = ill->ill_phys_addr_length;
19225 bcopy(ill->ill_phys_addr, sock->sdl_data, ill->ill_phys_addr_length);
19226
19227 return (0);
19228 }
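/*
 * The SIOCGLIFHWADDR variant is used similarly, with the address returned
 * in a sockaddr_dl (a sketch; `s' is an open socket and "net0" a
 * hypothetical interface name):
 *
 *	struct lifreq lifr;
 *	struct sockaddr_dl *sdl;
 *
 *	(void) strlcpy(lifr.lifr_name, "net0", sizeof (lifr.lifr_name));
 *	if (ioctl(s, SIOCGLIFHWADDR, &lifr) == 0) {
 *		sdl = (struct sockaddr_dl *)&lifr.lifr_addr;
 *		... LLADDR(sdl) points at sdl_alen bytes of MAC address ...
 *	}
 */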