/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1990 Mentat Inc.
 * Copyright (c) 2017 OmniTI Computer Consulting, Inc. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 * Copyright (c) 2018 Joyent, Inc. All rights reserved.
 */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/dlpi.h>
#include <sys/stropts.h>
#include <sys/sysmacros.h>
#include <sys/strsubr.h>
#include <sys/strlog.h>
#include <sys/strsun.h>
#include <sys/zone.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/xti_inet.h>
#include <sys/ddi.h>
#include <sys/suntpi.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/kobj.h>
#include <sys/modctl.h>
#include <sys/atomic.h>
#include <sys/policy.h>
#include <sys/priv.h>
#include <sys/taskq.h>

#include <sys/systm.h>
#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/vtrace.h>
#include <sys/isa_defs.h>
#include <sys/mac.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/route.h>
#include <sys/sockio.h>
#include <netinet/in.h>
#include <net/if_dl.h>

#include <inet/common.h>
#include <inet/mi.h>
#include <inet/mib2.h>
#include <inet/nd.h>
#include <inet/arp.h>
#include <inet/snmpcom.h>
#include <inet/optcom.h>
#include <inet/kstatcom.h>

#include <netinet/igmp_var.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet/sctp.h>

#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/ip6.h>
#include <inet/ip6_asp.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/ip_multi.h>
#include <inet/ip_if.h>
#include <inet/ip_ire.h>
#include <inet/ip_ftable.h>
#include <inet/ip_rts.h>
#include <inet/ip_ndp.h>
#include <inet/ip_listutils.h>
#include <netinet/igmp.h>
#include <netinet/ip_mroute.h>
#include <inet/ipp_common.h>
#include <inet/cc.h>

#include <net/pfkeyv2.h>
#include <inet/sadb.h>
#include <inet/ipsec_impl.h>
#include <inet/iptun/iptun_impl.h>
#include <inet/ipdrop.h>
#include <inet/ip_netinfo.h>
#include <inet/ilb_ip.h>

#include <sys/ethernet.h>
#include <net/if_types.h>
#include <sys/cpuvar.h>

#include <ipp/ipp.h>
#include <ipp/ipp_impl.h>
#include <ipp/ipgpc/ipgpc.h>

#include <sys/pattr.h>
#include <inet/ipclassifier.h>
#include <inet/sctp_ip.h>
#include <inet/sctp/sctp_impl.h>
#include <inet/udp_impl.h>
#include <inet/rawip_impl.h>
#include <inet/rts_impl.h>

#include <sys/tsol/label.h>
#include <sys/tsol/tnet.h>

#include <sys/squeue_impl.h>
#include <inet/ip_arp.h>

#include <sys/clock_impl.h>	/* For LBOLT_FASTPATH{,64} */

/*
 * Values for squeue switch:
 * IP_SQUEUE_ENTER_NODRAIN: SQ_NODRAIN
 * IP_SQUEUE_ENTER: SQ_PROCESS
 * IP_SQUEUE_FILL: SQ_FILL
 */
int ip_squeue_enter = IP_SQUEUE_ENTER;	/* Settable in /etc/system */

int ip_squeue_flag;

/*
 * Settable in /etc/system
 */
int ip_poll_normal_ms = 100;
int ip_poll_normal_ticks = 0;
int ip_modclose_ackwait_ms = 3000;

/*
 * It would be nice to have these present only in DEBUG systems, but the
 * current design of the global symbol checking logic requires them to be
 * unconditionally present.
 */
uint_t ip_thread_data;		/* TSD key for debug support */
krwlock_t ip_thread_rwlock;
list_t	ip_thread_list;

/*
 * Structure to represent a linked list of msgblks. Used by ip_snmp_ functions.
 */

struct listptr_s {
	mblk_t	*lp_head;	/* pointer to the head of the list */
	mblk_t	*lp_tail;	/* pointer to the tail of the list */
};

typedef struct listptr_s	listptr_t;
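
/*
 * For illustration, a minimal sketch (not part of this file's logic) of how
 * such a list is grown; the helper name and shape below are hypothetical,
 * the real consumers chain mblks while filling in SNMP reply data:
 *
 *	static void
 *	listptr_append(listptr_t *lp, mblk_t *mp)
 *	{
 *		if (lp->lp_head == NULL)
 *			lp->lp_head = mp;		// first element
 *		else
 *			lp->lp_tail->b_cont = mp;	// chain via b_cont
 *		lp->lp_tail = mp;
 *	}
 */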

/*
 * This is used by ip_snmp_get_mib2_ip_route_media and
 * ip_snmp_get_mib2_ip6_route_media to carry the lists of return data.
 */
typedef struct iproutedata_s {
	uint_t		ird_idx;
	uint_t		ird_flags;	/* see below */
	listptr_t	ird_route;	/* ipRouteEntryTable */
	listptr_t	ird_netmedia;	/* ipNetToMediaEntryTable */
	listptr_t	ird_attrs;	/* ipRouteAttributeTable */
} iproutedata_t;

/* Include ire_testhidden and IRE_IF_CLONE routes */
#define	IRD_REPORT_ALL	0x01

/*
 * Cluster specific hooks. These should be NULL when booted as a non-cluster.
 */

/*
 * Hook functions to enable cluster networking.
 * On non-clustered systems these vectors must always be NULL.
 *
 * Hook function to check whether a specified IP address is a shared IP
 * address in the cluster.
 */
int (*cl_inet_isclusterwide)(netstackid_t stack_id, uint8_t protocol,
    sa_family_t addr_family, uint8_t *laddrp, void *args) = NULL;

/*
 * Hook function to generate a cluster-wide IP fragment identifier.
 */
uint32_t (*cl_inet_ipident)(netstackid_t stack_id, uint8_t protocol,
    sa_family_t addr_family, uint8_t *laddrp, uint8_t *faddrp,
    void *args) = NULL;

/*
 * Hook function to generate a cluster-wide SPI.
 */
void (*cl_inet_getspi)(netstackid_t, uint8_t, uint8_t *, size_t,
    void *) = NULL;

/*
 * Hook function to verify whether an SPI is already utilized.
 */

int (*cl_inet_checkspi)(netstackid_t, uint8_t, uint32_t, void *) = NULL;

/*
 * Hook function to delete an SPI from the cluster-wide repository.
 */

void (*cl_inet_deletespi)(netstackid_t, uint8_t, uint32_t, void *) = NULL;

/*
 * Hook function to inform the cluster when a packet is received on an
 * IDLE SA.
 */

void (*cl_inet_idlesa)(netstackid_t, uint8_t, uint32_t, sa_family_t,
    in6_addr_t, in6_addr_t, void *) = NULL;
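
/*
 * Since these vectors are NULL on non-clustered systems, every call site
 * must test before dispatching. A minimal sketch of the expected pattern;
 * the argument values here are illustrative, not taken from a real call
 * site:
 *
 *	if (cl_inet_isclusterwide != NULL &&
 *	    (*cl_inet_isclusterwide)(stack_id, IPPROTO_TCP, AF_INET,
 *	    (uint8_t *)&addr, NULL) != 0) {
 *		// address is shared across the cluster
 *	}
 */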

/*
 * Synchronization notes:
 *
 * IP is a fully D_MP STREAMS module/driver. Thus it does not depend on any
 * MT level protection given by STREAMS. IP uses a combination of its own
 * internal serialization mechanism and standard Solaris locking techniques.
 * The internal serialization is per phyint. This is used to serialize
 * plumbing operations, IPMP operations, most set ioctls, etc.
 *
 * Plumbing is a long sequence of operations involving message
 * exchanges between IP, ARP and device drivers. Many set ioctls are typically
 * involved in plumbing operations. A natural model is to serialize these
 * ioctls one per ill. For example plumbing of hme0 and qfe0 can go on in
 * parallel without any interference. But various set ioctls on hme0 are best
 * serialized, along with IPMP operations and processing of DLPI control
 * messages received from drivers on a per phyint basis. This serialization is
 * provided by the ipsq_t and primitives operating on this. Details can
 * be found in ip_if.c above the core primitives operating on ipsq_t.
 *
 * Lookups of an ipif or ill by a thread return a refheld ipif / ill.
 * Similarly, lookup of an ire by a thread also returns a refheld ire.
 * In addition, ipifs and ills referenced by the ire are also indirectly
 * refheld. Thus no ipif or ill can vanish as long as an ipif is refheld
 * directly or indirectly. For example an SIOCSLIFADDR ioctl that changes the
 * address of an ipif has to go through the ipsq_t. This ensures that only
 * one such exclusive operation proceeds at any time on the ipif. It then
 * waits for all refcnts associated with this ipif to come down to zero.
 * The address is changed only after the ipif has been quiesced. Then the
 * ipif is brought up again. More details are described above the comment
 * in ip_sioctl_flags.
 *
 * Packet processing is based mostly on IREs and is fully multi-threaded
 * using standard Solaris MT techniques.
 *
 * There are explicit locks in IP to handle:
 * - The ip_g_head list maintained by mi_open_link() and friends.
 *
 * - The reassembly data structures (one lock per hash bucket)
 *
 * - conn_lock is meant to protect conn_t fields. The fields actually
 *   protected by conn_lock are documented in the conn_t definition.
 *
 * - ire_lock to protect some of the fields of the ire, IRE tables
 *   (one lock per hash bucket). Refer to ip_ire.c for details.
 *
 * - ndp_g_lock and ncec_lock for protecting NCEs.
 *
 * - ill_lock protects fields of the ill and ipif. Details in ip.h
 *
 * - ill_g_lock: This is a global reader/writer lock. Protects the following
 *	* The AVL tree based global multi list of all ills.
 *	* The linked list of all ipifs of an ill
 *	* The <ipsq-xop> mapping
 *	* <ill-phyint> association
 *   Insertion/deletion of an ill in the system, insertion/deletion of an ipif
 *   into an ill, changing the <ipsq-xop> mapping of an ill, and changing the
 *   <ill-phyint> assoc of an ill all have to hold the ill_g_lock as
 *   writer for the actual duration of the insertion/deletion/change.
 *
 * - ill_lock: This is a per ill mutex.
 *   It protects some members of the ill_t struct; see ip.h for details.
 *   It also protects the <ill-phyint> assoc.
 *   It also protects the list of ipifs hanging off the ill.
 *
 * - ipsq_lock: This is a per ipsq_t mutex lock.
 *   This protects some members of the ipsq_t struct; see ip.h for details.
 *   It also protects the <ipsq-ipxop> mapping.
 *
 * - ipx_lock: This is a per ipxop_t mutex lock.
 *   This protects some members of the ipxop_t struct; see ip.h for details.
 *
 * - phyint_lock: This is a per phyint mutex lock. Protects just the
 *   phyint_flags.
 *
 * - ip_addr_avail_lock: This is used to ensure the uniqueness of IP addresses.
 *   This lock is held in ipif_up_done so that the ipif is marked IPIF_UP and
 *   the uniqueness check is also done atomically.
 *
 * - ill_g_usesrc_lock: This readers/writer lock protects the usesrc
 *   group list linked by ill_usesrc_grp_next. It also protects the
 *   ill_usesrc_ifindex field. It is taken as a writer when a member of the
 *   group is being added or deleted. This lock is taken as a reader when
 *   walking the list/group (e.g. to get the number of members in a usesrc
 *   group).
 *   Note, it is only necessary to take this lock if the ill_usesrc_grp_next
 *   field is changing state, i.e. from NULL to non-NULL or vice versa. For
 *   example, it is not necessary to take this lock in the initial portion
 *   of ip_sioctl_slifusesrc or at all in ip_sioctl_flags since these
 *   operations are executed exclusively and that ensures that the "usesrc
 *   group state" cannot change. The "usesrc group state" change can happen
 *   only in the latter part of ip_sioctl_slifusesrc and in ill_delete.
 *
 * Changing <ill-phyint>, <ipsq-xop> associations:
 *
 * To change the <ill-phyint> association, the ill_g_lock must be held
 * as writer, and the ill_locks of both the v4 and v6 instance of the ill
 * must be held.
 *
 * To change the <ipsq-xop> association, the ill_g_lock must be held as
 * writer, the ipsq_lock must be held, and one must be writer on the ipsq.
 * This is only done when ills are added or removed from IPMP groups.
 *
 * To add or delete an ipif from the list of ipifs hanging off the ill,
 * ill_g_lock (writer) and ill_lock must be held and the thread must be
 * a writer on the associated ipsq.
 *
 * To add an ill to or delete an ill from the system, the ill_g_lock must be
 * held as writer and the thread must be a writer on the associated ipsq.
 *
 * To add an ilm to or delete an ilm from an ill, the ill_lock must be held
 * and the thread must be a writer on the associated ipsq.
 *
 * Lock hierarchy
 *
 * Some lock hierarchy scenarios are listed below.
 *
 * ill_g_lock -> conn_lock -> ill_lock -> ipsq_lock -> ipx_lock
 * ill_g_lock -> ill_lock(s) -> phyint_lock
 * ill_g_lock -> ndp_g_lock -> ill_lock -> ncec_lock
 * ill_g_lock -> ip_addr_avail_lock
 * conn_lock -> irb_lock -> ill_lock -> ire_lock
 * ill_g_lock -> ip_g_nd_lock
 * ill_g_lock -> ips_ipmp_lock -> ill_lock -> nce_lock
 * ill_g_lock -> ndp_g_lock -> ill_lock -> ncec_lock -> nce_lock
 * arl_lock -> ill_lock
 * ips_ire_dep_lock -> irb_lock
 *
 * When more than one ill lock needs to be held, the ill locks are sorted by
 * address and acquired starting from the highest-addressed lock and working
 * downward.
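 *
 * For illustration, a sketch of that ordering discipline for two ill locks
 * (hypothetical variable names; the real sites are more general):
 *
 *	ill_t *first = ill_a, *second = ill_b;
 *
 *	if ((uintptr_t)&ill_a->ill_lock < (uintptr_t)&ill_b->ill_lock) {
 *		first = ill_b;		// higher-addressed lock goes first
 *		second = ill_a;
 *	}
 *	mutex_enter(&first->ill_lock);
 *	mutex_enter(&second->ill_lock);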
 *
 * Multicast scenarios
 * ips_ill_g_lock -> ill_mcast_lock
 * conn_ilg_lock -> ips_ill_g_lock -> ill_lock
 * ill_mcast_serializer -> ill_mcast_lock -> ips_ipmp_lock -> ill_lock
 * ill_mcast_serializer -> ill_mcast_lock -> connf_lock -> conn_lock
 * ill_mcast_serializer -> ill_mcast_lock -> conn_ilg_lock
 * ill_mcast_serializer -> ill_mcast_lock -> ips_igmp_timer_lock
 *
 * IPsec scenarios
 *
 * ipsa_lock -> ill_g_lock -> ill_lock
 * ill_g_usesrc_lock -> ill_g_lock -> ill_lock
 *
 * Trusted Solaris scenarios
 *
 * igsa_lock -> gcgrp_rwlock -> gcgrp_lock
 * igsa_lock -> gcdb_lock
 * gcgrp_rwlock -> ire_lock
 * gcgrp_rwlock -> gcdb_lock
 *
 * squeue(sq_lock), flow related (ft_lock, fe_lock) locking
 *
 * cpu_lock --> ill_lock --> sqset_lock --> sq_lock
 * sq_lock -> conn_lock -> QLOCK(q)
 * ill_lock -> ft_lock -> fe_lock
 *
 * Routing/forwarding table locking notes:
 *
 * Lock acquisition order: Radix tree lock, irb_lock.
 * Requirements:
 * i.   Walker must not hold any locks during the walker callback.
 * ii.  Walker must not see a truncated tree during the walk because of any
 *      node deletion.
 * iii. Existing code assumes ire_bucket is valid if it is non-null and is used
 *      in many places in the code to walk the irb list. Thus even if all the
 *      ires in a bucket have been deleted, we still can't free the radix node
 *      until the ires have actually been inactive'd (freed).
 *
 * Tree traversal - Need to hold the global tree lock in read mode.
 * Before dropping the global tree lock, need to increment the ire_refcnt
 * to ensure that the radix node can't be deleted.
 *
 * Tree add - Need to hold the global tree lock in write mode to add a
 * radix node. To prevent the node from being deleted, increment the
 * irb_refcnt after the node is added to the tree. The ire itself is
 * added later while holding the irb_lock, but not the tree lock.
 *
 * Tree delete - Need to hold the global tree lock and irb_lock in write mode.
 * All associated ires must be inactive (i.e. freed), and irb_refcnt
 * must be zero.
 *
 * Walker - Increment irb_refcnt before calling the walker callback. Hold the
 * global tree lock (read mode) for traversal.
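 *
 * A simplified sketch of the walker refcnt discipline described above
 * (illustrative only, assuming the usual irb_refhold()/irb_refrele()
 * helpers; the real walkers are more involved):
 *
 *	irb_refhold(irb);		// pin the bucket/radix node
 *	for (ire = irb->irb_ire; ire != NULL; ire = ire->ire_next) {
 *		if (!IRE_IS_CONDEMNED(ire))
 *			(*walker_cb)(ire, walker_arg); // no locks held here
 *	}
 *	irb_refrele(irb);		// condemned ires may now be freed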
 *
 * IRE dependencies - In some cases we hold ips_ire_dep_lock across
 * ire_refrele, hence we will acquire irb_lock while holding
 * ips_ire_dep_lock.
 *
 * IPsec notes :
 *
 * IP interacts with the IPsec code (AH/ESP) by storing IPsec attributes
 * in the ip_xmit_attr_t and ip_recv_attr_t. For outbound datagrams, the
 * ip_xmit_attr_t has the information used by the IPsec code for applying
 * the right level of protection. The information initialized by IP in the
 * ip_xmit_attr_t is determined by the per-socket policy or global policy
 * in the system. For inbound datagrams, the ip_recv_attr_t starts out with
 * nothing in it. It gets filled with the right information if it goes
 * through the AH/ESP code, which happens if the incoming packet is secure.
 * The information initialized by AH/ESP is later used by IP (during fanouts
 * to ULP) to see whether the policy requirements needed by per-socket policy
 * or global policy are met or not.
 *
 * For fully connected sockets, i.e. when dst, src [addr, port] are known,
 * conn_policy_cached is set indicating that policy has been cached.
 * conn_in_enforce_policy may or may not be set depending on whether
 * there is a global policy match or per-socket policy match.
 * Policy inheriting happens in ip_policy_set once the destination is known.
 * Once the right policy is set on the conn_t, policy cannot change for
 * this socket. This makes life simpler for TCP (UDP ?) where
 * re-transmissions go out with the same policy. For symmetry, policy
 * is cached for fully connected UDP sockets also. Thus if policy is cached,
 * it also implies that policy is latched, i.e. policy cannot change
 * on these sockets. As we have the right policy on the conn, we don't
 * have to look up global policy for every outbound and inbound datagram,
 * which serves as an optimization. Note that a global policy change
 * does not affect fully connected sockets if they have policy. If fully
 * connected sockets did not have any policy associated with them, a global
 * policy change may affect them.
 *
 * IP Flow control notes:
 * ---------------------
 * Non-TCP streams are flow controlled by IP. The way this is accomplished
 * differs when ILL_CAPAB_DLD_DIRECT is enabled for that IP instance. When
 * ILL_DIRECT_CAPABLE(ill) is TRUE, IP can do direct function calls into
 * GLDv3. Otherwise packets are sent down to lower layers using STREAMS
 * functions.
 *
 * Per Tx ring UDP flow control:
 * This is applicable only when ILL_CAPAB_DLD_DIRECT capability is set in
 * the ill (i.e. ILL_DIRECT_CAPABLE(ill) is true).
 *
 * The underlying link can expose multiple Tx rings to the GLDv3 mac layer.
 * To achieve best performance, outgoing traffic needs to be fanned out among
 * these Tx rings. mac_tx() is called (via str_mdata_fastpath_put()) to send
 * traffic out of the NIC and it takes a fanout hint. UDP connections pass
 * the address of connp as fanout hint to mac_tx(). Under flow-controlled
 * conditions, mac_tx() returns a non-NULL cookie (ip_mac_tx_cookie_t). This
 * cookie points to a specific Tx ring that is blocked. The cookie is used to
 * hash into an entry in the idl_tx_list[] array. Each idl_tx_list_t
 * points to drain_lists (idl_t's). These drain lists store the blocked UDP
 * connp's. The drain list is not a single list but a configurable number of
 * lists.
 *
 * The diagram below shows idl_tx_list_t's and their drain_lists. ip_stack_t
 * has an array of idl_tx_list_t. The size of the array is TX_FANOUT_SIZE
 * which is equal to 128. This array in turn contains a pointer to idl_t[],
 * the ip drain list. The idl_t[] array size is MIN(max_ncpus, 8). The drain
 * list will point to the list of connp's that are flow controlled.
 *
 *                    ---------------   -------   -------   -------
 *                 |->|drain_list[0]|-->|connp|-->|connp|-->|connp|-->
 *                 |  ---------------   -------   -------   -------
 *                 |  ---------------   -------   -------   -------
 *                 |->|drain_list[1]|-->|connp|-->|connp|-->|connp|-->
 * ---------------- | ---------------   -------   -------   -------
 * |idl_tx_list[0]|->| ---------------   -------   -------   -------
 * ---------------- |->|drain_list[2]|-->|connp|-->|connp|-->|connp|-->
 *                 |  ---------------   -------   -------   -------
 *                 .        .              .         .         .
 *                 |  ---------------   -------   -------   -------
 *                 |->|drain_list[n]|-->|connp|-->|connp|-->|connp|-->
 *                    ---------------   -------   -------   -------
 *                    ---------------   -------   -------   -------
 *                 |->|drain_list[0]|-->|connp|-->|connp|-->|connp|-->
 *                 |  ---------------   -------   -------   -------
 *                 |  ---------------   -------   -------   -------
 * ---------------- |->|drain_list[1]|-->|connp|-->|connp|-->|connp|-->
 * |idl_tx_list[1]|->| ---------------   -------   -------   -------
 * ---------------- |      .               .         .         .
 *                 |  ---------------   -------   -------   -------
 *                 |->|drain_list[n]|-->|connp|-->|connp|-->|connp|-->
 *                    ---------------   -------   -------   -------
 *     .....
 * ----------------
 * |idl_tx_list[n]|-> ...
 * ----------------
 *
 * When mac_tx() returns a cookie, the cookie is hashed into an index into
 * ips_idl_tx_list[], and conn_drain_insert() is called with the idl_tx_list
 * to insert the conn onto. conn_drain_insert() asserts flow control for the
 * sockets via su_txq_full() (non-STREAMS) or QFULL on conn_wq (STREAMS).
 * Further, conn_blocked is set to indicate that the conn is blocked.
 *
 * GLDv3 calls ill_flow_enable() when flow control is relieved. The cookie
 * passed in the call to ill_flow_enable() identifies the blocked Tx ring and
 * is again hashed to locate the appropriate idl_tx_list, which is then
 * drained via conn_walk_drain(). conn_walk_drain() goes through each conn in
 * the drain list and calls conn_drain_remove() to clear flow control (via
 * calling su_txq_full() or clearing QFULL), and remove the conn from the
 * drain list.
 *
 * Note that the drain list is not a single list but a (configurable) array of
 * lists (8 elements by default). Synchronization between drain insertion and
 * flow control wakeup is handled by using idl_txl->txl_lock, and only
 * conn_drain_insert() and conn_drain_remove() manipulate the drain list.
 *
 * Flow control via STREAMS is used when ILL_DIRECT_CAPABLE() returns FALSE.
 * On the send side, if the packet cannot be sent down to the driver by IP
 * (canput() fails), ip_xmit() drops the packet and returns EWOULDBLOCK to the
 * caller, who may then invoke ixa_check_drain_insert() to insert the conn on
 * the 0'th drain list. When ip_wsrv() runs on the ill_wq because flow
 * control has been relieved, the blocked conns in the 0'th drain list are
 * drained as in the non-STREAMS case.
 *
 * In both the STREAMS and non-STREAMS cases, the sockfs upcall to set QFULL
 * is done when the conn is inserted into the drain list (conn_drain_insert())
 * and cleared when the conn is removed from it (conn_drain_remove()).
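 *
 * For illustration, a hypothetical sketch of the cookie-to-drain-list
 * selection described above; the hash actually used may differ:
 *
 *	// cookie obtained from a mac_tx() call that passed connp as hint
 *	if (cookie != 0) {
 *		idl_tx_list_t *tx_list;
 *
 *		// blocked: pick an idl_tx_list_t by hashing the cookie
 *		tx_list = &ipst->ips_idl_tx_list[cookie % TX_FANOUT_SIZE];
 *		conn_drain_insert(connp, tx_list);
 *	}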
 *
 * IPQOS notes:
 *
 * IPQoS Policies are applied to packets using IPPF (IP Policy framework)
 * and IPQoS modules. IPPF includes hooks in IP at different control points
 * (callout positions) which direct packets to IPQoS modules for policy
 * processing. Policies, if present, are global.
 *
 * The callout positions are located in the following paths:
 * o local_in (packets destined for this host)
 * o local_out (packets originating from this host)
 * o fwd_in (packets forwarded by this m/c - inbound)
 * o fwd_out (packets forwarded by this m/c - outbound)
 * Hooks at these callout points can be enabled/disabled using the ndd variable
 * ip_policy_mask (a bit mask with the 4 LSB indicating the callout positions).
 * By default all the callout positions are enabled.
 *
 * Outbound (local_out)
 * Hooks are placed in ire_send_wire_v4 and ire_send_wire_v6.
 *
 * Inbound (local_in)
 * Hooks are placed in ip_fanout_v4 and ip_fanout_v6.
 *
 * Forwarding (in and out)
 * Hooks are placed in ire_recv_forward_v4/v6.
 *
 * IP Policy Framework processing (IPPF processing)
 * Policy processing for a packet is initiated by ip_process, which ascertains
 * that the classifier (ipgpc) is loaded and configured, failing which the
 * packet resumes normal processing in IP. If the classifier is present, the
 * packet is acted upon by one or more IPQoS modules (action instances), per
 * filters configured in ipgpc, and resumes normal IP processing thereafter.
 * An action instance can drop a packet in the course of its processing.
 *
 * Zones notes:
 *
 * The partitioning rules for networking are as follows:
 * 1) Packets coming from a zone must have a source address belonging to that
 * zone.
 * 2) Packets coming from a zone can only be sent on a physical interface on
 * which the zone has an IP address.
 * 3) Between two zones on the same machine, packet delivery is only allowed if
 * there's a matching route for the destination and zone in the forwarding
 * table.
 * 4) The TCP and UDP port spaces are per-zone; that is, two processes in
 * different zones can bind to the same port with the wildcard address
 * (INADDR_ANY).
 *
 * The granularity of interface partitioning is at the logical interface level.
 * Therefore, every zone has its own IP addresses, and incoming packets can be
 * attributed to a zone unambiguously. A logical interface is placed into a zone
 * using the SIOCSLIFZONE ioctl; this sets the ipif_zoneid field in the ipif_t
 * structure. Rule (1) is implemented by modifying the source address selection
 * algorithm so that the list of eligible addresses is filtered based on the
 * zone of the sending process.
 *
 * The Internet Routing Entries (IREs) are either exclusive to a zone or shared
 * across all zones, depending on their type. Here is the break-up:
 *
 * IRE type				Shared/exclusive
 * --------				----------------
 * IRE_BROADCAST			Exclusive
 * IRE_DEFAULT (default routes)		Shared (*)
 * IRE_LOCAL				Exclusive (x)
 * IRE_LOOPBACK				Exclusive
 * IRE_PREFIX (net routes)		Shared (*)
 * IRE_IF_NORESOLVER (interface routes)	Exclusive
 * IRE_IF_RESOLVER (interface routes)	Exclusive
 * IRE_IF_CLONE (interface routes)	Exclusive
 * IRE_HOST (host routes)		Shared (*)
 *
 * (*) A zone can only use a default or off-subnet route if the gateway is
 * directly reachable from the zone, that is, if the gateway's address matches
 * one of the zone's logical interfaces.
 *
 * (x) IRE_LOCAL is handled a bit differently.
 * When ip_restrict_interzone_loopback is set (the default),
 * ire_route_recursive restricts loopback using an IRE_LOCAL
 * between zones to the case when L2 would have conceptually looped the packet
 * back, i.e. the loopback which is required since neither Ethernet drivers
 * nor Ethernet hardware loops them back. This is the case when the normal
 * routes (ignoring IREs with different zoneids) would send out the packet on
 * the same ill as the ill with which the IRE_LOCAL is associated.
 *
 * Multiple zones can share a common broadcast address; typically all zones
 * share the 255.255.255.255 address. Incoming as well as locally originated
 * broadcast packets must be dispatched to all the zones on the broadcast
 * network. For directed broadcasts (e.g. 10.16.72.255) this is not trivial
 * since some zones may not be on the 10.16.72/24 network. To handle this, each
 * zone has its own set of IRE_BROADCAST entries; then, broadcast packets are
 * sent to every zone that has an IRE_BROADCAST entry for the destination
 * address on the input ill, see ip_input_broadcast().
 *
 * Applications in different zones can join the same multicast group address.
 * The same logic applies for multicast as for broadcast. ip_input_multicast
 * dispatches packets to all zones that have members on the physical interface.
 */

/*
 * Squeue Fanout flags:
 *	0: No fanout.
 *	1: Fanout across all squeues
 */
boolean_t ip_squeue_fanout = 0;

/*
 * Maximum dups allowed per packet.
 */
uint_t ip_max_frag_dups = 10;

static int ip_open(queue_t *q, dev_t *devp, int flag, int sflag,
    cred_t *credp, boolean_t isv6);
static mblk_t *ip_xmit_attach_llhdr(mblk_t *, nce_t *);

static boolean_t icmp_inbound_verify_v4(mblk_t *, icmph_t *, ip_recv_attr_t *);
static void icmp_inbound_too_big_v4(icmph_t *, ip_recv_attr_t *);
static void icmp_inbound_error_fanout_v4(mblk_t *, icmph_t *,
    ip_recv_attr_t *);
static void icmp_options_update(ipha_t *);
static void icmp_param_problem(mblk_t *, uint8_t, ip_recv_attr_t *);
static void icmp_pkt(mblk_t *, void *, size_t, ip_recv_attr_t *);
static mblk_t *icmp_pkt_err_ok(mblk_t *, ip_recv_attr_t *);
static void icmp_redirect_v4(mblk_t *mp, ipha_t *, icmph_t *,
    ip_recv_attr_t *);
static void icmp_send_redirect(mblk_t *, ipaddr_t, ip_recv_attr_t *);
static void icmp_send_reply_v4(mblk_t *, ipha_t *, icmph_t *,
    ip_recv_attr_t *);

mblk_t *ip_dlpi_alloc(size_t, t_uscalar_t);
char *ip_dot_addr(ipaddr_t, char *);
mblk_t *ip_carve_mp(mblk_t **, ssize_t);
int ip_close(queue_t *, int);
static char *ip_dot_saddr(uchar_t *, char *);
static void ip_lrput(queue_t *, mblk_t *);
ipaddr_t ip_net_mask(ipaddr_t);
char *ip_nv_lookup(nv_t *, int);
void ip_rput(queue_t *, mblk_t *);
static void ip_rput_dlpi_writer(ipsq_t *dummy_sq, queue_t *q, mblk_t *mp,
    void *dummy_arg);
int ip_snmp_get(queue_t *, mblk_t *, int, boolean_t);
static mblk_t *ip_snmp_get_mib2_ip(queue_t *, mblk_t *,
    mib2_ipIfStatsEntry_t *, ip_stack_t *, boolean_t);
static mblk_t *ip_snmp_get_mib2_ip_traffic_stats(queue_t *, mblk_t *,
    ip_stack_t *, boolean_t);
static mblk_t *ip_snmp_get_mib2_ip6(queue_t *, mblk_t *, ip_stack_t *,
    boolean_t);
static mblk_t *ip_snmp_get_mib2_icmp(queue_t *, mblk_t *, ip_stack_t *ipst);
static mblk_t *ip_snmp_get_mib2_icmp6(queue_t *, mblk_t *, ip_stack_t *ipst);
static mblk_t *ip_snmp_get_mib2_igmp(queue_t *, mblk_t *, ip_stack_t *ipst);
static mblk_t *ip_snmp_get_mib2_multi(queue_t *, mblk_t *, ip_stack_t *ipst);
static mblk_t *ip_snmp_get_mib2_ip_addr(queue_t *, mblk_t *,
    ip_stack_t *ipst, boolean_t);
static mblk_t *ip_snmp_get_mib2_ip6_addr(queue_t *, mblk_t *,
    ip_stack_t *ipst, boolean_t);
static mblk_t *ip_snmp_get_mib2_ip_group_src(queue_t *, mblk_t *,
    ip_stack_t *ipst);
static mblk_t *ip_snmp_get_mib2_ip6_group_src(queue_t *, mblk_t *,
    ip_stack_t *ipst);
static mblk_t *ip_snmp_get_mib2_ip_group_mem(queue_t *, mblk_t *,
    ip_stack_t *ipst);
static mblk_t *ip_snmp_get_mib2_ip6_group_mem(queue_t *, mblk_t *,
    ip_stack_t *ipst);
static mblk_t *ip_snmp_get_mib2_virt_multi(queue_t *, mblk_t *,
    ip_stack_t *ipst);
static mblk_t *ip_snmp_get_mib2_multi_rtable(queue_t *, mblk_t *,
    ip_stack_t *ipst);
static mblk_t *ip_snmp_get_mib2_ip_route_media(queue_t *, mblk_t *, int,
    ip_stack_t *ipst);
static mblk_t *ip_snmp_get_mib2_ip6_route_media(queue_t *, mblk_t *, int,
    ip_stack_t *ipst);
static void ip_snmp_get2_v4(ire_t *, iproutedata_t *);
static void ip_snmp_get2_v6_route(ire_t *, iproutedata_t *);
static int ip_snmp_get2_v4_media(ncec_t *, iproutedata_t *);
static int ip_snmp_get2_v6_media(ncec_t *, iproutedata_t *);
int ip_snmp_set(queue_t *, int, int, uchar_t *, int);

static mblk_t *ip_fragment_copyhdr(uchar_t *, int, int, ip_stack_t *,
    mblk_t *);

static void conn_drain_init(ip_stack_t *);
static void conn_drain_fini(ip_stack_t *);
static void conn_drain(conn_t *connp, boolean_t closing);

static void conn_walk_drain(ip_stack_t *, idl_tx_list_t *);
static void conn_walk_sctp(pfv_t, void *, zoneid_t, netstack_t *);

static void *ip_stack_init(netstackid_t stackid, netstack_t *ns);
static void ip_stack_shutdown(netstackid_t stackid, void *arg);
static void ip_stack_fini(netstackid_t stackid, void *arg);

static int ip_multirt_apply_membership(int (*fn)(conn_t *, boolean_t,
    const in6_addr_t *, ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *),
    ire_t *, conn_t *, boolean_t, const in6_addr_t *, mcast_record_t,
    const in6_addr_t *);

static int ip_squeue_switch(int);

static void *ip_kstat_init(netstackid_t, ip_stack_t *);
static void ip_kstat_fini(netstackid_t, kstat_t *);
static int ip_kstat_update(kstat_t *kp, int rw);
static void *icmp_kstat_init(netstackid_t);
static void icmp_kstat_fini(netstackid_t, kstat_t *);
static int icmp_kstat_update(kstat_t *kp, int rw);
static void *ip_kstat2_init(netstackid_t, ip_stat_t *);
static void ip_kstat2_fini(netstackid_t, kstat_t *);

static void ipobs_init(ip_stack_t *);
static void ipobs_fini(ip_stack_t *);

static int ip_tp_cpu_update(cpu_setup_t, int, void *);

ipaddr_t ip_g_all_ones = IP_HOST_MASK;

static long ip_rput_pullups;
int dohwcksum = 1;	/* use h/w cksum if supported by the hardware */

vmem_t *ip_minor_arena_sa; /* for minor nos. from INET_MIN_DEV+2 thru 2^18-1 */
vmem_t *ip_minor_arena_la; /* for minor nos. from 2^18 thru 2^32-1 */

int ip_debug;

/*
 * Multirouting/CGTP stuff
 */
int ip_cgtp_filter_rev = CGTP_FILTER_REV;	/* CGTP hooks version */

/*
 * IP tunables related declarations. Definitions are in ip_tunables.c
 */
extern mod_prop_info_t ip_propinfo_tbl[];
extern int ip_propinfo_count;

/*
 * Table of IP ioctls encoding the various properties of the ioctl and
 * indexed based on the last byte of the ioctl command. Occasionally there
 * is a clash, and there is more than one ioctl with the same last byte.
 * In such a case one ioctl is encoded in the ndx table and the remaining
 * ioctls are encoded in the misc table. An entry in the ndx table is
 * retrieved by indexing on the last byte of the ioctl command and comparing
 * the ioctl command with the value in the ndx table. In the event of a
 * mismatch the misc table is then searched sequentially for the desired
 * ioctl command.
 *
 * Entry: <command> <copyin_size> <flags> <cmd_type> <function> <restart_func>
 */
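
/*
 * Illustrative sketch of the two-step lookup described above (bounds checks
 * omitted; the local names here are hypothetical, not the actual lookup
 * code):
 *
 *	ip_ioctl_cmd_t *ipip = &ip_ndx_ioctl_table[ioc_cmd & 0xff];
 *
 *	if (ipip->ipi_cmd != ioc_cmd) {
 *		// last-byte clash: fall back to a linear scan
 *		for (i = 0; i < ip_misc_ioctl_count; i++) {
 *			if (ip_misc_ioctl_table[i].ipi_cmd == ioc_cmd) {
 *				ipip = &ip_misc_ioctl_table[i];
 *				break;
 *			}
 *		}
 *	}
 */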
ip_ioctl_cmd_t ip_ndx_ioctl_table[] = {
	/* 000 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 001 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 002 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 003 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 004 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 005 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 006 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 007 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 008 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 009 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 010 */ { SIOCADDRT, sizeof (struct rtentry), IPI_PRIV,
	    MISC_CMD, ip_siocaddrt, NULL },
	/* 011 */ { SIOCDELRT, sizeof (struct rtentry), IPI_PRIV,
	    MISC_CMD, ip_siocdelrt, NULL },

	/* 012 */ { SIOCSIFADDR, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
	    IF_CMD, ip_sioctl_addr, ip_sioctl_addr_restart },
	/* 013 */ { SIOCGIFADDR, sizeof (struct ifreq), IPI_GET_CMD,
	    IF_CMD, ip_sioctl_get_addr, NULL },

	/* 014 */ { SIOCSIFDSTADDR, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
	    IF_CMD, ip_sioctl_dstaddr, ip_sioctl_dstaddr_restart },
	/* 015 */ { SIOCGIFDSTADDR, sizeof (struct ifreq),
	    IPI_GET_CMD, IF_CMD, ip_sioctl_get_dstaddr, NULL },

	/* 016 */ { SIOCSIFFLAGS, sizeof (struct ifreq),
	    IPI_PRIV | IPI_WR,
	    IF_CMD, ip_sioctl_flags, ip_sioctl_flags_restart },
	/* 017 */ { SIOCGIFFLAGS, sizeof (struct ifreq),
	    IPI_MODOK | IPI_GET_CMD,
	    IF_CMD, ip_sioctl_get_flags, NULL },

	/* 018 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 019 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* copyin size cannot be coded for SIOCGIFCONF */
	/* 020 */ { O_SIOCGIFCONF, 0, IPI_GET_CMD,
	    MISC_CMD, ip_sioctl_get_ifconf, NULL },

	/* 021 */ { SIOCSIFMTU, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
	    IF_CMD, ip_sioctl_mtu, NULL },
	/* 022 */ { SIOCGIFMTU, sizeof (struct ifreq), IPI_GET_CMD,
	    IF_CMD, ip_sioctl_get_mtu, NULL },
	/* 023 */ { SIOCGIFBRDADDR, sizeof (struct ifreq),
	    IPI_GET_CMD, IF_CMD, ip_sioctl_get_brdaddr, NULL },
	/* 024 */ { SIOCSIFBRDADDR, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
	    IF_CMD, ip_sioctl_brdaddr, NULL },
	/* 025 */ { SIOCGIFNETMASK, sizeof (struct ifreq),
	    IPI_GET_CMD, IF_CMD, ip_sioctl_get_netmask, NULL },
	/* 026 */ { SIOCSIFNETMASK, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
	    IF_CMD, ip_sioctl_netmask, ip_sioctl_netmask_restart },
	/* 027 */ { SIOCGIFMETRIC, sizeof (struct ifreq),
	    IPI_GET_CMD, IF_CMD, ip_sioctl_get_metric, NULL },
	/* 028 */ { SIOCSIFMETRIC, sizeof (struct ifreq), IPI_PRIV,
	    IF_CMD, ip_sioctl_metric, NULL },
	/* 029 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* See 166-168 below for extended SIOC*XARP ioctls */
	/* 030 */ { SIOCSARP, sizeof (struct arpreq), IPI_PRIV | IPI_WR,
	    ARP_CMD, ip_sioctl_arp, NULL },
	/* 031 */ { SIOCGARP, sizeof (struct arpreq), IPI_GET_CMD,
	    ARP_CMD, ip_sioctl_arp, NULL },
	/* 032 */ { SIOCDARP, sizeof (struct arpreq), IPI_PRIV | IPI_WR,
	    ARP_CMD, ip_sioctl_arp, NULL },

	/* 033 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 034 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 035 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 036 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 037 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 038 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 039 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 040 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 041 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 042 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 043 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 044 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 045 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 046 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 047 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 048 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 049 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 050 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 051 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 052 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 053 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 054 */ { IF_UNITSEL, sizeof (int), IPI_PRIV | IPI_WR | IPI_MODOK,
	    MISC_CMD, if_unitsel, if_unitsel_restart },

	/* 055 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 056 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 057 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 058 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 059 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 060 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 061 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 062 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 063 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 064 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 065 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 066 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 067 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 068 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 069 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 070 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 071 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 072 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 073 */ { SIOCSIFNAME, sizeof (struct ifreq),
	    IPI_PRIV | IPI_WR | IPI_MODOK,
	    IF_CMD, ip_sioctl_sifname, NULL },

	/* 074 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 075 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 076 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 077 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 078 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 079 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 080 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 081 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 082 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 083 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 084 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 085 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 086 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 087 */ { SIOCGIFNUM, sizeof (int), IPI_GET_CMD,
	    MISC_CMD, ip_sioctl_get_ifnum, NULL },
	/* 088 */ { SIOCGIFMUXID, sizeof (struct ifreq), IPI_GET_CMD,
	    IF_CMD, ip_sioctl_get_muxid, NULL },
	/* 089 */ { SIOCSIFMUXID, sizeof (struct ifreq),
	    IPI_PRIV | IPI_WR, IF_CMD, ip_sioctl_muxid, NULL },

	/* Both if and lif variants share same func */
	/* 090 */ { SIOCGIFINDEX, sizeof (struct ifreq), IPI_GET_CMD,
	    IF_CMD, ip_sioctl_get_lifindex, NULL },
	/* Both if and lif variants share same func */
	/* 091 */ { SIOCSIFINDEX, sizeof (struct ifreq),
	    IPI_PRIV | IPI_WR, IF_CMD, ip_sioctl_slifindex, NULL },

	/* copyin size cannot be coded for SIOCGIFCONF */
	/* 092 */ { SIOCGIFCONF, 0, IPI_GET_CMD,
	    MISC_CMD, ip_sioctl_get_ifconf, NULL },
	/* 093 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 094 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 095 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 096 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 097 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 098 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 099 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 100 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 101 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 102 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 103 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 104 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 105 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 106 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 107 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 108 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 109 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 110 */ { SIOCLIFREMOVEIF, sizeof (struct lifreq),
	    IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_removeif,
	    ip_sioctl_removeif_restart },
	/* 111 */ { SIOCLIFADDIF, sizeof (struct lifreq),
	    IPI_GET_CMD | IPI_PRIV | IPI_WR,
	    LIF_CMD, ip_sioctl_addif, NULL },
#define	SIOCLIFADDR_NDX 112
	/* 112 */ { SIOCSLIFADDR, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
	    LIF_CMD, ip_sioctl_addr, ip_sioctl_addr_restart },
	/* 113 */ { SIOCGLIFADDR, sizeof (struct lifreq),
	    IPI_GET_CMD, LIF_CMD, ip_sioctl_get_addr, NULL },
	/* 114 */ { SIOCSLIFDSTADDR, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
	    LIF_CMD, ip_sioctl_dstaddr, ip_sioctl_dstaddr_restart },
	/* 115 */ { SIOCGLIFDSTADDR, sizeof (struct lifreq),
	    IPI_GET_CMD, LIF_CMD, ip_sioctl_get_dstaddr, NULL },
	/* 116 */ { SIOCSLIFFLAGS, sizeof (struct lifreq),
	    IPI_PRIV | IPI_WR,
	    LIF_CMD, ip_sioctl_flags, ip_sioctl_flags_restart },
	/* 117 */ { SIOCGLIFFLAGS, sizeof (struct lifreq),
	    IPI_GET_CMD | IPI_MODOK,
	    LIF_CMD, ip_sioctl_get_flags, NULL },

	/* 118 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 119 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 120 */ { O_SIOCGLIFCONF, 0, IPI_GET_CMD, MISC_CMD,
	    ip_sioctl_get_lifconf, NULL },
	/* 121 */ { SIOCSLIFMTU, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
	    LIF_CMD, ip_sioctl_mtu, NULL },
	/* 122 */ { SIOCGLIFMTU, sizeof (struct lifreq), IPI_GET_CMD,
	    LIF_CMD, ip_sioctl_get_mtu, NULL },
	/* 123 */ { SIOCGLIFBRDADDR, sizeof (struct lifreq),
	    IPI_GET_CMD, LIF_CMD, ip_sioctl_get_brdaddr, NULL },
	/* 124 */ { SIOCSLIFBRDADDR, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
	    LIF_CMD, ip_sioctl_brdaddr, NULL },
	/* 125 */ { SIOCGLIFNETMASK, sizeof (struct lifreq),
	    IPI_GET_CMD, LIF_CMD, ip_sioctl_get_netmask, NULL },
	/* 126 */ { SIOCSLIFNETMASK, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
	    LIF_CMD, ip_sioctl_netmask, ip_sioctl_netmask_restart },
	/* 127 */ { SIOCGLIFMETRIC, sizeof (struct lifreq),
	    IPI_GET_CMD, LIF_CMD, ip_sioctl_get_metric, NULL },
	/* 128 */ { SIOCSLIFMETRIC, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
	    LIF_CMD, ip_sioctl_metric, NULL },
	/* 129 */ { SIOCSLIFNAME, sizeof (struct lifreq),
	    IPI_PRIV | IPI_WR | IPI_MODOK,
	    LIF_CMD, ip_sioctl_slifname,
	    ip_sioctl_slifname_restart },

	/* 130 */ { SIOCGLIFNUM, sizeof (struct lifnum), IPI_GET_CMD,
	    MISC_CMD, ip_sioctl_get_lifnum, NULL },
	/* 131 */ { SIOCGLIFMUXID, sizeof (struct lifreq),
	    IPI_GET_CMD, LIF_CMD, ip_sioctl_get_muxid, NULL },
	/* 132 */ { SIOCSLIFMUXID, sizeof (struct lifreq),
	    IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_muxid, NULL },
	/* 133 */ { SIOCGLIFINDEX, sizeof (struct lifreq),
	    IPI_GET_CMD, LIF_CMD, ip_sioctl_get_lifindex, 0 },
	/* 134 */ { SIOCSLIFINDEX, sizeof (struct lifreq),
	    IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_slifindex, 0 },
	/* 135 */ { SIOCSLIFTOKEN, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
	    LIF_CMD, ip_sioctl_token, NULL },
	/* 136 */ { SIOCGLIFTOKEN, sizeof (struct lifreq),
	    IPI_GET_CMD, LIF_CMD, ip_sioctl_get_token, NULL },
	/* 137 */ { SIOCSLIFSUBNET, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
	    LIF_CMD, ip_sioctl_subnet, ip_sioctl_subnet_restart },
	/* 138 */ { SIOCGLIFSUBNET, sizeof (struct lifreq),
	    IPI_GET_CMD, LIF_CMD, ip_sioctl_get_subnet, NULL },
	/* 139 */ { SIOCSLIFLNKINFO, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
	    LIF_CMD, ip_sioctl_lnkinfo, NULL },

	/* 140 */ { SIOCGLIFLNKINFO, sizeof (struct lifreq),
	    IPI_GET_CMD, LIF_CMD, ip_sioctl_get_lnkinfo, NULL },
	/* 141 */ { SIOCLIFDELND, sizeof (struct lifreq), IPI_PRIV,
	    LIF_CMD, ip_siocdelndp_v6, NULL },
	/* 142 */ { SIOCLIFGETND, sizeof (struct lifreq), IPI_GET_CMD,
	    LIF_CMD, ip_siocqueryndp_v6, NULL },
	/* 143 */ { SIOCLIFSETND, sizeof (struct lifreq), IPI_PRIV,
	    LIF_CMD, ip_siocsetndp_v6, NULL },
	/* 144 */ { SIOCTMYADDR, sizeof (struct sioc_addrreq), IPI_GET_CMD,
	    MISC_CMD, ip_sioctl_tmyaddr, NULL },
	/* 145 */ { SIOCTONLINK, sizeof (struct sioc_addrreq), IPI_GET_CMD,
	    MISC_CMD, ip_sioctl_tonlink, NULL },
	/* 146 */ { SIOCTMYSITE, sizeof (struct sioc_addrreq), 0,
	    MISC_CMD, ip_sioctl_tmysite, NULL },
	/* 147 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 148 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* Old *IPSECONFIG ioctls are now deprecated; see spdsock.c */
	/* 149 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 150 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 151 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 152 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 153 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 154 */ { SIOCGLIFBINDING, sizeof (struct lifreq), IPI_GET_CMD,
	    LIF_CMD, ip_sioctl_get_binding, NULL },
	/* 155 */ { SIOCSLIFGROUPNAME, sizeof (struct lifreq),
	    IPI_PRIV | IPI_WR,
	    LIF_CMD, ip_sioctl_groupname, ip_sioctl_groupname },
	/* 156 */ { SIOCGLIFGROUPNAME, sizeof (struct lifreq),
	    IPI_GET_CMD, LIF_CMD, ip_sioctl_get_groupname, NULL },
	/* 157 */ { SIOCGLIFGROUPINFO, sizeof (lifgroupinfo_t),
	    IPI_GET_CMD, MISC_CMD, ip_sioctl_groupinfo, NULL },

	/* Leave 158-160 unused; used to be SIOC*IFARP ioctls */
	/* 158 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 159 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 160 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 161 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* These are handled in ip_sioctl_copyin_setup itself */
	/* 162 */ { SIOCGIP6ADDRPOLICY, 0, IPI_NULL_BCONT,
	    MISC_CMD, NULL, NULL },
	/* 163 */ { SIOCSIP6ADDRPOLICY, 0, IPI_PRIV | IPI_NULL_BCONT,
	    MISC_CMD, NULL, NULL },
	/* 164 */ { SIOCGDSTINFO, 0, IPI_GET_CMD, MISC_CMD, NULL, NULL },

	/* 165 */ { SIOCGLIFCONF, 0, IPI_GET_CMD, MISC_CMD,
	    ip_sioctl_get_lifconf, NULL },

	/* 166 */ { SIOCSXARP, sizeof (struct xarpreq), IPI_PRIV | IPI_WR,
	    XARP_CMD, ip_sioctl_arp, NULL },
	/* 167 */ { SIOCGXARP, sizeof (struct xarpreq), IPI_GET_CMD,
	    XARP_CMD, ip_sioctl_arp, NULL },
	/* 168 */ { SIOCDXARP, sizeof (struct xarpreq), IPI_PRIV | IPI_WR,
	    XARP_CMD, ip_sioctl_arp, NULL },

	/* SIOCPOPSOCKFS is not handled by IP */
	/* 169 */ { IPI_DONTCARE /* SIOCPOPSOCKFS */, 0, 0, 0, NULL, NULL },

	/* 170 */ { SIOCGLIFZONE, sizeof (struct lifreq),
	    IPI_GET_CMD, LIF_CMD, ip_sioctl_get_lifzone, NULL },
	/* 171 */ { SIOCSLIFZONE, sizeof (struct lifreq),
	    IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_slifzone,
	    ip_sioctl_slifzone_restart },
	/* 172-174 are SCTP ioctls and not handled by IP */
	/* 172 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 173 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 174 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 175 */ { SIOCGLIFUSESRC, sizeof (struct lifreq),
	    IPI_GET_CMD, LIF_CMD,
	    ip_sioctl_get_lifusesrc, 0 },
	/* 176 */ { SIOCSLIFUSESRC, sizeof (struct lifreq),
	    IPI_PRIV | IPI_WR,
	    LIF_CMD, ip_sioctl_slifusesrc,
	    NULL },
	/* 177 */ { SIOCGLIFSRCOF, 0, IPI_GET_CMD, MISC_CMD,
	    ip_sioctl_get_lifsrcof, NULL },
	/* 178 */ { SIOCGMSFILTER, sizeof (struct group_filter), IPI_GET_CMD,
	    MSFILT_CMD, ip_sioctl_msfilter, NULL },
	/* 179 */ { SIOCSMSFILTER, sizeof (struct group_filter), 0,
	    MSFILT_CMD, ip_sioctl_msfilter, NULL },
	/* 180 */ { SIOCGIPMSFILTER, sizeof (struct ip_msfilter), IPI_GET_CMD,
	    MSFILT_CMD, ip_sioctl_msfilter, NULL },
	/* 181 */ { SIOCSIPMSFILTER, sizeof (struct ip_msfilter), 0,
	    MSFILT_CMD, ip_sioctl_msfilter, NULL },
	/* 182 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* SIOCSENABLESDP is handled by SDP */
	/* 183 */ { IPI_DONTCARE /* SIOCSENABLESDP */, 0, 0, 0, NULL, NULL },
	/* 184 */ { IPI_DONTCARE /* SIOCSQPTR */, 0, 0, 0, NULL, NULL },
	/* 185 */ { SIOCGIFHWADDR, sizeof (struct ifreq), IPI_GET_CMD,
	    IF_CMD, ip_sioctl_get_ifhwaddr, NULL },
	/* 186 */ { IPI_DONTCARE /* SIOCGSTAMP */, 0, 0, 0, NULL, NULL },
	/* 187 */ { SIOCILB, 0, IPI_PRIV | IPI_GET_CMD, MISC_CMD,
	    ip_sioctl_ilb_cmd, NULL },
	/* 188 */ { SIOCGETPROP, 0, IPI_GET_CMD, 0, NULL, NULL },
	/* 189 */ { SIOCSETPROP, 0, IPI_PRIV | IPI_WR, 0, NULL, NULL},
	/* 190 */ { SIOCGLIFDADSTATE, sizeof (struct lifreq),
	    IPI_GET_CMD, LIF_CMD, ip_sioctl_get_dadstate, NULL },
	/* 191 */ { SIOCSLIFPREFIX, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
	    LIF_CMD, ip_sioctl_prefix, ip_sioctl_prefix_restart },
	/* 192 */ { SIOCGLIFHWADDR, sizeof (struct lifreq), IPI_GET_CMD,
	    LIF_CMD, ip_sioctl_get_lifhwaddr, NULL }
};

int ip_ndx_ioctl_count = sizeof (ip_ndx_ioctl_table) / sizeof (ip_ioctl_cmd_t);

ip_ioctl_cmd_t ip_misc_ioctl_table[] = {
	{ I_LINK,	0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
	{ I_UNLINK,	0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
	{ I_PLINK,	0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
	{ I_PUNLINK,	0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
	{ ND_GET,	0, 0, 0, NULL, NULL },
	{ ND_SET,	0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
	{ IP_IOCTL,	0, 0, 0, NULL, NULL },
	{ SIOCGETVIFCNT, sizeof (struct sioc_vif_req), IPI_GET_CMD,
		MISC_CMD, mrt_ioctl},
	{ SIOCGETSGCNT,	sizeof (struct sioc_sg_req), IPI_GET_CMD,
		MISC_CMD, mrt_ioctl},
	{ SIOCGETLSGCNT, sizeof (struct sioc_lsg_req), IPI_GET_CMD,
		MISC_CMD, mrt_ioctl}
};

int ip_misc_ioctl_count =
    sizeof (ip_misc_ioctl_table) / sizeof (ip_ioctl_cmd_t);

int conn_drain_nthreads;	/* Number of drainers reqd. */
				/* Settable in /etc/system */
/* Defined in ip_ire.c */
extern uint32_t ip_ire_max_bucket_cnt, ip6_ire_max_bucket_cnt;
extern uint32_t ip_ire_min_bucket_cnt, ip6_ire_min_bucket_cnt;
extern uint32_t ip_ire_mem_ratio, ip_ire_cpu_ratio;

static nv_t ire_nv_arr[] = {
	{ IRE_BROADCAST, "BROADCAST" },
	{ IRE_LOCAL, "LOCAL" },
	{ IRE_LOOPBACK, "LOOPBACK" },
	{ IRE_DEFAULT, "DEFAULT" },
	{ IRE_PREFIX, "PREFIX" },
	{ IRE_IF_NORESOLVER, "IF_NORESOL" },
	{ IRE_IF_RESOLVER, "IF_RESOLV" },
	{ IRE_IF_CLONE, "IF_CLONE" },
	{ IRE_HOST, "HOST" },
	{ IRE_MULTICAST, "MULTICAST" },
	{ IRE_NOROUTE, "NOROUTE" },
	{ 0 }
};

nv_t *ire_nv_tbl = ire_nv_arr;

/* Simple ICMP IP Header Template */
static ipha_t icmp_ipha = {
	IP_SIMPLE_HDR_VERSION, 0, 0, 0, 0, 0, IPPROTO_ICMP
};

struct module_info ip_mod_info = {
	IP_MOD_ID, IP_MOD_NAME, IP_MOD_MINPSZ, IP_MOD_MAXPSZ, IP_MOD_HIWAT,
	IP_MOD_LOWAT
};

/*
 * Duplicate static symbols within a module confuse mdb; so we avoid the
 * problem by making the symbols here distinct from those in udp.c.
 */

/*
 * Entry points for IP as a device and as a module.
 * We have separate open functions for the /dev/ip and /dev/ip6 devices.
 */
static struct qinit iprinitv4 = {
	(pfi_t)ip_rput, NULL, ip_openv4, ip_close, NULL,
	&ip_mod_info
};

struct qinit iprinitv6 = {
	(pfi_t)ip_rput_v6, NULL, ip_openv6, ip_close, NULL,
	&ip_mod_info
};

static struct qinit ipwinit = {
	(pfi_t)ip_wput_nondata, (pfi_t)ip_wsrv, NULL, NULL, NULL,
	&ip_mod_info
};

static struct qinit iplrinit = {
	(pfi_t)ip_lrput, NULL, ip_openv4, ip_close, NULL,
	&ip_mod_info
};

static struct qinit iplwinit = {
	(pfi_t)ip_lwput, NULL, NULL, NULL, NULL,
	&ip_mod_info
};

/* For AF_INET aka /dev/ip */
struct streamtab ipinfov4 = {
	&iprinitv4, &ipwinit, &iplrinit, &iplwinit
};

/* For AF_INET6 aka /dev/ip6 */
struct streamtab ipinfov6 = {
	&iprinitv6, &ipwinit, &iplrinit, &iplwinit
};

#ifdef	DEBUG
boolean_t skip_sctp_cksum = B_FALSE;
#endif

/*
 * Generate an ICMP fragmentation needed message.
 * When called from the ip_output side, a minimal ip_recv_attr_t needs to be
 * constructed by the caller.
 */
void
icmp_frag_needed(mblk_t *mp, int mtu, ip_recv_attr_t *ira)
{
	icmph_t	icmph;
	ip_stack_t	*ipst = ira->ira_ill->ill_ipst;

	mp = icmp_pkt_err_ok(mp, ira);
	if (mp == NULL)
		return;

	bzero(&icmph, sizeof (icmph_t));
	icmph.icmph_type = ICMP_DEST_UNREACHABLE;
	icmph.icmph_code = ICMP_FRAGMENTATION_NEEDED;
	icmph.icmph_du_mtu = htons((uint16_t)mtu);
	BUMP_MIB(&ipst->ips_icmp_mib, icmpOutFragNeeded);
	BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDestUnreachs);

	icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
}

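/*
 * For illustration, one plausible way an output-side caller might construct
 * the "minimal ip_recv_attr_t" mentioned above. The exact set of required
 * fields is not spelled out here, so treat this as a hypothetical sketch
 * rather than the canonical recipe:
 *
 *	ip_recv_attr_t iras;
 *
 *	bzero(&iras, sizeof (iras));
 *	iras.ira_flags = IRAF_IS_IPV4;
 *	iras.ira_ill = iras.ira_rill = ill;
 *	iras.ira_zoneid = GLOBAL_ZONEID;	// assumption for the sketch
 *	iras.ira_ip_hdr_length = IPH_HDR_LENGTH(ipha);
 *	iras.ira_pktlen = ntohs(ipha->ipha_length);
 *	icmp_frag_needed(mp, new_mtu, &iras);
 */
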
1252 /*
1253 * icmp_inbound_v4 deals with ICMP messages that are handled by IP.
1254 * If the ICMP message is consumed by IP, i.e., it should not be delivered
1255 * to any IPPROTO_ICMP raw sockets, then it returns NULL.
1256 * Likewise, if the ICMP error is misformed (too short, etc), then it
1257 * returns NULL. The caller uses this to determine whether or not to send
1258 * to raw sockets.
1259 *
1260 * All error messages are passed to the matching transport stream.
1261 *
1262 * The following cases are handled by icmp_inbound:
1263 * 1) It needs to send a reply back and possibly delivering it
1264 * to the "interested" upper clients.
1265 * 2) Return the mblk so that the caller can pass it to the RAW socket clients.
1266 * 3) It needs to change some values in IP only.
1267 * 4) It needs to change some values in IP and upper layers e.g TCP
1268 * by delivering an error to the upper layers.
1269 *
1270 * We handle the above three cases in the context of IPsec in the
1271 * following way :
1272 *
1273 * 1) Send the reply back in the same way as the request came in.
1274 * If it came in encrypted, it goes out encrypted. If it came in
1275 * clear, it goes out in clear. Thus, this will prevent chosen
1276 * plain text attack.
1277 * 2) The client may or may not expect things to come in secure.
1278 * If it comes in secure, the policy constraints are checked
1279 * before delivering it to the upper layers. If it comes in
1280 * clear, ipsec_inbound_accept_clear will decide whether to
1281 * accept this in clear or not. In both the cases, if the returned
1282 * message (IP header + 8 bytes) that caused the icmp message has
1283 * AH/ESP headers, it is sent up to AH/ESP for validation before
1284 * sending up. If there are only 8 bytes of returned message, then
1285 * upper client will not be notified.
1286 * 3) Check with global policy to see whether it matches the constaints.
1287 * But this will be done only if icmp_accept_messages_in_clear is
1288 * zero.
1289 * 4) If we need to change both in IP and ULP, then the decision taken
1290 * while affecting the values in IP and while delivering up to TCP
1291 * should be the same.
1292 *
1293 * There are two cases.
1294 *
1295 * a) If we reject data at the IP layer (ipsec_check_global_policy()
1296 * failed), we will not deliver it to the ULP, even though they
1297 * are *willing* to accept in *clear*. This is fine as our global
* disposition to icmp messages asks us to reject the datagram.
1299 *
1300 * b) If we accept data at the IP layer (ipsec_check_global_policy()
* succeeded or icmp_accept_messages_in_clear is 1), but are unable
* to deliver it to the ULP (policy failed), it can lead to
1303 * consistency problems. The cases known at this time are
* ICMP_DESTINATION_UNREACHABLE messages with the following code
* values:
1306 *
* - ICMP_FRAGMENTATION_NEEDED : IP adapts to the new value
* and the upper layer rejects. Then the communication will
* come to a stop. This is solved by making similar decisions
* at both levels. Currently, when we are unable to deliver
* to the upper layer (due to policy failures) while IP has
* adjusted dce_pmtu, the next outbound datagram would
* generate a local ICMP_FRAGMENTATION_NEEDED message - which
* will carry the right level of protection. Thus the right
* value will be communicated even if we could not deliver the
* message when it first arrived from the wire. But this
1317 * assumes there would be at least one outbound datagram after
1318 * IP has adjusted its dce_pmtu value. To make things
1319 * simpler, we accept in clear after the validation of
1320 * AH/ESP headers.
1321 *
1322 * - Other ICMP ERRORS : We may not be able to deliver it to the
1323 * upper layer depending on the level of protection the upper
1324 * layer expects and the disposition in ipsec_inbound_accept_clear().
1325 * ipsec_inbound_accept_clear() decides whether a given ICMP error
1326 * should be accepted in clear when the Upper layer expects secure.
1327 * Thus the communication may get aborted by some bad ICMP
1328 * packets.
1329 */
1330 mblk_t *
1331 icmp_inbound_v4(mblk_t *mp, ip_recv_attr_t *ira)
1332 {
1333 icmph_t *icmph;
1334 ipha_t *ipha; /* Outer header */
1335 int ip_hdr_length; /* Outer header length */
1336 boolean_t interested;
1337 ipif_t *ipif;
1338 uint32_t ts;
1339 uint32_t *tsp;
1340 timestruc_t now;
1341 ill_t *ill = ira->ira_ill;
1342 ip_stack_t *ipst = ill->ill_ipst;
1343 zoneid_t zoneid = ira->ira_zoneid;
1344 int len_needed;
1345 mblk_t *mp_ret = NULL;
1346
1347 ipha = (ipha_t *)mp->b_rptr;
1348
1349 BUMP_MIB(&ipst->ips_icmp_mib, icmpInMsgs);
1350
1351 ip_hdr_length = ira->ira_ip_hdr_length;
1352 if ((mp->b_wptr - mp->b_rptr) < (ip_hdr_length + ICMPH_SIZE)) {
1353 if (ira->ira_pktlen < (ip_hdr_length + ICMPH_SIZE)) {
1354 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
1355 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
1356 freemsg(mp);
1357 return (NULL);
1358 }
1359 /* Last chance to get real. */
1360 ipha = ip_pullup(mp, ip_hdr_length + ICMPH_SIZE, ira);
1361 if (ipha == NULL) {
1362 BUMP_MIB(&ipst->ips_icmp_mib, icmpInErrors);
1363 freemsg(mp);
1364 return (NULL);
1365 }
1366 }
1367
1368 /* The IP header will always be a multiple of four bytes */
1369 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1370 ip2dbg(("icmp_inbound_v4: type %d code %d\n", icmph->icmph_type,
1371 icmph->icmph_code));
1372
1373 /*
1374 * We will set "interested" to "true" if we should pass a copy to
1375 * the transport or if we handle the packet locally.
1376 */
1377 interested = B_FALSE;
1378 switch (icmph->icmph_type) {
1379 case ICMP_ECHO_REPLY:
1380 BUMP_MIB(&ipst->ips_icmp_mib, icmpInEchoReps);
1381 break;
1382 case ICMP_DEST_UNREACHABLE:
1383 if (icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED)
1384 BUMP_MIB(&ipst->ips_icmp_mib, icmpInFragNeeded);
1385 interested = B_TRUE; /* Pass up to transport */
1386 BUMP_MIB(&ipst->ips_icmp_mib, icmpInDestUnreachs);
1387 break;
1388 case ICMP_SOURCE_QUENCH:
1389 interested = B_TRUE; /* Pass up to transport */
1390 BUMP_MIB(&ipst->ips_icmp_mib, icmpInSrcQuenchs);
1391 break;
1392 case ICMP_REDIRECT:
1393 if (!ipst->ips_ip_ignore_redirect)
1394 interested = B_TRUE;
1395 BUMP_MIB(&ipst->ips_icmp_mib, icmpInRedirects);
1396 break;
1397 case ICMP_ECHO_REQUEST:
1398 /*
1399 * Whether to respond to echo requests that come in as IP
1400 * broadcasts or as IP multicast is subject to debate
1401 * (what isn't?). We aim to please, you pick it.
1402 * Default is do it.
1403 */
1404 if (ira->ira_flags & IRAF_MULTICAST) {
1405 /* multicast: respond based on tunable */
1406 interested = ipst->ips_ip_g_resp_to_echo_mcast;
1407 } else if (ira->ira_flags & IRAF_BROADCAST) {
1408 /* broadcast: respond based on tunable */
1409 interested = ipst->ips_ip_g_resp_to_echo_bcast;
1410 } else {
1411 /* unicast: always respond */
1412 interested = B_TRUE;
1413 }
1414 BUMP_MIB(&ipst->ips_icmp_mib, icmpInEchos);
1415 if (!interested) {
1416 /* We never pass these to RAW sockets */
1417 freemsg(mp);
1418 return (NULL);
1419 }
1420
1421 /* Check db_ref to make sure we can modify the packet. */
1422 if (mp->b_datap->db_ref > 1) {
1423 mblk_t *mp1;
1424
1425 mp1 = copymsg(mp);
1426 freemsg(mp);
1427 if (!mp1) {
1428 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
1429 return (NULL);
1430 }
1431 mp = mp1;
1432 ipha = (ipha_t *)mp->b_rptr;
1433 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1434 }
1435 icmph->icmph_type = ICMP_ECHO_REPLY;
1436 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutEchoReps);
1437 icmp_send_reply_v4(mp, ipha, icmph, ira);
1438 return (NULL);
1439
1440 case ICMP_ROUTER_ADVERTISEMENT:
1441 case ICMP_ROUTER_SOLICITATION:
1442 break;
1443 case ICMP_TIME_EXCEEDED:
1444 interested = B_TRUE; /* Pass up to transport */
1445 BUMP_MIB(&ipst->ips_icmp_mib, icmpInTimeExcds);
1446 break;
1447 case ICMP_PARAM_PROBLEM:
1448 interested = B_TRUE; /* Pass up to transport */
1449 BUMP_MIB(&ipst->ips_icmp_mib, icmpInParmProbs);
1450 break;
1451 case ICMP_TIME_STAMP_REQUEST:
1452 /* Response to Time Stamp Requests is local policy. */
1453 if (ipst->ips_ip_g_resp_to_timestamp) {
1454 if (ira->ira_flags & IRAF_MULTIBROADCAST)
1455 interested =
1456 ipst->ips_ip_g_resp_to_timestamp_bcast;
1457 else
1458 interested = B_TRUE;
1459 }
1460 if (!interested) {
1461 /* We never pass these to RAW sockets */
1462 freemsg(mp);
1463 return (NULL);
1464 }
1465
1466 /* Make sure we have enough of the packet */
1467 len_needed = ip_hdr_length + ICMPH_SIZE +
1468 3 * sizeof (uint32_t);
1469
1470 if (mp->b_wptr - mp->b_rptr < len_needed) {
1471 ipha = ip_pullup(mp, len_needed, ira);
1472 if (ipha == NULL) {
1473 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1474 ip_drop_input("ipIfStatsInDiscards - ip_pullup",
1475 mp, ill);
1476 freemsg(mp);
1477 return (NULL);
1478 }
1479 /* Refresh following the pullup. */
1480 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1481 }
1482 BUMP_MIB(&ipst->ips_icmp_mib, icmpInTimestamps);
1483 /* Check db_ref to make sure we can modify the packet. */
1484 if (mp->b_datap->db_ref > 1) {
1485 mblk_t *mp1;
1486
1487 mp1 = copymsg(mp);
1488 freemsg(mp);
1489 if (!mp1) {
1490 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
1491 return (NULL);
1492 }
1493 mp = mp1;
1494 ipha = (ipha_t *)mp->b_rptr;
1495 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1496 }
1497 icmph->icmph_type = ICMP_TIME_STAMP_REPLY;
1498 tsp = (uint32_t *)&icmph[1];
1499 tsp++; /* Skip past 'originate time' */
1500 /* Compute # of milliseconds since midnight */
1501 gethrestime(&now);
1502 ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
1503 NSEC2MSEC(now.tv_nsec);
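/*
* For example (illustrative): 01:02:03.004 UT is 3723 seconds past
* midnight and thus encodes as 3723004.
*/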
1504 *tsp++ = htonl(ts); /* Lay in 'receive time' */
1505 *tsp++ = htonl(ts); /* Lay in 'send time' */
1506 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutTimestampReps);
1507 icmp_send_reply_v4(mp, ipha, icmph, ira);
1508 return (NULL);
1509
1510 case ICMP_TIME_STAMP_REPLY:
1511 BUMP_MIB(&ipst->ips_icmp_mib, icmpInTimestampReps);
1512 break;
1513 case ICMP_INFO_REQUEST:
1514 /* Per RFC 1122 3.2.2.7, ignore this. */
1515 case ICMP_INFO_REPLY:
1516 break;
1517 case ICMP_ADDRESS_MASK_REQUEST:
1518 if (ira->ira_flags & IRAF_MULTIBROADCAST) {
1519 interested =
1520 ipst->ips_ip_respond_to_address_mask_broadcast;
1521 } else {
1522 interested = B_TRUE;
1523 }
1524 if (!interested) {
1525 /* We never pass these to RAW sockets */
1526 freemsg(mp);
1527 return (NULL);
1528 }
1529 len_needed = ip_hdr_length + ICMPH_SIZE + IP_ADDR_LEN;
1530 if (mp->b_wptr - mp->b_rptr < len_needed) {
1531 ipha = ip_pullup(mp, len_needed, ira);
1532 if (ipha == NULL) {
1533 BUMP_MIB(ill->ill_ip_mib,
1534 ipIfStatsInTruncatedPkts);
1535 ip_drop_input("ipIfStatsInTruncatedPkts", mp,
1536 ill);
1537 freemsg(mp);
1538 return (NULL);
1539 }
1540 /* Refresh following the pullup. */
1541 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1542 }
1543 BUMP_MIB(&ipst->ips_icmp_mib, icmpInAddrMasks);
1544 /* Check db_ref to make sure we can modify the packet. */
1545 if (mp->b_datap->db_ref > 1) {
1546 mblk_t *mp1;
1547
1548 mp1 = copymsg(mp);
1549 freemsg(mp);
1550 if (!mp1) {
1551 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
1552 return (NULL);
1553 }
1554 mp = mp1;
1555 ipha = (ipha_t *)mp->b_rptr;
1556 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1557 }
1558 /*
* We need the ipif whose address will be the source address of
* the mask reply, since we report that ipif's netmask. For
* unicast we have a specific ipif. For multicast/broadcast we
* only handle on-link senders, and use the source address to
* pick an ipif.
1563 */
1564 ipif = ipif_lookup_addr(ipha->ipha_dst, ill, zoneid, ipst);
1565 if (ipif == NULL) {
1566 /* Broadcast or multicast */
1567 ipif = ipif_lookup_remote(ill, ipha->ipha_src, zoneid);
1568 if (ipif == NULL) {
1569 freemsg(mp);
1570 return (NULL);
1571 }
1572 }
1573 icmph->icmph_type = ICMP_ADDRESS_MASK_REPLY;
1574 bcopy(&ipif->ipif_net_mask, &icmph[1], IP_ADDR_LEN);
1575 ipif_refrele(ipif);
1576 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutAddrMaskReps);
1577 icmp_send_reply_v4(mp, ipha, icmph, ira);
1578 return (NULL);
1579
1580 case ICMP_ADDRESS_MASK_REPLY:
1581 BUMP_MIB(&ipst->ips_icmp_mib, icmpInAddrMaskReps);
1582 break;
1583 default:
1584 interested = B_TRUE; /* Pass up to transport */
1585 BUMP_MIB(&ipst->ips_icmp_mib, icmpInUnknowns);
1586 break;
1587 }
1588 /*
1589 * See if there is an ICMP client to avoid an extra copymsg/freemsg
1590 * if there isn't one.
1591 */
1592 if (ipst->ips_ipcl_proto_fanout_v4[IPPROTO_ICMP].connf_head != NULL) {
/* If there is an ICMP client and we also want the packet, copy it. */
1594
1595 if (!interested) {
1596 /* Caller will deliver to RAW sockets */
1597 return (mp);
1598 }
1599 mp_ret = copymsg(mp);
1600 if (mp_ret == NULL) {
1601 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1602 ip_drop_input("ipIfStatsInDiscards - copymsg", mp, ill);
1603 }
1604 } else if (!interested) {
1605 /* Neither we nor raw sockets are interested. Drop packet now */
1606 freemsg(mp);
1607 return (NULL);
1608 }
1609
1610 /*
1611 * ICMP error or redirect packet. Make sure we have enough of
1612 * the header and that db_ref == 1 since we might end up modifying
1613 * the packet.
1614 */
1615 if (mp->b_cont != NULL) {
1616 if (ip_pullup(mp, -1, ira) == NULL) {
1617 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1618 ip_drop_input("ipIfStatsInDiscards - ip_pullup",
1619 mp, ill);
1620 freemsg(mp);
1621 return (mp_ret);
1622 }
1623 }
1624
1625 if (mp->b_datap->db_ref > 1) {
1626 mblk_t *mp1;
1627
1628 mp1 = copymsg(mp);
1629 if (mp1 == NULL) {
1630 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1631 ip_drop_input("ipIfStatsInDiscards - copymsg", mp, ill);
1632 freemsg(mp);
1633 return (mp_ret);
1634 }
1635 freemsg(mp);
1636 mp = mp1;
1637 }
1638
1639 /*
* In case mp has changed, verify the message before any further
* processing.
1642 */
1643 ipha = (ipha_t *)mp->b_rptr;
1644 icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
1645 if (!icmp_inbound_verify_v4(mp, icmph, ira)) {
1646 freemsg(mp);
1647 return (mp_ret);
1648 }
1649
1650 switch (icmph->icmph_type) {
1651 case ICMP_REDIRECT:
1652 icmp_redirect_v4(mp, ipha, icmph, ira);
1653 break;
1654 case ICMP_DEST_UNREACHABLE:
1655 if (icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED) {
/* Update the DCE and adjust the MTU in the ICMP header if needed */
1657 icmp_inbound_too_big_v4(icmph, ira);
1658 }
1659 /* FALLTHRU */
1660 default:
1661 icmp_inbound_error_fanout_v4(mp, icmph, ira);
1662 break;
1663 }
1664 return (mp_ret);
1665 }
1666
1667 /*
1668 * Send an ICMP echo, timestamp or address mask reply.
1669 * The caller has already updated the payload part of the packet.
1670 * We handle the ICMP checksum, IP source address selection and feed
1671 * the packet into ip_output_simple.
1672 */
1673 static void
1674 icmp_send_reply_v4(mblk_t *mp, ipha_t *ipha, icmph_t *icmph,
1675 ip_recv_attr_t *ira)
1676 {
1677 uint_t ip_hdr_length = ira->ira_ip_hdr_length;
1678 ill_t *ill = ira->ira_ill;
1679 ip_stack_t *ipst = ill->ill_ipst;
1680 ip_xmit_attr_t ixas;
1681
1682 /* Send out an ICMP packet */
1683 icmph->icmph_checksum = 0;
1684 icmph->icmph_checksum = IP_CSUM(mp, ip_hdr_length, 0);
1685 /* Reset time to live. */
1686 ipha->ipha_ttl = ipst->ips_ip_def_ttl;
1687 {
1688 /* Swap source and destination addresses */
1689 ipaddr_t tmp;
1690
1691 tmp = ipha->ipha_src;
1692 ipha->ipha_src = ipha->ipha_dst;
1693 ipha->ipha_dst = tmp;
1694 }
1695 ipha->ipha_ident = 0;
1696 if (!IS_SIMPLE_IPH(ipha))
1697 icmp_options_update(ipha);
1698
1699 bzero(&ixas, sizeof (ixas));
1700 ixas.ixa_flags = IXAF_BASIC_SIMPLE_V4;
1701 ixas.ixa_zoneid = ira->ira_zoneid;
1702 ixas.ixa_cred = kcred;
1703 ixas.ixa_cpid = NOPID;
1704 ixas.ixa_tsl = ira->ira_tsl; /* Behave as a multi-level responder */
1705 ixas.ixa_ifindex = 0;
1706 ixas.ixa_ipst = ipst;
1707 ixas.ixa_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
1708
1709 if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
1710 /*
1711 * This packet should go out the same way as it
1712 * came in i.e in clear, independent of the IPsec policy
1713 * for transmitting packets.
1714 */
1715 ixas.ixa_flags |= IXAF_NO_IPSEC;
1716 } else {
1717 if (!ipsec_in_to_out(ira, &ixas, mp, ipha, NULL)) {
1718 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1719 /* Note: mp already consumed and ip_drop_packet done */
1720 return;
1721 }
1722 }
1723 if (ira->ira_flags & IRAF_MULTIBROADCAST) {
1724 /*
* Not one of our addresses (IRE_LOCALs), thus we let
1726 * ip_output_simple pick the source.
1727 */
1728 ipha->ipha_src = INADDR_ANY;
1729 ixas.ixa_flags |= IXAF_SET_SOURCE;
1730 }
1731 /* Should we send with DF and use dce_pmtu? */
1732 if (ipst->ips_ipv4_icmp_return_pmtu) {
1733 ixas.ixa_flags |= IXAF_PMTU_DISCOVERY;
1734 ipha->ipha_fragment_offset_and_flags |= IPH_DF_HTONS;
1735 }
1736
1737 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutMsgs);
1738
1739 (void) ip_output_simple(mp, &ixas);
1740 ixa_cleanup(&ixas);
1741 }
1742
1743 /*
* Verify the ICMP message, which is either an ICMP error or a redirect
* packet. The caller should have fully pulled up the message. If it's a
* redirect packet, only basic checks on the IP header will be done;
* otherwise, verify the packet by looking at the included ULP header.
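*
* Note that an ICMP error need only include the offending IP header
* plus the first 8 bytes of its payload (RFC 792), which is just
* enough for the ULP port checks done below.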
1748 *
1749 * Called before icmp_inbound_error_fanout_v4 is called.
1750 */
1751 static boolean_t
1752 icmp_inbound_verify_v4(mblk_t *mp, icmph_t *icmph, ip_recv_attr_t *ira)
1753 {
1754 ill_t *ill = ira->ira_ill;
1755 int hdr_length;
1756 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
1757 conn_t *connp;
1758 ipha_t *ipha; /* Inner IP header */
1759
1760 ipha = (ipha_t *)&icmph[1];
1761 if ((uchar_t *)ipha + IP_SIMPLE_HDR_LENGTH > mp->b_wptr)
1762 goto truncated;
1763
1764 hdr_length = IPH_HDR_LENGTH(ipha);
1765
1766 if ((IPH_HDR_VERSION(ipha) != IPV4_VERSION))
1767 goto discard_pkt;
1768
1769 if (hdr_length < sizeof (ipha_t))
1770 goto truncated;
1771
1772 if ((uchar_t *)ipha + hdr_length > mp->b_wptr)
1773 goto truncated;
1774
1775 /*
1776 * Stop here for ICMP_REDIRECT.
1777 */
1778 if (icmph->icmph_type == ICMP_REDIRECT)
1779 return (B_TRUE);
1780
1781 /*
1782 * ICMP errors only.
1783 */
1784 switch (ipha->ipha_protocol) {
1785 case IPPROTO_UDP:
1786 /*
1787 * Verify we have at least ICMP_MIN_TP_HDR_LEN bytes of
1788 * transport header.
1789 */
1790 if ((uchar_t *)ipha + hdr_length + ICMP_MIN_TP_HDR_LEN >
1791 mp->b_wptr)
1792 goto truncated;
1793 break;
1794 case IPPROTO_TCP: {
1795 tcpha_t *tcpha;
1796
1797 /*
1798 * Verify we have at least ICMP_MIN_TP_HDR_LEN bytes of
1799 * transport header.
1800 */
1801 if ((uchar_t *)ipha + hdr_length + ICMP_MIN_TP_HDR_LEN >
1802 mp->b_wptr)
1803 goto truncated;
1804
1805 tcpha = (tcpha_t *)((uchar_t *)ipha + hdr_length);
1806 connp = ipcl_tcp_lookup_reversed_ipv4(ipha, tcpha, TCPS_LISTEN,
1807 ipst);
1808 if (connp == NULL)
1809 goto discard_pkt;
1810
1811 if ((connp->conn_verifyicmp != NULL) &&
1812 !connp->conn_verifyicmp(connp, tcpha, icmph, NULL, ira)) {
1813 CONN_DEC_REF(connp);
1814 goto discard_pkt;
1815 }
1816 CONN_DEC_REF(connp);
1817 break;
1818 }
1819 case IPPROTO_SCTP:
1820 /*
1821 * Verify we have at least ICMP_MIN_TP_HDR_LEN bytes of
1822 * transport header.
1823 */
1824 if ((uchar_t *)ipha + hdr_length + ICMP_MIN_TP_HDR_LEN >
1825 mp->b_wptr)
1826 goto truncated;
1827 break;
1828 case IPPROTO_ESP:
1829 case IPPROTO_AH:
1830 break;
1831 case IPPROTO_ENCAP:
1832 if ((uchar_t *)ipha + hdr_length + sizeof (ipha_t) >
1833 mp->b_wptr)
1834 goto truncated;
1835 break;
1836 default:
1837 break;
1838 }
1839
1840 return (B_TRUE);
1841
1842 discard_pkt:
1843 /* Bogus ICMP error. */
1844 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
1845 return (B_FALSE);
1846
1847 truncated:
/* We pulled up everything already. Must be truncated */
1849 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
1850 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
1851 return (B_FALSE);
1852 }
1853
1854 /* Table from RFC 1191 */
1855 static int icmp_frag_size_table[] =
1856 { 32000, 17914, 8166, 4352, 2002, 1496, 1006, 508, 296, 68 };
1857
1858 /*
1859 * Process received ICMP Packet too big.
1860 * Just handles the DCE create/update, including using the above table of
1861 * PMTU guesses. The caller is responsible for validating the packet before
1862 * passing it in and also to fanout the ICMP error to any matching transport
1863 * conns. Assumes the message has been fully pulled up and verified.
1864 *
1865 * Before getting here, the caller has called icmp_inbound_verify_v4()
1866 * that should have verified with ULP to prevent undoing the changes we're
1867 * going to make to DCE. For example, TCP might have verified that the packet
1868 * which generated error is in the send window.
1869 *
* In some cases we modify the MTU in the ICMP header of the packet; the
* caller should pass it to the matching ULP after this returns.
1872 */
1873 static void
1874 icmp_inbound_too_big_v4(icmph_t *icmph, ip_recv_attr_t *ira)
1875 {
1876 dce_t *dce;
1877 int old_mtu;
1878 int mtu, orig_mtu;
1879 ipaddr_t dst;
1880 boolean_t disable_pmtud;
1881 ill_t *ill = ira->ira_ill;
1882 ip_stack_t *ipst = ill->ill_ipst;
1883 uint_t hdr_length;
1884 ipha_t *ipha;
1885
1886 /* Caller already pulled up everything. */
1887 ipha = (ipha_t *)&icmph[1];
1888 ASSERT(icmph->icmph_type == ICMP_DEST_UNREACHABLE &&
1889 icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED);
1890 ASSERT(ill != NULL);
1891
1892 hdr_length = IPH_HDR_LENGTH(ipha);
1893
1894 /*
1895 * We handle path MTU for source routed packets since the DCE
1896 * is looked up using the final destination.
1897 */
1898 dst = ip_get_dst(ipha);
1899
1900 dce = dce_lookup_and_add_v4(dst, ipst);
1901 if (dce == NULL) {
1902 /* Couldn't add a unique one - ENOMEM */
1903 ip1dbg(("icmp_inbound_too_big_v4: no dce for 0x%x\n",
1904 ntohl(dst)));
1905 return;
1906 }
1907
1908 /* Check for MTU discovery advice as described in RFC 1191 */
1909 mtu = ntohs(icmph->icmph_du_mtu);
1910 orig_mtu = mtu;
1911 disable_pmtud = B_FALSE;
1912
1913 mutex_enter(&dce->dce_lock);
1914 if (dce->dce_flags & DCEF_PMTU)
1915 old_mtu = dce->dce_pmtu;
1916 else
1917 old_mtu = ill->ill_mtu;
1918
1919 if (icmph->icmph_du_zero != 0 || mtu < ipst->ips_ip_pmtu_min) {
1920 uint32_t length;
1921 int i;
1922
1923 /*
1924 * Use the table from RFC 1191 to figure out
1925 * the next "plateau" based on the length in
1926 * the original IP packet.
1927 */
1928 length = ntohs(ipha->ipha_length);
1929 DTRACE_PROBE2(ip4__pmtu__guess, dce_t *, dce,
1930 uint32_t, length);
1931 if (old_mtu <= length &&
1932 old_mtu >= length - hdr_length) {
1933 /*
1934 * Handle broken BSD 4.2 systems that
1935 * return the wrong ipha_length in ICMP
1936 * errors.
1937 */
1938 ip1dbg(("Wrong mtu: sent %d, dce %d\n",
1939 length, old_mtu));
1940 length -= hdr_length;
1941 }
1942 for (i = 0; i < A_CNT(icmp_frag_size_table); i++) {
1943 if (length > icmp_frag_size_table[i])
1944 break;
1945 }
1946 if (i == A_CNT(icmp_frag_size_table)) {
1947 /* Smaller than IP_MIN_MTU! */
1948 ip1dbg(("Too big for packet size %d\n",
1949 length));
1950 disable_pmtud = B_TRUE;
1951 mtu = ipst->ips_ip_pmtu_min;
1952 } else {
1953 mtu = icmp_frag_size_table[i];
1954 ip1dbg(("Calculated mtu %d, packet size %d, "
1955 "before %d\n", mtu, length, old_mtu));
1956 if (mtu < ipst->ips_ip_pmtu_min) {
1957 mtu = ipst->ips_ip_pmtu_min;
1958 disable_pmtud = B_TRUE;
1959 }
1960 }
1961 }
1962 if (disable_pmtud)
1963 dce->dce_flags |= DCEF_TOO_SMALL_PMTU;
1964 else
1965 dce->dce_flags &= ~DCEF_TOO_SMALL_PMTU;
1966
1967 dce->dce_pmtu = MIN(old_mtu, mtu);
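/* The MIN() above ensures an ICMP error can never raise our PMTU. */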
1968 /* Prepare to send the new max frag size for the ULP. */
1969 icmph->icmph_du_zero = 0;
1970 icmph->icmph_du_mtu = htons((uint16_t)dce->dce_pmtu);
1971 DTRACE_PROBE4(ip4__pmtu__change, icmph_t *, icmph, dce_t *,
1972 dce, int, orig_mtu, int, mtu);
1973
1974 /* We now have a PMTU for sure */
1975 dce->dce_flags |= DCEF_PMTU;
1976 dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
1977 mutex_exit(&dce->dce_lock);
1978 /*
1979 * After dropping the lock the new value is visible to everyone.
1980 * Then we bump the generation number so any cached values reinspect
1981 * the dce_t.
1982 */
1983 dce_increment_generation(dce);
1984 dce_refrele(dce);
1985 }
1986
1987 /*
1988 * If the packet in error is Self-Encapsulated, icmp_inbound_error_fanout_v4
1989 * calls this function.
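*
* The ICMP error at this point looks like
*	[IP][ICMP][ipha (outer, self-encapsulating)][in_ipha (inner)]...
* and we strip one level of encapsulation by overlaying in_ipha and
* everything that follows it onto ipha, shrinking the message
* accordingly.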
1990 */
1991 static mblk_t *
1992 icmp_inbound_self_encap_error_v4(mblk_t *mp, ipha_t *ipha, ipha_t *in_ipha)
1993 {
1994 int length;
1995
1996 ASSERT(mp->b_datap->db_type == M_DATA);
1997
1998 /* icmp_inbound_v4 has already pulled up the whole error packet */
1999 ASSERT(mp->b_cont == NULL);
2000
2001 /*
2002 * The length that we want to overlay is the inner header
2003 * and what follows it.
2004 */
2005 length = msgdsize(mp) - ((uchar_t *)in_ipha - mp->b_rptr);
2006
2007 /*
2008 * Overlay the inner header and whatever follows it over the
2009 * outer header.
2010 */
2011 bcopy((uchar_t *)in_ipha, (uchar_t *)ipha, length);
2012
2013 /* Adjust for what we removed */
2014 mp->b_wptr -= (uchar_t *)in_ipha - (uchar_t *)ipha;
2015 return (mp);
2016 }
2017
2018 /*
2019 * Try to pass the ICMP message upstream in case the ULP cares.
2020 *
2021 * If the packet that caused the ICMP error is secure, we send
2022 * it to AH/ESP to make sure that the attached packet has a
2023 * valid association. ipha in the code below points to the
2024 * IP header of the packet that caused the error.
2025 *
2026 * For IPsec cases, we let the next-layer-up (which has access to
2027 * cached policy on the conn_t, or can query the SPD directly)
2028 * subtract out any IPsec overhead if they must. We therefore make no
2029 * adjustments here for IPsec overhead.
2030 *
2031 * IFN could have been generated locally or by some router.
2032 *
* LOCAL : ire_send_wire (before calling ipsec_out_process) can call
* icmp_frag_needed/icmp_pkt2big_v6 to generate a local IFN.
* This happens because IP adjusted its value of MTU on an
* earlier IFN message and could not tell the upper layer
* the new adjusted value of the MTU, e.g. the packet was encrypted
* or there was not enough information to fanout to upper
* layers. Thus on the next outbound datagram, ire_send_wire
* generates the IFN, where IPsec processing has *not* been
* done.
*
* Note that we retain ixa_fragsize across IPsec; thus once
* we have picked ixa_fragsize and entered ipsec_out_process we do
* not change the fragsize even if the path MTU changes before
* we reach ip_output_post_ipsec.
2047 *
2048 * In the local case, IRAF_LOOPBACK will be set indicating
2049 * that IFN was generated locally.
2050 *
2051 * ROUTER : IFN could be secure or non-secure.
2052 *
* * SECURE : If the packet in error has AH/ESP headers, we fanout
* to AH/ESP to validate those headers. AH/ESP will verify whether
* there is a valid SA or not and send it back. We will fanout
* again if we have more data in the packet.
2058 *
2059 * If the packet in error does not have AH/ESP, we handle it
2060 * like any other case.
2061 *
2062 * * NON_SECURE : If the packet in error has AH/ESP headers, we send it
2063 * up to AH/ESP for validation. AH/ESP will verify whether there is a
2064 * valid SA or not and send it back. We will fanout again if
2065 * we have more data in the packet.
2066 *
2067 * If the packet in error does not have AH/ESP, we handle it
2068 * like any other case.
2069 *
2070 * The caller must have called icmp_inbound_verify_v4.
2071 */
2072 static void
2073 icmp_inbound_error_fanout_v4(mblk_t *mp, icmph_t *icmph, ip_recv_attr_t *ira)
2074 {
2075 uint16_t *up; /* Pointer to ports in ULP header */
2076 uint32_t ports; /* reversed ports for fanout */
2077 ipha_t ripha; /* With reversed addresses */
2078 ipha_t *ipha; /* Inner IP header */
2079 uint_t hdr_length; /* Inner IP header length */
2080 tcpha_t *tcpha;
2081 conn_t *connp;
2082 ill_t *ill = ira->ira_ill;
2083 ip_stack_t *ipst = ill->ill_ipst;
2084 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
2085 ill_t *rill = ira->ira_rill;
2086
2087 /* Caller already pulled up everything. */
2088 ipha = (ipha_t *)&icmph[1];
2089 ASSERT((uchar_t *)&ipha[1] <= mp->b_wptr);
2090 ASSERT(mp->b_cont == NULL);
2091
2092 hdr_length = IPH_HDR_LENGTH(ipha);
2093 ira->ira_protocol = ipha->ipha_protocol;
2094
2095 /*
2096 * We need a separate IP header with the source and destination
2097 * addresses reversed to do fanout/classification because the ipha in
2098 * the ICMP error is in the form we sent it out.
2099 */
2100 ripha.ipha_src = ipha->ipha_dst;
2101 ripha.ipha_dst = ipha->ipha_src;
2102 ripha.ipha_protocol = ipha->ipha_protocol;
2103 ripha.ipha_version_and_hdr_length = ipha->ipha_version_and_hdr_length;
2104
2105 ip2dbg(("icmp_inbound_error_v4: proto %d %x to %x: %d/%d\n",
2106 ripha.ipha_protocol, ntohl(ipha->ipha_src),
2107 ntohl(ipha->ipha_dst),
2108 icmph->icmph_type, icmph->icmph_code));
2109
2110 switch (ipha->ipha_protocol) {
2111 case IPPROTO_UDP:
2112 up = (uint16_t *)((uchar_t *)ipha + hdr_length);
2113
2114 /* Attempt to find a client stream based on port. */
2115 ip2dbg(("icmp_inbound_error_v4: UDP ports %d to %d\n",
2116 ntohs(up[0]), ntohs(up[1])));
2117
2118 /* Note that we send error to all matches. */
2119 ira->ira_flags |= IRAF_ICMP_ERROR;
2120 ip_fanout_udp_multi_v4(mp, &ripha, up[0], up[1], ira);
2121 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2122 return;
2123
2124 case IPPROTO_TCP:
2125 /*
2126 * Find a TCP client stream for this packet.
2127 * Note that we do a reverse lookup since the header is
2128 * in the form we sent it out.
2129 */
2130 tcpha = (tcpha_t *)((uchar_t *)ipha + hdr_length);
2131 connp = ipcl_tcp_lookup_reversed_ipv4(ipha, tcpha, TCPS_LISTEN,
2132 ipst);
2133 if (connp == NULL)
2134 goto discard_pkt;
2135
2136 if (CONN_INBOUND_POLICY_PRESENT(connp, ipss) ||
2137 (ira->ira_flags & IRAF_IPSEC_SECURE)) {
2138 mp = ipsec_check_inbound_policy(mp, connp,
2139 ipha, NULL, ira);
2140 if (mp == NULL) {
2141 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
2142 /* Note that mp is NULL */
2143 ip_drop_input("ipIfStatsInDiscards", mp, ill);
2144 CONN_DEC_REF(connp);
2145 return;
2146 }
2147 }
2148
2149 ira->ira_flags |= IRAF_ICMP_ERROR;
2150 ira->ira_ill = ira->ira_rill = NULL;
2151 if (IPCL_IS_TCP(connp)) {
2152 SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
2153 connp->conn_recvicmp, connp, ira, SQ_FILL,
2154 SQTAG_TCP_INPUT_ICMP_ERR);
2155 } else {
2156 /* Not TCP; must be SOCK_RAW, IPPROTO_TCP */
2157 (connp->conn_recv)(connp, mp, NULL, ira);
2158 CONN_DEC_REF(connp);
2159 }
2160 ira->ira_ill = ill;
2161 ira->ira_rill = rill;
2162 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2163 return;
2164
2165 case IPPROTO_SCTP:
2166 up = (uint16_t *)((uchar_t *)ipha + hdr_length);
2167 /* Find a SCTP client stream for this packet. */
2168 ((uint16_t *)&ports)[0] = up[1];
2169 ((uint16_t *)&ports)[1] = up[0];
2170
2171 ira->ira_flags |= IRAF_ICMP_ERROR;
2172 ip_fanout_sctp(mp, &ripha, NULL, ports, ira);
2173 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2174 return;
2175
2176 case IPPROTO_ESP:
2177 case IPPROTO_AH:
2178 if (!ipsec_loaded(ipss)) {
2179 ip_proto_not_sup(mp, ira);
2180 return;
2181 }
2182
2183 if (ipha->ipha_protocol == IPPROTO_ESP)
2184 mp = ipsecesp_icmp_error(mp, ira);
2185 else
2186 mp = ipsecah_icmp_error(mp, ira);
2187 if (mp == NULL)
2188 return;
2189
2190 /* Just in case ipsec didn't preserve the NULL b_cont */
2191 if (mp->b_cont != NULL) {
2192 if (!pullupmsg(mp, -1))
2193 goto discard_pkt;
2194 }
2195
2196 /*
2197 * Note that ira_pktlen and ira_ip_hdr_length are no longer
2198 * correct, but we don't use them any more here.
2199 *
* If successful, the mp has been modified to not include
2201 * the ESP/AH header so we can fanout to the ULP's icmp
2202 * error handler.
2203 */
2204 if (mp->b_wptr - mp->b_rptr < IP_SIMPLE_HDR_LENGTH)
2205 goto truncated;
2206
/* Verify the modified message before any further processing. */
2208 ipha = (ipha_t *)mp->b_rptr;
2209 hdr_length = IPH_HDR_LENGTH(ipha);
2210 icmph = (icmph_t *)&mp->b_rptr[hdr_length];
2211 if (!icmp_inbound_verify_v4(mp, icmph, ira)) {
2212 freemsg(mp);
2213 return;
2214 }
2215
2216 icmp_inbound_error_fanout_v4(mp, icmph, ira);
2217 return;
2218
2219 case IPPROTO_ENCAP: {
2220 /* Look for self-encapsulated packets that caused an error */
2221 ipha_t *in_ipha;
2222
2223 /*
* The caller has verified that the length is at
* least the size of an IP header.
2226 */
2227 ASSERT(hdr_length >= sizeof (ipha_t));
2228 /*
2229 * Check the sanity of the inner IP header like
2230 * we did for the outer header.
2231 */
2232 in_ipha = (ipha_t *)((uchar_t *)ipha + hdr_length);
2233 if ((IPH_HDR_VERSION(in_ipha) != IPV4_VERSION)) {
2234 goto discard_pkt;
2235 }
2236 if (IPH_HDR_LENGTH(in_ipha) < sizeof (ipha_t)) {
2237 goto discard_pkt;
2238 }
2239 /* Check for Self-encapsulated tunnels */
2240 if (in_ipha->ipha_src == ipha->ipha_src &&
2241 in_ipha->ipha_dst == ipha->ipha_dst) {
2242
2243 mp = icmp_inbound_self_encap_error_v4(mp, ipha,
2244 in_ipha);
2245 if (mp == NULL)
2246 goto discard_pkt;
2247
2248 /*
2249 * Just in case self_encap didn't preserve the NULL
2250 * b_cont
2251 */
2252 if (mp->b_cont != NULL) {
2253 if (!pullupmsg(mp, -1))
2254 goto discard_pkt;
2255 }
2256 /*
2257 * Note that ira_pktlen and ira_ip_hdr_length are no
2258 * longer correct, but we don't use them any more here.
2259 */
2260 if (mp->b_wptr - mp->b_rptr < IP_SIMPLE_HDR_LENGTH)
2261 goto truncated;
2262
2263 /*
2264 * Verify the modified message before any further
* processing.
2266 */
2267 ipha = (ipha_t *)mp->b_rptr;
2268 hdr_length = IPH_HDR_LENGTH(ipha);
2269 icmph = (icmph_t *)&mp->b_rptr[hdr_length];
2270 if (!icmp_inbound_verify_v4(mp, icmph, ira)) {
2271 freemsg(mp);
2272 return;
2273 }
2274
2275 /*
* The packet in error is self-encapsulated, yet we find
* it further encapsulated, which we could not possibly
* have generated.
2279 */
2280 if (ipha->ipha_protocol == IPPROTO_ENCAP) {
2281 goto discard_pkt;
2282 }
2283 icmp_inbound_error_fanout_v4(mp, icmph, ira);
2284 return;
2285 }
/* Not self-encapsulated */
2287 /* FALLTHRU */
2288 }
2289 case IPPROTO_IPV6:
2290 if ((connp = ipcl_iptun_classify_v4(&ripha.ipha_src,
2291 &ripha.ipha_dst, ipst)) != NULL) {
2292 ira->ira_flags |= IRAF_ICMP_ERROR;
2293 connp->conn_recvicmp(connp, mp, NULL, ira);
2294 CONN_DEC_REF(connp);
2295 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2296 return;
2297 }
2298 /*
* No IP tunnel is interested; fall through and see
2300 * if a raw socket will want it.
2301 */
2302 /* FALLTHRU */
2303 default:
2304 ira->ira_flags |= IRAF_ICMP_ERROR;
2305 ip_fanout_proto_v4(mp, &ripha, ira);
2306 ira->ira_flags &= ~IRAF_ICMP_ERROR;
2307 return;
2308 }
2309 /* NOTREACHED */
2310 discard_pkt:
2311 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
2312 ip1dbg(("icmp_inbound_error_fanout_v4: drop pkt\n"));
2313 ip_drop_input("ipIfStatsInDiscards", mp, ill);
2314 freemsg(mp);
2315 return;
2316
2317 truncated:
2318 /* We pulled up everthing already. Must be truncated */
2319 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
2320 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
2321 freemsg(mp);
2322 }
2323
2324 /*
2325 * Common IP options parser.
2326 *
2327 * Setup routine: fill in *optp with options-parsing state, then
2328 * tail-call ipoptp_next to return the first option.
2329 */
2330 uint8_t
2331 ipoptp_first(ipoptp_t *optp, ipha_t *ipha)
2332 {
2333 uint32_t totallen; /* total length of all options */
2334
2335 totallen = ipha->ipha_version_and_hdr_length -
2336 (uint8_t)((IP_VERSION << 4) + IP_SIMPLE_HDR_LENGTH_IN_WORDS);
2337 totallen <<= 2;
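/*
* The header-length nibble counts 32-bit words, so the subtraction and
* shift above leave totallen as the length of the options in bytes.
*/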
2338 optp->ipoptp_next = (uint8_t *)(&ipha[1]);
2339 optp->ipoptp_end = optp->ipoptp_next + totallen;
2340 optp->ipoptp_flags = 0;
2341 return (ipoptp_next(optp));
2342 }
2343
2344 /* Like above but without an ipha_t */
2345 uint8_t
2346 ipoptp_first2(ipoptp_t *optp, uint32_t totallen, uint8_t *opt)
2347 {
2348 optp->ipoptp_next = opt;
2349 optp->ipoptp_end = optp->ipoptp_next + totallen;
2350 optp->ipoptp_flags = 0;
2351 return (ipoptp_next(optp));
2352 }
2353
2354 /*
2355 * Common IP options parser: extract next option.
2356 */
2357 uint8_t
2358 ipoptp_next(ipoptp_t *optp)
2359 {
2360 uint8_t *end = optp->ipoptp_end;
2361 uint8_t *cur = optp->ipoptp_next;
2362 uint8_t opt, len, pointer;
2363
2364 /*
2365 * If cur > end already, then the ipoptp_end or ipoptp_next pointer
2366 * has been corrupted.
2367 */
2368 ASSERT(cur <= end);
2369
2370 if (cur == end)
2371 return (IPOPT_EOL);
2372
2373 opt = cur[IPOPT_OPTVAL];
2374
2375 /*
2376 * Skip any NOP options.
2377 */
2378 while (opt == IPOPT_NOP) {
2379 cur++;
2380 if (cur == end)
2381 return (IPOPT_EOL);
2382 opt = cur[IPOPT_OPTVAL];
2383 }
2384
2385 if (opt == IPOPT_EOL)
2386 return (IPOPT_EOL);
2387
2388 /*
2389 * Option requiring a length.
2390 */
2391 if ((cur + 1) >= end) {
2392 optp->ipoptp_flags |= IPOPTP_ERROR;
2393 return (IPOPT_EOL);
2394 }
2395 len = cur[IPOPT_OLEN];
2396 if (len < 2) {
2397 optp->ipoptp_flags |= IPOPTP_ERROR;
2398 return (IPOPT_EOL);
2399 }
2400 optp->ipoptp_cur = cur;
2401 optp->ipoptp_len = len;
2402 optp->ipoptp_next = cur + len;
2403 if (cur + len > end) {
2404 optp->ipoptp_flags |= IPOPTP_ERROR;
2405 return (IPOPT_EOL);
2406 }
2407
2408 /*
2409 * For the options which require a pointer field, make sure
* it's there, and make sure it points either to something
* inside this option or to the end of the option.
2412 */
2413 switch (opt) {
2414 case IPOPT_RR:
2415 case IPOPT_TS:
2416 case IPOPT_LSRR:
2417 case IPOPT_SSRR:
2418 if (len <= IPOPT_OFFSET) {
2419 optp->ipoptp_flags |= IPOPTP_ERROR;
2420 return (opt);
2421 }
2422 pointer = cur[IPOPT_OFFSET];
2423 if (pointer - 1 > len) {
2424 optp->ipoptp_flags |= IPOPTP_ERROR;
2425 return (opt);
2426 }
2427 break;
2428 }
2429
2430 /*
2431 * Sanity check the pointer field based on the type of the
2432 * option.
2433 */
2434 switch (opt) {
2435 case IPOPT_RR:
2436 case IPOPT_SSRR:
2437 case IPOPT_LSRR:
2438 if (pointer < IPOPT_MINOFF_SR)
2439 optp->ipoptp_flags |= IPOPTP_ERROR;
2440 break;
2441 case IPOPT_TS:
2442 if (pointer < IPOPT_MINOFF_IT)
2443 optp->ipoptp_flags |= IPOPTP_ERROR;
2444 /*
2445 * Note that the Internet Timestamp option also
2446 * contains two four bit fields (the Overflow field,
2447 * and the Flag field), which follow the pointer
2448 * field. We don't need to check that these fields
2449 * fall within the length of the option because this
* was implicitly done above. We've checked that the
2451 * pointer value is at least IPOPT_MINOFF_IT, and that
2452 * it falls within the option. Since IPOPT_MINOFF_IT >
2453 * IPOPT_POS_OV_FLG, we don't need the explicit check.
2454 */
2455 ASSERT(len > IPOPT_POS_OV_FLG);
2456 break;
2457 }
2458
2459 return (opt);
2460 }
2461
2462 /*
* Use the outgoing IP header to create an IP_OPTIONS options buffer the
* way it was passed down from the application.
2465 *
2466 * This is compatible with BSD in that it returns
2467 * the reverse source route with the final destination
2468 * as the last entry. The first 4 bytes of the option
2469 * will contain the final destination.
2470 */
2471 int
2472 ip_opt_get_user(conn_t *connp, uchar_t *buf)
2473 {
2474 ipoptp_t opts;
2475 uchar_t *opt;
2476 uint8_t optval;
2477 uint8_t optlen;
2478 uint32_t len = 0;
2479 uchar_t *buf1 = buf;
2480 uint32_t totallen;
2481 ipaddr_t dst;
2482 ip_pkt_t *ipp = &connp->conn_xmit_ipp;
2483
2484 if (!(ipp->ipp_fields & IPPF_IPV4_OPTIONS))
2485 return (0);
2486
2487 totallen = ipp->ipp_ipv4_options_len;
2488 if (totallen & 0x3)
2489 return (0);
2490
2491 buf += IP_ADDR_LEN; /* Leave room for final destination */
2492 len += IP_ADDR_LEN;
2493 bzero(buf1, IP_ADDR_LEN);
2494
2495 dst = connp->conn_faddr_v4;
2496
2497 for (optval = ipoptp_first2(&opts, totallen, ipp->ipp_ipv4_options);
2498 optval != IPOPT_EOL;
2499 optval = ipoptp_next(&opts)) {
2500 int off;
2501
2502 opt = opts.ipoptp_cur;
2503 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
2504 break;
2505 }
2506 optlen = opts.ipoptp_len;
2507
2508 switch (optval) {
2509 case IPOPT_SSRR:
2510 case IPOPT_LSRR:
2511
2512 /*
2513 * Insert destination as the first entry in the source
* route and move down the entries one step.
2515 * The last entry gets placed at buf1.
2516 */
2517 buf[IPOPT_OPTVAL] = optval;
2518 buf[IPOPT_OLEN] = optlen;
2519 buf[IPOPT_OFFSET] = optlen;
2520
2521 off = optlen - IP_ADDR_LEN;
2522 if (off < 0) {
2523 /* No entries in source route */
2524 break;
2525 }
2526 /* Last entry in source route if not already set */
2527 if (dst == INADDR_ANY)
2528 bcopy(opt + off, buf1, IP_ADDR_LEN);
2529 off -= IP_ADDR_LEN;
2530
2531 while (off > 0) {
2532 bcopy(opt + off,
2533 buf + off + IP_ADDR_LEN,
2534 IP_ADDR_LEN);
2535 off -= IP_ADDR_LEN;
2536 }
2537 /* ipha_dst into first slot */
2538 bcopy(&dst, buf + off + IP_ADDR_LEN,
2539 IP_ADDR_LEN);
2540 buf += optlen;
2541 len += optlen;
2542 break;
2543
2544 default:
2545 bcopy(opt, buf, optlen);
2546 buf += optlen;
2547 len += optlen;
2548 break;
2549 }
2550 }
2552 /* Pad the resulting options */
2553 while (len & 0x3) {
2554 *buf++ = IPOPT_EOL;
2555 len++;
2556 }
2557 return (len);
2558 }
2559
2560 /*
* Reverse any source route option; record route and timestamp
* options are left untouched.
2563 * This routine assumes that the options are well formed i.e. that they
2564 * have already been checked.
2565 */
2566 static void
2567 icmp_options_update(ipha_t *ipha)
2568 {
2569 ipoptp_t opts;
2570 uchar_t *opt;
2571 uint8_t optval;
2572 ipaddr_t src; /* Our local address */
2573 ipaddr_t dst;
2574
2575 ip2dbg(("icmp_options_update\n"));
2576 src = ipha->ipha_src;
2577 dst = ipha->ipha_dst;
2578
2579 for (optval = ipoptp_first(&opts, ipha);
2580 optval != IPOPT_EOL;
2581 optval = ipoptp_next(&opts)) {
2582 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
2583 opt = opts.ipoptp_cur;
2584 ip2dbg(("icmp_options_update: opt %d, len %d\n",
2585 optval, opts.ipoptp_len));
2586 switch (optval) {
2587 int off1, off2;
2588 case IPOPT_SSRR:
2589 case IPOPT_LSRR:
2590 /*
2591 * Reverse the source route. The first entry
2592 * should be the next to last one in the current
2593 * source route (the last entry is our address).
2594 * The last entry should be the final destination.
2595 */
2596 off1 = IPOPT_MINOFF_SR - 1;
2597 off2 = opt[IPOPT_OFFSET] - IP_ADDR_LEN - 1;
2598 if (off2 < 0) {
2599 /* No entries in source route */
2600 ip1dbg((
2601 "icmp_options_update: bad src route\n"));
2602 break;
2603 }
2604 bcopy((char *)opt + off2, &dst, IP_ADDR_LEN);
2605 bcopy(&ipha->ipha_dst, (char *)opt + off2, IP_ADDR_LEN);
2606 bcopy(&dst, &ipha->ipha_dst, IP_ADDR_LEN);
2607 off2 -= IP_ADDR_LEN;
2608
2609 while (off1 < off2) {
2610 bcopy((char *)opt + off1, &src, IP_ADDR_LEN);
2611 bcopy((char *)opt + off2, (char *)opt + off1,
2612 IP_ADDR_LEN);
2613 bcopy(&src, (char *)opt + off2, IP_ADDR_LEN);
2614 off1 += IP_ADDR_LEN;
2615 off2 -= IP_ADDR_LEN;
2616 }
2617 opt[IPOPT_OFFSET] = IPOPT_MINOFF_SR;
2618 break;
2619 }
2620 }
2621 }
2622
2623 /*
2624 * Process received ICMP Redirect messages.
2625 * Assumes the caller has verified that the headers are in the pulled up mblk.
2626 * Consumes mp.
2627 */
2628 static void
2629 icmp_redirect_v4(mblk_t *mp, ipha_t *ipha, icmph_t *icmph, ip_recv_attr_t *ira)
2630 {
2631 ire_t *ire, *nire;
2632 ire_t *prev_ire;
2633 ipaddr_t src, dst, gateway;
2634 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
2635 ipha_t *inner_ipha; /* Inner IP header */
2636
2637 /* Caller already pulled up everything. */
2638 inner_ipha = (ipha_t *)&icmph[1];
2639 src = ipha->ipha_src;
2640 dst = inner_ipha->ipha_dst;
2641 gateway = icmph->icmph_rd_gateway;
2642 /* Make sure the new gateway is reachable somehow. */
2643 ire = ire_ftable_lookup_v4(gateway, 0, 0, IRE_ONLINK, NULL,
2644 ALL_ZONES, NULL, MATCH_IRE_TYPE, 0, ipst, NULL);
2645 /*
2646 * Make sure we had a route for the dest in question and that
2647 * that route was pointing to the old gateway (the source of the
* redirect packet).
2649 * We do longest match and then compare ire_gateway_addr below.
2650 */
2651 prev_ire = ire_ftable_lookup_v4(dst, 0, 0, 0, NULL, ALL_ZONES,
2652 NULL, MATCH_IRE_DSTONLY, 0, ipst, NULL);
2653 /*
* Check that:
* - the redirect was not from ourselves
* - the new gateway and the old gateway are directly reachable
2657 */
2658 if (prev_ire == NULL || ire == NULL ||
2659 (prev_ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK)) ||
2660 (prev_ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) ||
2661 !(ire->ire_type & IRE_IF_ALL) ||
2662 prev_ire->ire_gateway_addr != src) {
2663 BUMP_MIB(&ipst->ips_icmp_mib, icmpInBadRedirects);
2664 ip_drop_input("icmpInBadRedirects - ire", mp, ira->ira_ill);
2665 freemsg(mp);
2666 if (ire != NULL)
2667 ire_refrele(ire);
2668 if (prev_ire != NULL)
2669 ire_refrele(prev_ire);
2670 return;
2671 }
2672
2673 ire_refrele(prev_ire);
2674 ire_refrele(ire);
2675
2676 /*
2677 * TODO: more precise handling for cases 0, 2, 3, the latter two
2678 * require TOS routing
2679 */
2680 switch (icmph->icmph_code) {
2681 case 0:
2682 case 1:
2683 /* TODO: TOS specificity for cases 2 and 3 */
2684 case 2:
2685 case 3:
2686 break;
2687 default:
2688 BUMP_MIB(&ipst->ips_icmp_mib, icmpInBadRedirects);
2689 ip_drop_input("icmpInBadRedirects - code", mp, ira->ira_ill);
2690 freemsg(mp);
2691 return;
2692 }
2693 /*
2694 * Create a Route Association. This will allow us to remember that
2695 * someone we believe told us to use the particular gateway.
2696 */
2697 ire = ire_create(
2698 (uchar_t *)&dst, /* dest addr */
2699 (uchar_t *)&ip_g_all_ones, /* mask */
2700 (uchar_t *)&gateway, /* gateway addr */
2701 IRE_HOST,
2702 NULL, /* ill */
2703 ALL_ZONES,
2704 (RTF_DYNAMIC | RTF_GATEWAY | RTF_HOST),
2705 NULL, /* tsol_gc_t */
2706 ipst);
2707
2708 if (ire == NULL) {
2709 freemsg(mp);
2710 return;
2711 }
2712 nire = ire_add(ire);
2713 /* Check if it was a duplicate entry */
2714 if (nire != NULL && nire != ire) {
2715 ASSERT(nire->ire_identical_ref > 1);
2716 ire_delete(nire);
2717 ire_refrele(nire);
2718 nire = NULL;
2719 }
2720 ire = nire;
2721 if (ire != NULL) {
2722 ire_refrele(ire); /* Held in ire_add */
2723
2724 /* tell routing sockets that we received a redirect */
2725 ip_rts_change(RTM_REDIRECT, dst, gateway, IP_HOST_MASK, 0, src,
2726 (RTF_DYNAMIC | RTF_GATEWAY | RTF_HOST), 0,
2727 (RTA_DST | RTA_GATEWAY | RTA_NETMASK | RTA_AUTHOR), ipst);
2728 }
2729
2730 /*
2731 * Delete any existing IRE_HOST type redirect ires for this destination.
2732 * This together with the added IRE has the effect of
2733 * modifying an existing redirect.
2734 */
2735 prev_ire = ire_ftable_lookup_v4(dst, 0, src, IRE_HOST, NULL,
2736 ALL_ZONES, NULL, (MATCH_IRE_GW | MATCH_IRE_TYPE), 0, ipst, NULL);
2737 if (prev_ire != NULL) {
if (prev_ire->ire_flags & RTF_DYNAMIC)
2739 ire_delete(prev_ire);
2740 ire_refrele(prev_ire);
2741 }
2742
2743 freemsg(mp);
2744 }
2745
2746 /*
2747 * Generate an ICMP parameter problem message.
* When called from the ip_output side, a minimal ip_recv_attr_t needs to be
2749 * constructed by the caller.
2750 */
2751 static void
2752 icmp_param_problem(mblk_t *mp, uint8_t ptr, ip_recv_attr_t *ira)
2753 {
2754 icmph_t icmph;
2755 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
2756
2757 mp = icmp_pkt_err_ok(mp, ira);
2758 if (mp == NULL)
2759 return;
2760
2761 bzero(&icmph, sizeof (icmph_t));
2762 icmph.icmph_type = ICMP_PARAM_PROBLEM;
2763 icmph.icmph_pp_ptr = ptr;
2764 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutParmProbs);
2765 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
2766 }
2767
2768 /*
2769 * Build and ship an IPv4 ICMP message using the packet data in mp, and
2770 * the ICMP header pointed to by "stuff". (May be called as writer.)
2771 * Note: assumes that icmp_pkt_err_ok has been called to verify that
2772 * an icmp error packet can be sent.
2773 * Assigns an appropriate source address to the packet. If ipha_dst is
2774 * one of our addresses use it for source. Otherwise let ip_output_simple
2775 * pick the source address.
2776 */
2777 static void
2778 icmp_pkt(mblk_t *mp, void *stuff, size_t len, ip_recv_attr_t *ira)
2779 {
2780 ipaddr_t dst;
2781 icmph_t *icmph;
2782 ipha_t *ipha;
2783 uint_t len_needed;
2784 size_t msg_len;
2785 mblk_t *mp1;
2786 ipaddr_t src;
2787 ire_t *ire;
2788 ip_xmit_attr_t ixas;
2789 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
2790
2791 ipha = (ipha_t *)mp->b_rptr;
2792
2793 bzero(&ixas, sizeof (ixas));
2794 ixas.ixa_flags = IXAF_BASIC_SIMPLE_V4;
2795 ixas.ixa_zoneid = ira->ira_zoneid;
2796 ixas.ixa_ifindex = 0;
2797 ixas.ixa_ipst = ipst;
2798 ixas.ixa_cred = kcred;
2799 ixas.ixa_cpid = NOPID;
2800 ixas.ixa_tsl = ira->ira_tsl; /* Behave as a multi-level responder */
2801 ixas.ixa_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
2802
2803 if (ira->ira_flags & IRAF_IPSEC_SECURE) {
2804 /*
2805 * Apply IPsec based on how IPsec was applied to
2806 * the packet that had the error.
2807 *
2808 * If it was an outbound packet that caused the ICMP
2809 * error, then the caller will have setup the IRA
2810 * appropriately.
2811 */
2812 if (!ipsec_in_to_out(ira, &ixas, mp, ipha, NULL)) {
2813 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
2814 /* Note: mp already consumed and ip_drop_packet done */
2815 return;
2816 }
2817 } else {
2818 /*
2819 * This is in clear. The icmp message we are building
2820 * here should go out in clear, independent of our policy.
2821 */
2822 ixas.ixa_flags |= IXAF_NO_IPSEC;
2823 }
2824
2825 /* Remember our eventual destination */
2826 dst = ipha->ipha_src;
2827
2828 /*
2829 * If the packet was for one of our unicast addresses, make
2830 * sure we respond with that as the source. Otherwise
2831 * have ip_output_simple pick the source address.
2832 */
2833 ire = ire_ftable_lookup_v4(ipha->ipha_dst, 0, 0,
2834 (IRE_LOCAL|IRE_LOOPBACK), NULL, ira->ira_zoneid, NULL,
2835 MATCH_IRE_TYPE|MATCH_IRE_ZONEONLY, 0, ipst, NULL);
2836 if (ire != NULL) {
2837 ire_refrele(ire);
2838 src = ipha->ipha_dst;
2839 } else {
2840 src = INADDR_ANY;
2841 ixas.ixa_flags |= IXAF_SET_SOURCE;
2842 }
2843
2844 /*
* Check if we can send back more than 8 bytes in addition to
* the IP header. We try to send 64 bytes of data and the internal
* header in the special cases of IPv4 encapsulated in IPv4 or IPv6.
2848 */
2849 len_needed = IPH_HDR_LENGTH(ipha);
2850 if (ipha->ipha_protocol == IPPROTO_ENCAP ||
2851 ipha->ipha_protocol == IPPROTO_IPV6) {
2852 if (!pullupmsg(mp, -1)) {
2853 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
2854 ip_drop_output("ipIfStatsOutDiscards", mp, NULL);
2855 freemsg(mp);
2856 return;
2857 }
2858 ipha = (ipha_t *)mp->b_rptr;
2859
2860 if (ipha->ipha_protocol == IPPROTO_ENCAP) {
2861 len_needed += IPH_HDR_LENGTH(((uchar_t *)ipha +
2862 len_needed));
2863 } else {
2864 ip6_t *ip6h = (ip6_t *)((uchar_t *)ipha + len_needed);
2865
2866 ASSERT(ipha->ipha_protocol == IPPROTO_IPV6);
2867 len_needed += ip_hdr_length_v6(mp, ip6h);
2868 }
2869 }
2870 len_needed += ipst->ips_ip_icmp_return;
2871 msg_len = msgdsize(mp);
2872 if (msg_len > len_needed) {
2873 (void) adjmsg(mp, len_needed - msg_len);
2874 msg_len = len_needed;
2875 }
2876 mp1 = allocb(sizeof (icmp_ipha) + len, BPRI_MED);
2877 if (mp1 == NULL) {
2878 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutErrors);
2879 freemsg(mp);
2880 return;
2881 }
2882 mp1->b_cont = mp;
2883 mp = mp1;
2884
2885 /*
2886 * Set IXAF_TRUSTED_ICMP so we can let the ICMP messages this
2887 * node generates be accepted in peace by all on-host destinations.
2888 * If we do NOT assume that all on-host destinations trust
2889 * self-generated ICMP messages, then rework here, ip6.c, and spd.c.
2890 * (Look for IXAF_TRUSTED_ICMP).
2891 */
2892 ixas.ixa_flags |= IXAF_TRUSTED_ICMP;
2893
2894 ipha = (ipha_t *)mp->b_rptr;
2895 mp1->b_wptr = (uchar_t *)ipha + (sizeof (icmp_ipha) + len);
2896 *ipha = icmp_ipha;
2897 ipha->ipha_src = src;
2898 ipha->ipha_dst = dst;
2899 ipha->ipha_ttl = ipst->ips_ip_def_ttl;
2900 msg_len += sizeof (icmp_ipha) + len;
2901 if (msg_len > IP_MAXPACKET) {
2902 (void) adjmsg(mp, IP_MAXPACKET - msg_len);
2903 msg_len = IP_MAXPACKET;
2904 }
2905 ipha->ipha_length = htons((uint16_t)msg_len);
2906 icmph = (icmph_t *)&ipha[1];
2907 bcopy(stuff, icmph, len);
2908 icmph->icmph_checksum = 0;
2909 icmph->icmph_checksum = IP_CSUM(mp, (int32_t)sizeof (ipha_t), 0);
2910 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutMsgs);
2911
2912 (void) ip_output_simple(mp, &ixas);
2913 ixa_cleanup(&ixas);
2914 }
2915
2916 /*
2917 * Determine if an ICMP error packet can be sent given the rate limit.
2918 * The limit consists of an average frequency (icmp_pkt_err_interval measured
* in milliseconds) and a burst size. A burst-size number of packets can
* be sent arbitrarily closely spaced.
2921 * The state is tracked using two variables to implement an approximate
2922 * token bucket filter:
2923 * icmp_pkt_err_last - lbolt value when the last burst started
2924 * icmp_pkt_err_sent - number of packets sent in current burst
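*
* For example (illustrative values, not necessarily the tunables'
* defaults): with an interval of 100 ms and a burst of 10, ten errors
* may be sent back to back; thereafter one token refills every 100 ms,
* limiting the sustained rate to ten errors per second.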
2925 */
2926 boolean_t
2927 icmp_err_rate_limit(ip_stack_t *ipst)
2928 {
2929 clock_t now = TICK_TO_MSEC(ddi_get_lbolt());
2930 uint_t refilled; /* Number of packets refilled in tbf since last */
2931 /* Guard against changes by loading into local variable */
2932 uint_t err_interval = ipst->ips_ip_icmp_err_interval;
2933
2934 if (err_interval == 0)
2935 return (B_FALSE);
2936
2937 if (ipst->ips_icmp_pkt_err_last > now) {
2938 /* 100HZ lbolt in ms for 32bit arch wraps every 49.7 days */
2939 ipst->ips_icmp_pkt_err_last = 0;
2940 ipst->ips_icmp_pkt_err_sent = 0;
2941 }
2942 /*
* If we are in a burst, update the token bucket filter.
* Update the "last" time to be close to "now" but make sure
* we don't lose precision.
2946 */
2947 if (ipst->ips_icmp_pkt_err_sent != 0) {
2948 refilled = (now - ipst->ips_icmp_pkt_err_last)/err_interval;
2949 if (refilled > ipst->ips_icmp_pkt_err_sent) {
2950 ipst->ips_icmp_pkt_err_sent = 0;
2951 } else {
2952 ipst->ips_icmp_pkt_err_sent -= refilled;
2953 ipst->ips_icmp_pkt_err_last += refilled * err_interval;
2954 }
2955 }
2956 if (ipst->ips_icmp_pkt_err_sent == 0) {
2957 /* Start of new burst */
2958 ipst->ips_icmp_pkt_err_last = now;
2959 }
2960 if (ipst->ips_icmp_pkt_err_sent < ipst->ips_ip_icmp_err_burst) {
2961 ipst->ips_icmp_pkt_err_sent++;
2962 ip1dbg(("icmp_err_rate_limit: %d sent in burst\n",
2963 ipst->ips_icmp_pkt_err_sent));
2964 return (B_FALSE);
2965 }
2966 ip1dbg(("icmp_err_rate_limit: dropped\n"));
2967 return (B_TRUE);
2968 }
2969
2970 /*
2971 * Check if it is ok to send an IPv4 ICMP error packet in
2972 * response to the IPv4 packet in mp.
2973 * Free the message and return null if no
2974 * ICMP error packet should be sent.
2975 */
2976 static mblk_t *
2977 icmp_pkt_err_ok(mblk_t *mp, ip_recv_attr_t *ira)
2978 {
2979 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
2980 icmph_t *icmph;
2981 ipha_t *ipha;
2982 uint_t len_needed;
2983
2984 if (!mp)
2985 return (NULL);
2986 ipha = (ipha_t *)mp->b_rptr;
2987 if (ip_csum_hdr(ipha)) {
2988 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInCksumErrs);
2989 ip_drop_input("ipIfStatsInCksumErrs", mp, NULL);
2990 freemsg(mp);
2991 return (NULL);
2992 }
2993 if (ip_type_v4(ipha->ipha_dst, ipst) == IRE_BROADCAST ||
2994 ip_type_v4(ipha->ipha_src, ipst) == IRE_BROADCAST ||
2995 CLASSD(ipha->ipha_dst) ||
2996 CLASSD(ipha->ipha_src) ||
2997 (ntohs(ipha->ipha_fragment_offset_and_flags) & IPH_OFFSET)) {
2998 /* Note: only errors to the fragment with offset 0 */
2999 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
3000 freemsg(mp);
3001 return (NULL);
3002 }
3003 if (ipha->ipha_protocol == IPPROTO_ICMP) {
3004 /*
3005 * Check the ICMP type. RFC 1122 sez: don't send ICMP
3006 * errors in response to any ICMP errors.
3007 */
3008 len_needed = IPH_HDR_LENGTH(ipha) + ICMPH_SIZE;
3009 if (mp->b_wptr - mp->b_rptr < len_needed) {
3010 if (!pullupmsg(mp, len_needed)) {
3011 BUMP_MIB(&ipst->ips_icmp_mib, icmpInErrors);
3012 freemsg(mp);
3013 return (NULL);
3014 }
3015 ipha = (ipha_t *)mp->b_rptr;
3016 }
3017 icmph = (icmph_t *)
3018 (&((char *)ipha)[IPH_HDR_LENGTH(ipha)]);
3019 switch (icmph->icmph_type) {
3020 case ICMP_DEST_UNREACHABLE:
3021 case ICMP_SOURCE_QUENCH:
3022 case ICMP_TIME_EXCEEDED:
3023 case ICMP_PARAM_PROBLEM:
3024 case ICMP_REDIRECT:
3025 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
3026 freemsg(mp);
3027 return (NULL);
3028 default:
3029 break;
3030 }
3031 }
3032 /*
3033 * If this is a labeled system, then check to see if we're allowed to
3034 * send a response to this particular sender. If not, then just drop.
3035 */
3036 if (is_system_labeled() && !tsol_can_reply_error(mp, ira)) {
3037 ip2dbg(("icmp_pkt_err_ok: can't respond to packet\n"));
3038 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
3039 freemsg(mp);
3040 return (NULL);
3041 }
3042 if (icmp_err_rate_limit(ipst)) {
3043 /*
3044 * Only send ICMP error packets every so often.
3045 * This should be done on a per port/source basis,
3046 * but for now this will suffice.
3047 */
3048 freemsg(mp);
3049 return (NULL);
3050 }
3051 return (mp);
3052 }
3053
3054 /*
3055 * Called when a packet was sent out the same link that it arrived on.
3056 * Check if it is ok to send a redirect and then send it.
3057 */
3058 void
3059 ip_send_potential_redirect_v4(mblk_t *mp, ipha_t *ipha, ire_t *ire,
3060 ip_recv_attr_t *ira)
3061 {
3062 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
3063 ipaddr_t src, nhop;
3064 mblk_t *mp1;
3065 ire_t *nhop_ire;
3066
3067 /*
3068 * Check the source address to see if it originated
3069 * on the same logical subnet it is going back out on.
3070 * If so, we should be able to send it a redirect.
3071 * Avoid sending a redirect if the destination
3072 * is directly connected (i.e., we matched an IRE_ONLINK),
3073 * or if the packet was source routed out this interface.
3074 *
* We avoid sending a redirect if the destination is directly
* connected because it is possible that multiple IP subnets have
* been configured on the link, and the source may not be on the
* same subnet as the IP destination, even though they are on the
* same physical link.
3083 */
3084 if ((ire->ire_type & IRE_ONLINK) ||
3085 ip_source_routed(ipha, ipst))
3086 return;
3087
3088 nhop_ire = ire_nexthop(ire);
3089 if (nhop_ire == NULL)
3090 return;
3091
3092 nhop = nhop_ire->ire_addr;
3093
3094 if (nhop_ire->ire_type & IRE_IF_CLONE) {
3095 ire_t *ire2;
3096
3097 /* Follow ire_dep_parent to find non-clone IRE_INTERFACE */
3098 mutex_enter(&nhop_ire->ire_lock);
3099 ire2 = nhop_ire->ire_dep_parent;
3100 if (ire2 != NULL)
3101 ire_refhold(ire2);
3102 mutex_exit(&nhop_ire->ire_lock);
3103 ire_refrele(nhop_ire);
3104 nhop_ire = ire2;
3105 }
3106 if (nhop_ire == NULL)
3107 return;
3108
3109 ASSERT(!(nhop_ire->ire_type & IRE_IF_CLONE));
3110
3111 src = ipha->ipha_src;
3112
3113 /*
3114 * We look at the interface ire for the nexthop,
3115 * to see if ipha_src is in the same subnet
3116 * as the nexthop.
3117 */
3118 if ((src & nhop_ire->ire_mask) == (nhop & nhop_ire->ire_mask)) {
3119 /*
3120 * The source is directly connected.
3121 */
3122 mp1 = copymsg(mp);
3123 if (mp1 != NULL) {
3124 icmp_send_redirect(mp1, nhop, ira);
3125 }
3126 }
3127 ire_refrele(nhop_ire);
3128 }
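
/*
 * Illustrative sketch (not part of the build) of the same-subnet test
 * above, using fixed example addresses; the values are hypothetical:
 *
 *	ipaddr_t src  = htonl(0xc0a8010a);	(192.168.1.10)
 *	ipaddr_t nhop = htonl(0xc0a80101);	(192.168.1.1)
 *	ipaddr_t mask = htonl(0xffffff00);	(255.255.255.0)
 *
 *	(src & mask) == (nhop & mask) evaluates to B_TRUE here, so the
 *	sender shares the nexthop's subnet and a redirect is useful.
 */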
3129
3130 /*
3131 * Generate an ICMP redirect message.
3132 */
3133 static void
3134 icmp_send_redirect(mblk_t *mp, ipaddr_t gateway, ip_recv_attr_t *ira)
3135 {
3136 icmph_t icmph;
3137 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
3138
3139 mp = icmp_pkt_err_ok(mp, ira);
3140 if (mp == NULL)
3141 return;
3142
3143 bzero(&icmph, sizeof (icmph_t));
3144 icmph.icmph_type = ICMP_REDIRECT;
3145 icmph.icmph_code = 1;
3146 icmph.icmph_rd_gateway = gateway;
3147 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutRedirects);
3148 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
3149 }
3150
3151 /*
3152 * Generate an ICMP time exceeded message.
3153 */
3154 void
3155 icmp_time_exceeded(mblk_t *mp, uint8_t code, ip_recv_attr_t *ira)
3156 {
3157 icmph_t icmph;
3158 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
3159
3160 mp = icmp_pkt_err_ok(mp, ira);
3161 if (mp == NULL)
3162 return;
3163
3164 bzero(&icmph, sizeof (icmph_t));
3165 icmph.icmph_type = ICMP_TIME_EXCEEDED;
3166 icmph.icmph_code = code;
3167 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutTimeExcds);
3168 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
3169 }
3170
3171 /*
3172 * Generate an ICMP unreachable message.
3173 * When called from ip_output side a minimal ip_recv_attr_t needs to be
3174 * constructed by the caller.
3175 */
3176 void
3177 icmp_unreachable(mblk_t *mp, uint8_t code, ip_recv_attr_t *ira)
3178 {
3179 icmph_t icmph;
3180 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
3181
3182 mp = icmp_pkt_err_ok(mp, ira);
3183 if (mp == NULL)
3184 return;
3185
3186 bzero(&icmph, sizeof (icmph_t));
3187 icmph.icmph_type = ICMP_DEST_UNREACHABLE;
3188 icmph.icmph_code = code;
3189 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDestUnreachs);
3190 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
3191 }
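
/*
 * Illustrative sketch (not part of the build): a transport that finds
 * no listener for an inbound packet typically reflects it as a port
 * unreachable; mp and ira are assumed to be the inbound message and
 * its receive attributes:
 *
 *	icmp_unreachable(mp, ICMP_PORT_UNREACHABLE, ira);
 *
 * icmp_unreachable() consumes mp in all cases (either by sending the
 * error or by freeing it), so the caller must not touch it afterwards.
 */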
3192
3193 /*
 * Latch in the IPsec state for a stream based on the policy in the listener
3195 * and the actions in the ip_recv_attr_t.
3196 * Called directly from TCP and SCTP.
3197 */
3198 boolean_t
3199 ip_ipsec_policy_inherit(conn_t *connp, conn_t *lconnp, ip_recv_attr_t *ira)
3200 {
3201 ASSERT(lconnp->conn_policy != NULL);
3202 ASSERT(connp->conn_policy == NULL);
3203
3204 IPPH_REFHOLD(lconnp->conn_policy);
3205 connp->conn_policy = lconnp->conn_policy;
3206
3207 if (ira->ira_ipsec_action != NULL) {
3208 if (connp->conn_latch == NULL) {
3209 connp->conn_latch = iplatch_create();
3210 if (connp->conn_latch == NULL)
3211 return (B_FALSE);
3212 }
3213 ipsec_latch_inbound(connp, ira);
3214 }
3215 return (B_TRUE);
3216 }
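
/*
 * Illustrative sketch (not part of the build): a TCP listener that has
 * policy hands it down to the accepted ("eager") connection; econnp,
 * lconnp, and the failure handling are assumed to be the caller's:
 *
 *	if (lconnp->conn_policy != NULL &&
 *	    !ip_ipsec_policy_inherit(econnp, lconnp, ira)) {
 *		(latch allocation failed; drop the connection)
 *	}
 */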
3217
3218 /*
3219 * Verify whether or not the IP address is a valid local address.
3220 * Could be a unicast, including one for a down interface.
3221 * If allow_mcbc then a multicast or broadcast address is also
3222 * acceptable.
3223 *
3224 * In the case of a broadcast/multicast address, however, the
3225 * upper protocol is expected to reset the src address
3226 * to zero when we return IPVL_MCAST/IPVL_BCAST so that
 * no packets are emitted with a broadcast/multicast address as
 * the source address (which would violate the Host Requirements of
 * RFC 1122).
3229 * The addresses valid for bind are:
3230 * (1) - INADDR_ANY (0)
3231 * (2) - IP address of an UP interface
3232 * (3) - IP address of a DOWN interface
3233 * (4) - valid local IP broadcast addresses. In this case
3234 * the conn will only receive packets destined to
3235 * the specified broadcast address.
3236 * (5) - a multicast address. In this case
3237 * the conn will only receive packets destined to
3238 * the specified multicast address. Note: the
3239 * application still has to issue an
3240 * IP_ADD_MEMBERSHIP socket option.
3241 *
3242 * In all the above cases, the bound address must be valid in the current zone.
3243 * When the address is loopback, multicast or broadcast, there might be many
3244 * matching IREs so bind has to look up based on the zone.
3245 */
3246 ip_laddr_t
3247 ip_laddr_verify_v4(ipaddr_t src_addr, zoneid_t zoneid,
3248 ip_stack_t *ipst, boolean_t allow_mcbc)
3249 {
3250 ire_t *src_ire;
3251
3252 ASSERT(src_addr != INADDR_ANY);
3253
3254 src_ire = ire_ftable_lookup_v4(src_addr, 0, 0, 0,
3255 NULL, zoneid, NULL, MATCH_IRE_ZONEONLY, 0, ipst, NULL);
3256
3257 /*
 * If an address other than INADDR_ANY is requested,
 * we verify that it is a valid address for bind.
 * Note: the following code is in if-else-if form for
 * readability compared to a single compound condition.
3262 */
3263 if (src_ire != NULL && (src_ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK))) {
3264 /*
3265 * (2) Bind to address of local UP interface
3266 */
3267 ire_refrele(src_ire);
3268 return (IPVL_UNICAST_UP);
3269 } else if (src_ire != NULL && src_ire->ire_type & IRE_BROADCAST) {
3270 /*
3271 * (4) Bind to broadcast address
3272 */
3273 ire_refrele(src_ire);
3274 if (allow_mcbc)
3275 return (IPVL_BCAST);
3276 else
3277 return (IPVL_BAD);
3278 } else if (CLASSD(src_addr)) {
3279 /* (5) bind to multicast address. */
3280 if (src_ire != NULL)
3281 ire_refrele(src_ire);
3282
3283 if (allow_mcbc)
3284 return (IPVL_MCAST);
3285 else
3286 return (IPVL_BAD);
3287 } else {
3288 ipif_t *ipif;
3289
3290 /*
3291 * (3) Bind to address of local DOWN interface?
3292 * (ipif_lookup_addr() looks up all interfaces
3293 * but we do not get here for UP interfaces
3294 * - case (2) above)
3295 */
3296 if (src_ire != NULL)
3297 ire_refrele(src_ire);
3298
3299 ipif = ipif_lookup_addr(src_addr, NULL, zoneid, ipst);
3300 if (ipif == NULL)
3301 return (IPVL_BAD);
3302
3303 /* Not a useful source? */
3304 if (ipif->ipif_flags & (IPIF_NOLOCAL | IPIF_ANYCAST)) {
3305 ipif_refrele(ipif);
3306 return (IPVL_BAD);
3307 }
3308 ipif_refrele(ipif);
3309 return (IPVL_UNICAST_DOWN);
3310 }
3311 }
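
/*
 * Illustrative sketch (not part of the build) of a transport
 * classifying a requested local address at bind time; laddr, zoneid,
 * and ipst are assumed to come from the conn doing the bind:
 *
 *	switch (ip_laddr_verify_v4(laddr, zoneid, ipst, B_TRUE)) {
 *	case IPVL_UNICAST_UP:
 *	case IPVL_UNICAST_DOWN:
 *		break;			(usable unicast source)
 *	case IPVL_MCAST:
 *	case IPVL_BCAST:
 *		(per the comment above, never use mcast/bcast
 *		as the source address)
 *		laddr = INADDR_ANY;
 *		break;
 *	case IPVL_BAD:
 *		return (EADDRNOTAVAIL);
 *	}
 */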
3312
3313 /*
3314 * Insert in the bind fanout for IPv4 and IPv6.
3315 * The caller should already have used ip_laddr_verify_v*() before calling
3316 * this.
3317 */
3318 int
3319 ip_laddr_fanout_insert(conn_t *connp)
3320 {
3321 int error;
3322
3323 /*
3324 * Allow setting new policies. For example, disconnects result
3325 * in us being called. As we would have set conn_policy_cached
3326 * to B_TRUE before, we should set it to B_FALSE, so that policy
3327 * can change after the disconnect.
3328 */
3329 connp->conn_policy_cached = B_FALSE;
3330
3331 error = ipcl_bind_insert(connp);
3332 if (error != 0) {
3333 if (connp->conn_anon_port) {
3334 (void) tsol_mlp_anon(crgetzone(connp->conn_cred),
3335 connp->conn_mlp_type, connp->conn_proto,
3336 ntohs(connp->conn_lport), B_FALSE);
3337 }
3338 connp->conn_mlp_type = mlptSingle;
3339 }
3340 return (error);
3341 }
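
/*
 * Illustrative sketch (not part of the build): the expected bind
 * sequence is address verification followed by fanout insertion;
 * connp, laddr, zoneid, and ipst are assumed to be the transport's:
 *
 *	if (ip_laddr_verify_v4(laddr, zoneid, ipst, B_FALSE) == IPVL_BAD)
 *		return (EADDRNOTAVAIL);
 *	error = ip_laddr_fanout_insert(connp);
 *	if (error != 0)
 *		return (error);
 */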
3342
3343 /*
3344 * Verify that both the source and destination addresses are valid. If
3345 * IPDF_VERIFY_DST is not set, then the destination address may be unreachable,
3346 * i.e. have no route to it. Protocols like TCP want to verify destination
3347 * reachability, while tunnels do not.
3348 *
3349 * Determine the route, the interface, and (optionally) the source address
3350 * to use to reach a given destination.
3351 * Note that we allow connect to broadcast and multicast addresses when
3352 * IPDF_ALLOW_MCBC is set.
 * first_hop and dst_addr are normally the same, but when source routing
 * is used they will differ; in that case first_hop is what we'll use for
 * the routing lookup, while the dce and label checks are done on dst_addr.
3356 *
3357 * If uinfo is set, then we fill in the best available information
3358 * we have for the destination. This is based on (in priority order) any
3359 * metrics and path MTU stored in a dce_t, route metrics, and finally the
3360 * ill_mtu/ill_mc_mtu.
3361 *
3362 * Tsol note: If we have a source route then dst_addr != firsthop. But we
3363 * always do the label check on dst_addr.
3364 */
3365 int
3366 ip_set_destination_v4(ipaddr_t *src_addrp, ipaddr_t dst_addr, ipaddr_t firsthop,
3367 ip_xmit_attr_t *ixa, iulp_t *uinfo, uint32_t flags, uint_t mac_mode)
3368 {
3369 ire_t *ire = NULL;
3370 int error = 0;
3371 ipaddr_t setsrc; /* RTF_SETSRC */
3372 zoneid_t zoneid = ixa->ixa_zoneid; /* Honors SO_ALLZONES */
3373 ip_stack_t *ipst = ixa->ixa_ipst;
3374 dce_t *dce;
3375 uint_t pmtu;
3376 uint_t generation;
3377 nce_t *nce;
3378 ill_t *ill = NULL;
3379 boolean_t multirt = B_FALSE;
3380
3381 ASSERT(ixa->ixa_flags & IXAF_IS_IPV4);
3382
3383 /*
3384 * We never send to zero; the ULPs map it to the loopback address.
 * We can't allow it since we use zero to mean uninitialized in some
3386 * places.
3387 */
3388 ASSERT(dst_addr != INADDR_ANY);
3389
3390 if (is_system_labeled()) {
3391 ts_label_t *tsl = NULL;
3392
3393 error = tsol_check_dest(ixa->ixa_tsl, &dst_addr, IPV4_VERSION,
3394 mac_mode, (flags & IPDF_ZONE_IS_GLOBAL) != 0, &tsl);
3395 if (error != 0)
3396 return (error);
3397 if (tsl != NULL) {
3398 /* Update the label */
3399 ip_xmit_attr_replace_tsl(ixa, tsl);
3400 }
3401 }
3402
3403 setsrc = INADDR_ANY;
3404 /*
 * Select a route; for IPMP interfaces, we would only select
3406 * a "hidden" route (i.e., going through a specific under_ill)
3407 * if ixa_ifindex has been specified.
3408 */
3409 ire = ip_select_route_v4(firsthop, *src_addrp, ixa,
3410 &generation, &setsrc, &error, &multirt);
3411 ASSERT(ire != NULL); /* IRE_NOROUTE if none found */
3412 if (error != 0)
3413 goto bad_addr;
3414
3415 /*
3416 * ire can't be a broadcast or multicast unless IPDF_ALLOW_MCBC is set.
3417 * If IPDF_VERIFY_DST is set, the destination must be reachable;
 * otherwise the destination needn't be reachable.
3419 *
3420 * If we match on a reject or black hole, then we've got a
3421 * local failure. May as well fail out the connect() attempt,
3422 * since it's never going to succeed.
3423 */
3424 if (ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
3425 /*
3426 * If we're verifying destination reachability, we always want
3427 * to complain here.
3428 *
3429 * If we're not verifying destination reachability but the
3430 * destination has a route, we still want to fail on the
3431 * temporary address and broadcast address tests.
3432 *
 * In both cases we let the code continue so that some reasonable
 * information is returned to the caller. That enables the
 * caller to use (and even cache) the IRE. conn_ip_output will
3436 * use the generation mismatch path to check for the unreachable
3437 * case thereby avoiding any specific check in the main path.
3438 */
3439 ASSERT(generation == IRE_GENERATION_VERIFY);
3440 if (flags & IPDF_VERIFY_DST) {
3441 /*
3442 * Set errno but continue to set up ixa_ire to be
3443 * the RTF_REJECT|RTF_BLACKHOLE IRE.
3444 * That allows callers to use ip_output to get an
3445 * ICMP error back.
3446 */
3447 if (!(ire->ire_type & IRE_HOST))
3448 error = ENETUNREACH;
3449 else
3450 error = EHOSTUNREACH;
3451 }
3452 }
3453
3454 if ((ire->ire_type & (IRE_BROADCAST|IRE_MULTICAST)) &&
3455 !(flags & IPDF_ALLOW_MCBC)) {
3456 ire_refrele(ire);
3457 ire = ire_reject(ipst, B_FALSE);
3458 generation = IRE_GENERATION_VERIFY;
3459 error = ENETUNREACH;
3460 }
3461
3462 /* Cache things */
3463 if (ixa->ixa_ire != NULL)
3464 ire_refrele_notr(ixa->ixa_ire);
3465 #ifdef DEBUG
3466 ire_refhold_notr(ire);
3467 ire_refrele(ire);
3468 #endif
3469 ixa->ixa_ire = ire;
3470 ixa->ixa_ire_generation = generation;
3471
3472 /*
3473 * Ensure that ixa_dce is always set any time that ixa_ire is set,
3474 * since some callers will send a packet to conn_ip_output() even if
3475 * there's an error.
3476 */
3477 if (flags & IPDF_UNIQUE_DCE) {
3478 /* Fallback to the default dce if allocation fails */
3479 dce = dce_lookup_and_add_v4(dst_addr, ipst);
3480 if (dce != NULL)
3481 generation = dce->dce_generation;
3482 else
3483 dce = dce_lookup_v4(dst_addr, ipst, &generation);
3484 } else {
3485 dce = dce_lookup_v4(dst_addr, ipst, &generation);
3486 }
3487 ASSERT(dce != NULL);
3488 if (ixa->ixa_dce != NULL)
3489 dce_refrele_notr(ixa->ixa_dce);
3490 #ifdef DEBUG
3491 dce_refhold_notr(dce);
3492 dce_refrele(dce);
3493 #endif
3494 ixa->ixa_dce = dce;
3495 ixa->ixa_dce_generation = generation;
3496
3497 /*
3498 * For multicast with multirt we have a flag passed back from
3499 * ire_lookup_multi_ill_v4 since we don't have an IRE for each
3500 * possible multicast address.
3501 * We also need a flag for multicast since we can't check
3502 * whether RTF_MULTIRT is set in ixa_ire for multicast.
3503 */
3504 if (multirt) {
3505 ixa->ixa_postfragfn = ip_postfrag_multirt_v4;
3506 ixa->ixa_flags |= IXAF_MULTIRT_MULTICAST;
3507 } else {
3508 ixa->ixa_postfragfn = ire->ire_postfragfn;
3509 ixa->ixa_flags &= ~IXAF_MULTIRT_MULTICAST;
3510 }
3511 if (!(ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))) {
3512 /* Get an nce to cache. */
3513 nce = ire_to_nce(ire, firsthop, NULL);
3514 if (nce == NULL) {
3515 /* Allocation failure? */
3516 ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
3517 } else {
3518 if (ixa->ixa_nce != NULL)
3519 nce_refrele(ixa->ixa_nce);
3520 ixa->ixa_nce = nce;
3521 }
3522 }
3523
3524 /*
3525 * If the source address is a loopback address, the
3526 * destination had best be local or multicast.
3527 * If we are sending to an IRE_LOCAL using a loopback source then
3528 * it had better be the same zoneid.
3529 */
3530 if (*src_addrp == htonl(INADDR_LOOPBACK)) {
3531 if ((ire->ire_type & IRE_LOCAL) && ire->ire_zoneid != zoneid) {
3532 ire = NULL; /* Stored in ixa_ire */
3533 error = EADDRNOTAVAIL;
3534 goto bad_addr;
3535 }
3536 if (!(ire->ire_type & (IRE_LOOPBACK|IRE_LOCAL|IRE_MULTICAST))) {
3537 ire = NULL; /* Stored in ixa_ire */
3538 error = EADDRNOTAVAIL;
3539 goto bad_addr;
3540 }
3541 }
3542 if (ire->ire_type & IRE_BROADCAST) {
3543 /*
3544 * If the ULP didn't have a specified source, then we
3545 * make sure we reselect the source when sending
3546 * broadcasts out different interfaces.
3547 */
3548 if (flags & IPDF_SELECT_SRC)
3549 ixa->ixa_flags |= IXAF_SET_SOURCE;
3550 else
3551 ixa->ixa_flags &= ~IXAF_SET_SOURCE;
3552 }
3553
3554 /*
3555 * Does the caller want us to pick a source address?
3556 */
3557 if (flags & IPDF_SELECT_SRC) {
3558 ipaddr_t src_addr;
3559
3560 /*
 * We use ire_nexthop_ill to avoid the under IPMP
 * interface for source address selection. Note that for IPMP
 * probe packets, ixa_ifindex would have been specified, and
 * the ip_select_route() invocation would have picked an ire
 * with ire_ill pointing at an under interface.
3566 */
3567 ill = ire_nexthop_ill(ire);
3568
3569 /* If unreachable we have no ill but need some source */
3570 if (ill == NULL) {
3571 src_addr = htonl(INADDR_LOOPBACK);
3572 /* Make sure we look for a better source address */
3573 generation = SRC_GENERATION_VERIFY;
3574 } else {
3575 error = ip_select_source_v4(ill, setsrc, dst_addr,
3576 ixa->ixa_multicast_ifaddr, zoneid,
3577 ipst, &src_addr, &generation, NULL);
3578 if (error != 0) {
3579 ire = NULL; /* Stored in ixa_ire */
3580 goto bad_addr;
3581 }
3582 }
3583
3584 /*
 * We allow the source address to be down.
3586 * However, we check that we don't use the loopback address
3587 * as a source when sending out on the wire.
3588 */
3589 if ((src_addr == htonl(INADDR_LOOPBACK)) &&
3590 !(ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK|IRE_MULTICAST)) &&
3591 !(ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))) {
3592 ire = NULL; /* Stored in ixa_ire */
3593 error = EADDRNOTAVAIL;
3594 goto bad_addr;
3595 }
3596
3597 *src_addrp = src_addr;
3598 ixa->ixa_src_generation = generation;
3599 }
3600
3601 /*
3602 * Make sure we don't leave an unreachable ixa_nce in place
3603 * since ip_select_route is used when we unplumb i.e., remove
3604 * references on ixa_ire, ixa_nce, and ixa_dce.
3605 */
3606 nce = ixa->ixa_nce;
3607 if (nce != NULL && nce->nce_is_condemned) {
3608 nce_refrele(nce);
3609 ixa->ixa_nce = NULL;
3610 ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
3611 }
3612
3613 /*
3614 * The caller has set IXAF_PMTU_DISCOVERY if path MTU is desired.
3615 * However, we can't do it for IPv4 multicast or broadcast.
3616 */
3617 if (ire->ire_type & (IRE_BROADCAST|IRE_MULTICAST))
3618 ixa->ixa_flags &= ~IXAF_PMTU_DISCOVERY;
3619
3620 /*
 * Set the initial value for the fragmentation limit. Either
 * conn_ip_output or the ULP might update it when there are routing changes.
3623 * Handles a NULL ixa_ire->ire_ill or a NULL ixa_nce for RTF_REJECT.
3624 */
3625 pmtu = ip_get_pmtu(ixa);
3626 ixa->ixa_fragsize = pmtu;
3627 /* Make sure ixa_fragsize and ixa_pmtu remain identical */
3628 if (ixa->ixa_flags & IXAF_VERIFY_PMTU)
3629 ixa->ixa_pmtu = pmtu;
3630
3631 /*
3632 * Extract information useful for some transports.
3633 * First we look for DCE metrics. Then we take what we have in
 * the metrics in the route, where the offlink route is used if we
 * have one.
3636 */
3637 if (uinfo != NULL) {
3638 bzero(uinfo, sizeof (*uinfo));
3639
3640 if (dce->dce_flags & DCEF_UINFO)
3641 *uinfo = dce->dce_uinfo;
3642
3643 rts_merge_metrics(uinfo, &ire->ire_metrics);
3644
3645 /* Allow ire_metrics to decrease the path MTU from above */
3646 if (uinfo->iulp_mtu == 0 || uinfo->iulp_mtu > pmtu)
3647 uinfo->iulp_mtu = pmtu;
3648
3649 uinfo->iulp_localnet = (ire->ire_type & IRE_ONLINK) != 0;
3650 uinfo->iulp_loopback = (ire->ire_type & IRE_LOOPBACK) != 0;
3651 uinfo->iulp_local = (ire->ire_type & IRE_LOCAL) != 0;
3652 }
3653
3654 if (ill != NULL)
3655 ill_refrele(ill);
3656
3657 return (error);
3658
3659 bad_addr:
3660 if (ire != NULL)
3661 ire_refrele(ire);
3662
3663 if (ill != NULL)
3664 ill_refrele(ill);
3665
3666 /*
3667 * Make sure we don't leave an unreachable ixa_nce in place
3668 * since ip_select_route is used when we unplumb i.e., remove
3669 * references on ixa_ire, ixa_nce, and ixa_dce.
3670 */
3671 nce = ixa->ixa_nce;
3672 if (nce != NULL && nce->nce_is_condemned) {
3673 nce_refrele(nce);
3674 ixa->ixa_nce = NULL;
3675 ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
3676 }
3677
3678 return (error);
3679 }
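
/*
 * Illustrative sketch (not part of the build) of a connect()-style
 * caller; the connp fields used below are assumptions about the
 * transport, and dst is the peer address (firsthop == dst when no
 * source route is present):
 *
 *	iulp_t		uinfo;
 *	ipaddr_t	src = connp->conn_saddr_v4;
 *	int		error;
 *
 *	error = ip_set_destination_v4(&src, dst, dst, connp->conn_ixa,
 *	    &uinfo, IPDF_VERIFY_DST | IPDF_SELECT_SRC,
 *	    connp->conn_mac_mode);
 *	if (error != 0)
 *		return (error);
 *
 * The selected source comes back through &src, and the route, dce,
 * and nce are cached in connp->conn_ixa for conn_ip_output().
 */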
3680
3681
3682 /*
3683 * Get the base MTU for the case when path MTU discovery is not used.
3684 * Takes the MTU of the IRE into account.
3685 */
3686 uint_t
3687 ip_get_base_mtu(ill_t *ill, ire_t *ire)
3688 {
3689 uint_t mtu;
3690 uint_t iremtu = ire->ire_metrics.iulp_mtu;
3691
3692 if (ire->ire_type & (IRE_MULTICAST|IRE_BROADCAST))
3693 mtu = ill->ill_mc_mtu;
3694 else
3695 mtu = ill->ill_mtu;
3696
3697 if (iremtu != 0 && iremtu < mtu)
3698 mtu = iremtu;
3699
3700 return (mtu);
3701 }
3702
3703 /*
3704 * Get the PMTU for the attributes. Handles both IPv4 and IPv6.
3705 * Assumes that ixa_ire, dce, and nce have already been set up.
3706 *
3707 * The caller has set IXAF_PMTU_DISCOVERY if path MTU discovery is desired.
3708 * We avoid path MTU discovery if it is disabled with ndd.
 * Furthermore, if the path MTU is too small, then we don't set DF for IPv4.
3710 *
3711 * NOTE: We also used to turn it off for source routed packets. That
3712 * is no longer required since the dce is per final destination.
3713 */
3714 uint_t
3715 ip_get_pmtu(ip_xmit_attr_t *ixa)
3716 {
3717 ip_stack_t *ipst = ixa->ixa_ipst;
3718 dce_t *dce;
3719 nce_t *nce;
3720 ire_t *ire;
3721 uint_t pmtu;
3722
3723 ire = ixa->ixa_ire;
3724 dce = ixa->ixa_dce;
3725 nce = ixa->ixa_nce;
3726
3727 /*
3728 * If path MTU discovery has been turned off by ndd, then we ignore
3729 * any dce_pmtu and for IPv4 we will not set DF.
3730 */
3731 if (!ipst->ips_ip_path_mtu_discovery)
3732 ixa->ixa_flags &= ~IXAF_PMTU_DISCOVERY;
3733
3734 pmtu = IP_MAXPACKET;
3735 /*
 * Decide whether IPv4 sets DF.
 * For IPv6, "no DF" means to use the 1280-byte minimum MTU.
3738 */
3739 if (ixa->ixa_flags & IXAF_PMTU_DISCOVERY) {
3740 ixa->ixa_flags |= IXAF_PMTU_IPV4_DF;
3741 } else {
3742 ixa->ixa_flags &= ~IXAF_PMTU_IPV4_DF;
3743 if (!(ixa->ixa_flags & IXAF_IS_IPV4))
3744 pmtu = IPV6_MIN_MTU;
3745 }
3746
/* Check if the PMTU is too old before we use it */
3748 if ((dce->dce_flags & DCEF_PMTU) &&
3749 TICK_TO_SEC(ddi_get_lbolt64()) - dce->dce_last_change_time >
3750 ipst->ips_ip_pathmtu_interval) {
3751 /*
3752 * Older than 20 minutes. Drop the path MTU information.
3753 */
3754 mutex_enter(&dce->dce_lock);
3755 dce->dce_flags &= ~(DCEF_PMTU|DCEF_TOO_SMALL_PMTU);
3756 dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
3757 mutex_exit(&dce->dce_lock);
3758 dce_increment_generation(dce);
3759 }
3760
3761 /* The metrics on the route can lower the path MTU */
3762 if (ire->ire_metrics.iulp_mtu != 0 &&
3763 ire->ire_metrics.iulp_mtu < pmtu)
3764 pmtu = ire->ire_metrics.iulp_mtu;
3765
3766 /*
3767 * If the path MTU is smaller than some minimum, we still use dce_pmtu
3768 * above (would be 576 for IPv4 and 1280 for IPv6), but we clear
3769 * IXAF_PMTU_IPV4_DF so that we avoid setting DF for IPv4.
3770 */
3771 if (ixa->ixa_flags & IXAF_PMTU_DISCOVERY) {
3772 if (dce->dce_flags & DCEF_PMTU) {
3773 if (dce->dce_pmtu < pmtu)
3774 pmtu = dce->dce_pmtu;
3775
3776 if (dce->dce_flags & DCEF_TOO_SMALL_PMTU) {
3777 ixa->ixa_flags |= IXAF_PMTU_TOO_SMALL;
3778 ixa->ixa_flags &= ~IXAF_PMTU_IPV4_DF;
3779 } else {
3780 ixa->ixa_flags &= ~IXAF_PMTU_TOO_SMALL;
3781 ixa->ixa_flags |= IXAF_PMTU_IPV4_DF;
3782 }
3783 } else {
3784 ixa->ixa_flags &= ~IXAF_PMTU_TOO_SMALL;
3785 ixa->ixa_flags |= IXAF_PMTU_IPV4_DF;
3786 }
3787 }
3788
3789 /*
 * If we have an IRE_LOCAL we use the loopback mtu instead of the
 * mtu of the ill used for going out on the wire, i.e., IRE_LOCAL
 * gets the same mtu as IRE_LOOPBACK.
3793 */
3794 if (ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK)) {
3795 uint_t loopback_mtu;
3796
3797 loopback_mtu = (ire->ire_ipversion == IPV6_VERSION) ?
3798 ip_loopback_mtu_v6plus : ip_loopback_mtuplus;
3799
3800 if (loopback_mtu < pmtu)
3801 pmtu = loopback_mtu;
3802 } else if (nce != NULL) {
3803 /*
3804 * Make sure we don't exceed the interface MTU.
3805 * In the case of RTF_REJECT or RTF_BLACKHOLE we might not have
3806 * an ill. We'd use the above IP_MAXPACKET in that case just
3807 * to tell the transport something larger than zero.
3808 */
3809 if (ire->ire_type & (IRE_MULTICAST|IRE_BROADCAST)) {
3810 if (nce->nce_common->ncec_ill->ill_mc_mtu < pmtu)
3811 pmtu = nce->nce_common->ncec_ill->ill_mc_mtu;
3812 if (nce->nce_common->ncec_ill != nce->nce_ill &&
3813 nce->nce_ill->ill_mc_mtu < pmtu) {
3814 /*
3815 * for interfaces in an IPMP group, the mtu of
3816 * the nce_ill (under_ill) could be different
3817 * from the mtu of the ncec_ill, so we take the
3818 * min of the two.
3819 */
3820 pmtu = nce->nce_ill->ill_mc_mtu;
3821 }
3822 } else {
3823 if (nce->nce_common->ncec_ill->ill_mtu < pmtu)
3824 pmtu = nce->nce_common->ncec_ill->ill_mtu;
3825 if (nce->nce_common->ncec_ill != nce->nce_ill &&
3826 nce->nce_ill->ill_mtu < pmtu) {
3827 /*
3828 * for interfaces in an IPMP group, the mtu of
3829 * the nce_ill (under_ill) could be different
3830 * from the mtu of the ncec_ill, so we take the
3831 * min of the two.
3832 */
3833 pmtu = nce->nce_ill->ill_mtu;
3834 }
3835 }
3836 }
3837
3838 /*
3839 * Handle the IPV6_USE_MIN_MTU socket option or ancillary data.
3840 * Only applies to IPv6.
3841 */
3842 if (!(ixa->ixa_flags & IXAF_IS_IPV4)) {
3843 if (ixa->ixa_flags & IXAF_USE_MIN_MTU) {
3844 switch (ixa->ixa_use_min_mtu) {
3845 case IPV6_USE_MIN_MTU_MULTICAST:
3846 if (ire->ire_type & IRE_MULTICAST)
3847 pmtu = IPV6_MIN_MTU;
3848 break;
3849 case IPV6_USE_MIN_MTU_ALWAYS:
3850 pmtu = IPV6_MIN_MTU;
3851 break;
3852 case IPV6_USE_MIN_MTU_NEVER:
3853 break;
3854 }
3855 } else {
3856 /* Default is IPV6_USE_MIN_MTU_MULTICAST */
3857 if (ire->ire_type & IRE_MULTICAST)
3858 pmtu = IPV6_MIN_MTU;
3859 }
3860 }
3861
3862 /*
 * For multirouted IPv6 packets, the IP layer will insert an 8-byte
3864 * fragment header in every packet. We compensate for those cases by
3865 * returning a smaller path MTU to the ULP.
3866 *
3867 * In the case of CGTP then ip_output will add a fragment header.
3868 * Make sure there is room for it by telling a smaller number
3869 * to the transport.
3870 *
 * When IXAF_IPV6_ADD_FRAGHDR is set we subtract the fragment header
 * here so that the ULPs consistently see an iulp_mtu and an
 * ip_get_pmtu() result reflecting the size of the packets they can send.
3874 */
3875 if (!(ixa->ixa_flags & IXAF_IS_IPV4)) {
3876 if ((ire->ire_flags & RTF_MULTIRT) ||
3877 (ixa->ixa_flags & IXAF_MULTIRT_MULTICAST)) {
3878 pmtu -= sizeof (ip6_frag_t);
3879 ixa->ixa_flags |= IXAF_IPV6_ADD_FRAGHDR;
3880 }
3881 }
3882
3883 return (pmtu);
3884 }
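
/*
 * Illustrative sketch (not part of the build): after a routing change
 * a caller refreshes its fragmentation limit from the PMTU, mirroring
 * what ip_set_destination_v4() does above; ixa is the caller's
 * ip_xmit_attr_t:
 *
 *	uint_t pmtu = ip_get_pmtu(ixa);
 *
 *	ixa->ixa_fragsize = pmtu;
 *	if (ixa->ixa_flags & IXAF_VERIFY_PMTU)
 *		ixa->ixa_pmtu = pmtu;
 *
 * If IXAF_PMTU_IPV4_DF is clear afterwards, DF will not be set and
 * the packet may be fragmented on the way.
 */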
3885
3886 /*
3887 * Carve "len" bytes out of an mblk chain, consuming any we empty, and duping
3888 * the final piece where we don't. Return a pointer to the first mblk in the
3889 * result, and update the pointer to the next mblk to chew on. If anything
3890 * goes wrong (i.e., dupb fails), we waste everything in sight and return a
3891 * NULL pointer.
3892 */
3893 mblk_t *
3894 ip_carve_mp(mblk_t **mpp, ssize_t len)
3895 {
3896 mblk_t *mp0;
3897 mblk_t *mp1;
3898 mblk_t *mp2;
3899
3900 if (!len || !mpp || !(mp0 = *mpp))
3901 return (NULL);
3902 /* If we aren't going to consume the first mblk, we need a dup. */
3903 if (mp0->b_wptr - mp0->b_rptr > len) {
3904 mp1 = dupb(mp0);
3905 if (mp1) {
3906 /* Partition the data between the two mblks. */
3907 mp1->b_wptr = mp1->b_rptr + len;
3908 mp0->b_rptr = mp1->b_wptr;
3909 /*
 * If, after the adjustments, the mblk that was not consumed
 * is now unaligned, try to align it. If this fails, free
 * all messages and let the upper layer recover.
3913 */
3914 if (!OK_32PTR(mp0->b_rptr)) {
3915 if (!pullupmsg(mp0, -1)) {
3916 freemsg(mp0);
3917 freemsg(mp1);
3918 *mpp = NULL;
3919 return (NULL);
3920 }
3921 }
3922 }
3923 return (mp1);
3924 }
3925 /* Eat through as many mblks as we need to get len bytes. */
3926 len -= mp0->b_wptr - mp0->b_rptr;
3927 for (mp2 = mp1 = mp0; (mp2 = mp2->b_cont) != 0 && len; mp1 = mp2) {
3928 if (mp2->b_wptr - mp2->b_rptr > len) {
3929 /*
3930 * We won't consume the entire last mblk. Like
3931 * above, dup and partition it.
3932 */
3933 mp1->b_cont = dupb(mp2);
3934 mp1 = mp1->b_cont;
3935 if (!mp1) {
3936 /*
3937 * Trouble. Rather than go to a lot of
3938 * trouble to clean up, we free the messages.
3939 * This won't be any worse than losing it on
3940 * the wire.
3941 */
3942 freemsg(mp0);
3943 freemsg(mp2);
3944 *mpp = NULL;
3945 return (NULL);
3946 }
3947 mp1->b_wptr = mp1->b_rptr + len;
3948 mp2->b_rptr = mp1->b_wptr;
3949 /*
 * If, after the adjustments, the mblk that was not consumed
 * is now unaligned, try to align it. If this fails, free
 * all messages and let the upper layer recover.
3953 */
3954 if (!OK_32PTR(mp2->b_rptr)) {
3955 if (!pullupmsg(mp2, -1)) {
3956 freemsg(mp0);
3957 freemsg(mp2);
3958 *mpp = NULL;
3959 return (NULL);
3960 }
3961 }
3962 *mpp = mp2;
3963 return (mp0);
3964 }
3965 /* Decrement len by the amount we just got. */
3966 len -= mp2->b_wptr - mp2->b_rptr;
3967 }
3968 /*
 * len should be reduced to zero now. If not, our caller has
 * screwed up.
3971 */
3972 if (len) {
3973 /* Shouldn't happen! */
3974 freemsg(mp0);
3975 *mpp = NULL;
3976 return (NULL);
3977 }
3978 /*
3979 * We consumed up to exactly the end of an mblk. Detach the part
3980 * we are returning from the rest of the chain.
3981 */
3982 mp1->b_cont = NULL;
3983 *mpp = mp2;
3984 return (mp0);
3985 }
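
/*
 * Illustrative sketch (not part of the build): carving a message into
 * fixed-size pieces, e.g. when fragmenting; mp and chunk_len are the
 * caller's. On failure ip_carve_mp() has already freed everything and
 * set the chain pointer to NULL:
 *
 *	mblk_t *chunk;
 *
 *	while (mp != NULL) {
 *		chunk = ip_carve_mp(&mp, chunk_len);
 *		if (chunk == NULL)
 *			break;		(dupb/pullupmsg failed)
 *		(send chunk downstream)
 *	}
 */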
3986
3987 /* The ill stream is being unplumbed. Called from ip_close */
3988 int
3989 ip_modclose(ill_t *ill)
3990 {
3991 boolean_t success;
3992 ipsq_t *ipsq;
3993 ipif_t *ipif;
3994 queue_t *q = ill->ill_rq;
3995 ip_stack_t *ipst = ill->ill_ipst;
3996 int i;
3997 arl_ill_common_t *ai = ill->ill_common;
3998
3999 /*
4000 * The punlink prior to this may have initiated a capability
4001 * negotiation. But ipsq_enter will block until that finishes or
4002 * times out.
4003 */
4004 success = ipsq_enter(ill, B_FALSE, NEW_OP);
4005
4006 /*
4007 * Open/close/push/pop is guaranteed to be single threaded
4008 * per stream by STREAMS. FS guarantees that all references
 * from the top are gone before close is called. So there can't
 * be another close thread that has set CONDEMNED on this ill
 * and caused ipsq_enter to return failure.
4012 */
4013 ASSERT(success);
4014 ipsq = ill->ill_phyint->phyint_ipsq;
4015
4016 /*
4017 * Mark it condemned. No new reference will be made to this ill.
4018 * Lookup functions will return an error. Threads that try to
4019 * increment the refcnt must check for ILL_CAN_LOOKUP. This ensures
4020 * that the refcnt will drop down to zero.
4021 */
4022 mutex_enter(&ill->ill_lock);
4023 ill->ill_state_flags |= ILL_CONDEMNED;
4024 for (ipif = ill->ill_ipif; ipif != NULL;
4025 ipif = ipif->ipif_next) {
4026 ipif->ipif_state_flags |= IPIF_CONDEMNED;
4027 }
4028 /*
4029 * Wake up anybody waiting to enter the ipsq. ipsq_enter
4030 * returns error if ILL_CONDEMNED is set
4031 */
4032 cv_broadcast(&ill->ill_cv);
4033 mutex_exit(&ill->ill_lock);
4034
4035 /*
4036 * Send all the deferred DLPI messages downstream which came in
4037 * during the small window right before ipsq_enter(). We do this
4038 * without waiting for the ACKs because all the ACKs for M_PROTO
4039 * messages are ignored in ip_rput() when ILL_CONDEMNED is set.
4040 */
4041 ill_dlpi_send_deferred(ill);
4042
4043 /*
4044 * Shut down fragmentation reassembly.
4045 * ill_frag_timer won't start a timer again.
4046 * Now cancel any existing timer
4047 */
4048 (void) untimeout(ill->ill_frag_timer_id);
4049 (void) ill_frag_timeout(ill, 0);
4050
4051 /*
4052 * Call ill_delete to bring down the ipifs, ilms and ill on
4053 * this ill. Then wait for the refcnts to drop to zero.
4054 * ill_is_freeable checks whether the ill is really quiescent.
4055 * Then make sure that threads that are waiting to enter the
4056 * ipsq have seen the error returned by ipsq_enter and have
4057 * gone away. Then we call ill_delete_tail which does the
4058 * DL_UNBIND_REQ with the driver and then qprocsoff.
4059 */
4060 ill_delete(ill);
4061 mutex_enter(&ill->ill_lock);
4062 while (!ill_is_freeable(ill))
4063 cv_wait(&ill->ill_cv, &ill->ill_lock);
4064
4065 while (ill->ill_waiters)
4066 cv_wait(&ill->ill_cv, &ill->ill_lock);
4067
4068 mutex_exit(&ill->ill_lock);
4069
4070 /*
4071 * ill_delete_tail drops reference on ill_ipst, but we need to keep
4072 * it held until the end of the function since the cleanup
4073 * below needs to be able to use the ip_stack_t.
4074 */
4075 netstack_hold(ipst->ips_netstack);
4076
4077 /* qprocsoff is done via ill_delete_tail */
4078 ill_delete_tail(ill);
4079 /*
4080 * synchronously wait for arp stream to unbind. After this, we
4081 * cannot get any data packets up from the driver.
4082 */
4083 arp_unbind_complete(ill);
4084 ASSERT(ill->ill_ipst == NULL);
4085
4086 /*
4087 * Walk through all conns and qenable those that have queued data.
4088 * Close synchronization needs this to
4089 * be done to ensure that all upper layers blocked
4090 * due to flow control to the closing device
4091 * get unblocked.
4092 */
4093 ip1dbg(("ip_wsrv: walking\n"));
4094 for (i = 0; i < TX_FANOUT_SIZE; i++) {
4095 conn_walk_drain(ipst, &ipst->ips_idl_tx_list[i]);
4096 }
4097
4098 /*
4099 * ai can be null if this is an IPv6 ill, or if the IPv4
4100 * stream is being torn down before ARP was plumbed (e.g.,
4101 * /sbin/ifconfig plumbing a stream twice, and encountering
 * an error).
4103 */
4104 if (ai != NULL) {
4105 ASSERT(!ill->ill_isv6);
4106 mutex_enter(&ai->ai_lock);
4107 ai->ai_ill = NULL;
4108 if (ai->ai_arl == NULL) {
4109 mutex_destroy(&ai->ai_lock);
4110 kmem_free(ai, sizeof (*ai));
4111 } else {
4112 cv_signal(&ai->ai_ill_unplumb_done);
4113 mutex_exit(&ai->ai_lock);
4114 }
4115 }
4116
4117 mutex_enter(&ipst->ips_ip_mi_lock);
4118 mi_close_unlink(&ipst->ips_ip_g_head, (IDP)ill);
4119 mutex_exit(&ipst->ips_ip_mi_lock);
4120
4121 /*
4122 * credp could be null if the open didn't succeed and ip_modopen
4123 * itself calls ip_close.
4124 */
4125 if (ill->ill_credp != NULL)
4126 crfree(ill->ill_credp);
4127
4128 mutex_destroy(&ill->ill_saved_ire_lock);
4129 mutex_destroy(&ill->ill_lock);
4130 rw_destroy(&ill->ill_mcast_lock);
4131 mutex_destroy(&ill->ill_mcast_serializer);
4132 list_destroy(&ill->ill_nce);
4133
4134 /*
4135 * Now we are done with the module close pieces that
4136 * need the netstack_t.
4137 */
4138 netstack_rele(ipst->ips_netstack);
4139
4140 mi_close_free((IDP)ill);
4141 q->q_ptr = WR(q)->q_ptr = NULL;
4142
4143 ipsq_exit(ipsq);
4144
4145 return (0);
4146 }
4147
4148 /*
4149 * This is called as part of close() for IP, UDP, ICMP, and RTS
4150 * in order to quiesce the conn.
4151 */
4152 void
4153 ip_quiesce_conn(conn_t *connp)
4154 {
4155 boolean_t drain_cleanup_reqd = B_FALSE;
4156 boolean_t conn_ioctl_cleanup_reqd = B_FALSE;
4157 boolean_t ilg_cleanup_reqd = B_FALSE;
4158 ip_stack_t *ipst;
4159
4160 ASSERT(!IPCL_IS_TCP(connp));
4161 ipst = connp->conn_netstack->netstack_ip;
4162
4163 /*
 * Mark the conn as closing so that this conn can't be
 * inserted into any list in the future, e.g., conn_drain_insert()
 * won't insert this conn into the conn_drain_list.
 *
 * conn_idl and conn_ilg cannot get set henceforth.
4169 */
4170 mutex_enter(&connp->conn_lock);
4171 ASSERT(!(connp->conn_state_flags & CONN_QUIESCED));
4172 connp->conn_state_flags |= CONN_CLOSING;
4173 if (connp->conn_idl != NULL)
4174 drain_cleanup_reqd = B_TRUE;
4175 if (connp->conn_oper_pending_ill != NULL)
4176 conn_ioctl_cleanup_reqd = B_TRUE;
4177 if (connp->conn_dhcpinit_ill != NULL) {
4178 ASSERT(connp->conn_dhcpinit_ill->ill_dhcpinit != 0);
4179 atomic_dec_32(&connp->conn_dhcpinit_ill->ill_dhcpinit);
4180 ill_set_inputfn(connp->conn_dhcpinit_ill);
4181 connp->conn_dhcpinit_ill = NULL;
4182 }
4183 if (connp->conn_ilg != NULL)
4184 ilg_cleanup_reqd = B_TRUE;
4185 mutex_exit(&connp->conn_lock);
4186
4187 if (conn_ioctl_cleanup_reqd)
4188 conn_ioctl_cleanup(connp);
4189
4190 if (is_system_labeled() && connp->conn_anon_port) {
4191 (void) tsol_mlp_anon(crgetzone(connp->conn_cred),
4192 connp->conn_mlp_type, connp->conn_proto,
4193 ntohs(connp->conn_lport), B_FALSE);
4194 connp->conn_anon_port = 0;
4195 }
4196 connp->conn_mlp_type = mlptSingle;
4197
4198 /*
 * Remove this conn from any fanout list it is on,
 * and then wait for any threads currently operating
 * on this endpoint to finish.
4202 */
4203 ipcl_hash_remove(connp);
4204
4205 /*
4206 * Remove this conn from the drain list, and do any other cleanup that
4207 * may be required. (TCP conns are never flow controlled, and
4208 * conn_idl will be NULL.)
4209 */
4210 if (drain_cleanup_reqd && connp->conn_idl != NULL) {
4211 idl_t *idl = connp->conn_idl;
4212
4213 mutex_enter(&idl->idl_lock);
4214 conn_drain(connp, B_TRUE);
4215 mutex_exit(&idl->idl_lock);
4216 }
4217
4218 if (connp == ipst->ips_ip_g_mrouter)
4219 (void) ip_mrouter_done(ipst);
4220
4221 if (ilg_cleanup_reqd)
4222 ilg_delete_all(connp);
4223
4224 /*
 * Now the conn refcnt can increase only through CONN_INC_REF_LOCKED.
 * Callers from the write side can't be there now because close
4227 * is in progress. The only other caller is ipcl_walk
4228 * which checks for the condemned flag.
4229 */
4230 mutex_enter(&connp->conn_lock);
4231 connp->conn_state_flags |= CONN_CONDEMNED;
4232 while (connp->conn_ref != 1)
4233 cv_wait(&connp->conn_cv, &connp->conn_lock);
4234 connp->conn_state_flags |= CONN_QUIESCED;
4235 mutex_exit(&connp->conn_lock);
4236 }
4237
4238 /* ARGSUSED */
4239 int
4240 ip_close(queue_t *q, int flags)
4241 {
4242 conn_t *connp;
4243
4244 /*
4245 * Call the appropriate delete routine depending on whether this is
4246 * a module or device.
4247 */
4248 if (WR(q)->q_next != NULL) {
4249 /* This is a module close */
4250 return (ip_modclose((ill_t *)q->q_ptr));
4251 }
4252
4253 connp = q->q_ptr;
4254 ip_quiesce_conn(connp);
4255
4256 qprocsoff(q);
4257
4258 /*
4259 * Now we are truly single threaded on this stream, and can
4260 * delete the things hanging off the connp, and finally the connp.
4261 * We removed this connp from the fanout list, it cannot be
4262 * accessed thru the fanouts, and we already waited for the
4263 * conn_ref to drop to 0. We are already in close, so
4264 * there cannot be any other thread from the top. qprocsoff
4265 * has completed, and service has completed or won't run in
4266 * future.
4267 */
4268 ASSERT(connp->conn_ref == 1);
4269
4270 inet_minor_free(connp->conn_minor_arena, connp->conn_dev);
4271
4272 connp->conn_ref--;
4273 ipcl_conn_destroy(connp);
4274
4275 q->q_ptr = WR(q)->q_ptr = NULL;
4276 return (0);
4277 }
4278
4279 /*
 * Wrapper around putnext() so that ip_rts_request can merely use
4281 * conn_recv.
4282 */
4283 /*ARGSUSED2*/
4284 static void
4285 ip_conn_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
4286 {
4287 conn_t *connp = (conn_t *)arg1;
4288
4289 putnext(connp->conn_rq, mp);
4290 }
4291
4292 /* Dummy in case ICMP error delivery is attempted to a /dev/ip instance */
4293 /* ARGSUSED */
4294 static void
4295 ip_conn_input_icmp(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
4296 {
4297 freemsg(mp);
4298 }
4299
4300 /*
4301 * Called when the module is about to be unloaded
4302 */
4303 void
4304 ip_ddi_destroy(void)
4305 {
4306 /* This needs to be called before destroying any transports. */
4307 mutex_enter(&cpu_lock);
4308 unregister_cpu_setup_func(ip_tp_cpu_update, NULL);
4309 mutex_exit(&cpu_lock);
4310
4311 tnet_fini();
4312
4313 icmp_ddi_g_destroy();
4314 rts_ddi_g_destroy();
4315 udp_ddi_g_destroy();
4316 sctp_ddi_g_destroy();
4317 tcp_ddi_g_destroy();
4318 ilb_ddi_g_destroy();
4319 dce_g_destroy();
4320 ipsec_policy_g_destroy();
4321 ipcl_g_destroy();
4322 ip_net_g_destroy();
4323 ip_ire_g_fini();
4324 inet_minor_destroy(ip_minor_arena_sa);
4325 #if defined(_LP64)
4326 inet_minor_destroy(ip_minor_arena_la);
4327 #endif
4328
4329 #ifdef DEBUG
4330 list_destroy(&ip_thread_list);
4331 rw_destroy(&ip_thread_rwlock);
4332 tsd_destroy(&ip_thread_data);
4333 #endif
4334
4335 netstack_unregister(NS_IP);
4336 }
4337
4338 /*
4339 * First step in cleanup.
4340 */
4341 /* ARGSUSED */
4342 static void
4343 ip_stack_shutdown(netstackid_t stackid, void *arg)
4344 {
4345 ip_stack_t *ipst = (ip_stack_t *)arg;
4346 kt_did_t ktid;
4347
4348 #ifdef NS_DEBUG
4349 printf("ip_stack_shutdown(%p, stack %d)\n", (void *)ipst, stackid);
4350 #endif
4351
4352 /*
4353 * Perform cleanup for special interfaces (loopback and IPMP).
4354 */
4355 ip_interface_cleanup(ipst);
4356
4357 /*
4358 * The *_hook_shutdown()s start the process of notifying any
 * consumers that things are going away... nothing is destroyed.
4360 */
4361 ipv4_hook_shutdown(ipst);
4362 ipv6_hook_shutdown(ipst);
4363 arp_hook_shutdown(ipst);
4364
4365 mutex_enter(&ipst->ips_capab_taskq_lock);
4366 ktid = ipst->ips_capab_taskq_thread->t_did;
4367 ipst->ips_capab_taskq_quit = B_TRUE;
4368 cv_signal(&ipst->ips_capab_taskq_cv);
4369 mutex_exit(&ipst->ips_capab_taskq_lock);
4370
4371 /*
4372 * In rare occurrences, particularly on virtual hardware where CPUs can
4373 * be de-scheduled, the thread that we just signaled will not run until
4374 * after we have gotten through parts of ip_stack_fini. If that happens
 * then it would try to grab the ips_capab_taskq_lock, which no longer
 * exists, as part of returning from cv_wait; joining the thread here
 * prevents that.
4377 */
4378 thread_join(ktid);
4379 }
4380
4381 /*
4382 * Free the IP stack instance.
4383 */
4384 static void
4385 ip_stack_fini(netstackid_t stackid, void *arg)
4386 {
4387 ip_stack_t *ipst = (ip_stack_t *)arg;
4388 int ret;
4389
4390 #ifdef NS_DEBUG
4391 printf("ip_stack_fini(%p, stack %d)\n", (void *)ipst, stackid);
4392 #endif
4393 /*
4394 * At this point, all of the notifications that the events and
4395 * protocols are going away have been run, meaning that we can
4396 * now set about starting to clean things up.
4397 */
4398 ipobs_fini(ipst);
4399 ipv4_hook_destroy(ipst);
4400 ipv6_hook_destroy(ipst);
4401 arp_hook_destroy(ipst);
4402 ip_net_destroy(ipst);
4403
4404 ipmp_destroy(ipst);
4405
4406 ip_kstat_fini(stackid, ipst->ips_ip_mibkp);
4407 ipst->ips_ip_mibkp = NULL;
4408 icmp_kstat_fini(stackid, ipst->ips_icmp_mibkp);
4409 ipst->ips_icmp_mibkp = NULL;
4410 ip_kstat2_fini(stackid, ipst->ips_ip_kstat);
4411 ipst->ips_ip_kstat = NULL;
4412 bzero(&ipst->ips_ip_statistics, sizeof (ipst->ips_ip_statistics));
4413 ip6_kstat_fini(stackid, ipst->ips_ip6_kstat);
4414 ipst->ips_ip6_kstat = NULL;
4415 bzero(&ipst->ips_ip6_statistics, sizeof (ipst->ips_ip6_statistics));
4416
4417 kmem_free(ipst->ips_propinfo_tbl,
4418 ip_propinfo_count * sizeof (mod_prop_info_t));
4419 ipst->ips_propinfo_tbl = NULL;
4420
4421 dce_stack_destroy(ipst);
4422 ip_mrouter_stack_destroy(ipst);
4423
4424 /*
4425 * Quiesce all of our timers. Note we set the quiesce flags before we
4426 * call untimeout. The slowtimers may actually kick off another instance
4427 * of the non-slow timers.
4428 */
4429 mutex_enter(&ipst->ips_igmp_timer_lock);
4430 ipst->ips_igmp_timer_quiesce = B_TRUE;
4431 mutex_exit(&ipst->ips_igmp_timer_lock);
4432
4433 mutex_enter(&ipst->ips_mld_timer_lock);
4434 ipst->ips_mld_timer_quiesce = B_TRUE;
4435 mutex_exit(&ipst->ips_mld_timer_lock);
4436
4437 mutex_enter(&ipst->ips_igmp_slowtimeout_lock);
4438 ipst->ips_igmp_slowtimeout_quiesce = B_TRUE;
4439 mutex_exit(&ipst->ips_igmp_slowtimeout_lock);
4440
4441 mutex_enter(&ipst->ips_mld_slowtimeout_lock);
4442 ipst->ips_mld_slowtimeout_quiesce = B_TRUE;
4443 mutex_exit(&ipst->ips_mld_slowtimeout_lock);
4444
4445 ret = untimeout(ipst->ips_igmp_timeout_id);
4446 if (ret == -1) {
4447 ASSERT(ipst->ips_igmp_timeout_id == 0);
4448 } else {
4449 ASSERT(ipst->ips_igmp_timeout_id != 0);
4450 ipst->ips_igmp_timeout_id = 0;
4451 }
4452 ret = untimeout(ipst->ips_igmp_slowtimeout_id);
4453 if (ret == -1) {
4454 ASSERT(ipst->ips_igmp_slowtimeout_id == 0);
4455 } else {
4456 ASSERT(ipst->ips_igmp_slowtimeout_id != 0);
4457 ipst->ips_igmp_slowtimeout_id = 0;
4458 }
4459 ret = untimeout(ipst->ips_mld_timeout_id);
4460 if (ret == -1) {
4461 ASSERT(ipst->ips_mld_timeout_id == 0);
4462 } else {
4463 ASSERT(ipst->ips_mld_timeout_id != 0);
4464 ipst->ips_mld_timeout_id = 0;
4465 }
4466 ret = untimeout(ipst->ips_mld_slowtimeout_id);
4467 if (ret == -1) {
4468 ASSERT(ipst->ips_mld_slowtimeout_id == 0);
4469 } else {
4470 ASSERT(ipst->ips_mld_slowtimeout_id != 0);
4471 ipst->ips_mld_slowtimeout_id = 0;
4472 }
4473
4474 ip_ire_fini(ipst);
4475 ip6_asp_free(ipst);
4476 conn_drain_fini(ipst);
4477 ipcl_destroy(ipst);
4478
4479 mutex_destroy(&ipst->ips_ndp4->ndp_g_lock);
4480 mutex_destroy(&ipst->ips_ndp6->ndp_g_lock);
4481 kmem_free(ipst->ips_ndp4, sizeof (ndp_g_t));
4482 ipst->ips_ndp4 = NULL;
4483 kmem_free(ipst->ips_ndp6, sizeof (ndp_g_t));
4484 ipst->ips_ndp6 = NULL;
4485
4486 if (ipst->ips_loopback_ksp != NULL) {
4487 kstat_delete_netstack(ipst->ips_loopback_ksp, stackid);
4488 ipst->ips_loopback_ksp = NULL;
4489 }
4490
4491 mutex_destroy(&ipst->ips_capab_taskq_lock);
4492 cv_destroy(&ipst->ips_capab_taskq_cv);
4493
4494 rw_destroy(&ipst->ips_srcid_lock);
4495
4496 mutex_destroy(&ipst->ips_ip_mi_lock);
4497 rw_destroy(&ipst->ips_ill_g_usesrc_lock);
4498
4499 mutex_destroy(&ipst->ips_igmp_timer_lock);
4500 mutex_destroy(&ipst->ips_mld_timer_lock);
4501 mutex_destroy(&ipst->ips_igmp_slowtimeout_lock);
4502 mutex_destroy(&ipst->ips_mld_slowtimeout_lock);
4503 mutex_destroy(&ipst->ips_ip_addr_avail_lock);
4504 rw_destroy(&ipst->ips_ill_g_lock);
4505
4506 kmem_free(ipst->ips_phyint_g_list, sizeof (phyint_list_t));
4507 ipst->ips_phyint_g_list = NULL;
4508 kmem_free(ipst->ips_ill_g_heads, sizeof (ill_g_head_t) * MAX_G_HEADS);
4509 ipst->ips_ill_g_heads = NULL;
4510
4511 ldi_ident_release(ipst->ips_ldi_ident);
4512 kmem_free(ipst, sizeof (*ipst));
4513 }
4514
4515 /*
4516 * This function is called from the TSD destructor, and is used to debug
4517 * reference count issues in IP. See block comment in <inet/ip_if.h> for
4518 * details.
4519 */
4520 static void
4521 ip_thread_exit(void *phash)
4522 {
4523 th_hash_t *thh = phash;
4524
4525 rw_enter(&ip_thread_rwlock, RW_WRITER);
4526 list_remove(&ip_thread_list, thh);
4527 rw_exit(&ip_thread_rwlock);
4528 mod_hash_destroy_hash(thh->thh_hash);
4529 kmem_free(thh, sizeof (*thh));
4530 }
4531
4532 /*
4533 * Called when the IP kernel module is loaded into the kernel
4534 */
4535 void
4536 ip_ddi_init(void)
4537 {
4538 ip_squeue_flag = ip_squeue_switch(ip_squeue_enter);
4539
4540 /*
4541 * For IP and TCP the minor numbers should start from 2 since we have 4
4542 * initial devices: ip, ip6, tcp, tcp6.
4543 */
4544 /*
4545 * If this is a 64-bit kernel, then create two separate arenas -
 * one for TLI in the range of INET_MIN_DEV+2 through 2^18-1, and the
 * other for socket apps in the range 2^18 through 2^32-1.
4548 */
4549 ip_minor_arena_la = NULL;
4550 ip_minor_arena_sa = NULL;
4551 #if defined(_LP64)
4552 if ((ip_minor_arena_sa = inet_minor_create("ip_minor_arena_sa",
4553 INET_MIN_DEV + 2, MAXMIN32, KM_SLEEP)) == NULL) {
4554 cmn_err(CE_PANIC,
4555 "ip_ddi_init: ip_minor_arena_sa creation failed\n");
4556 }
4557 if ((ip_minor_arena_la = inet_minor_create("ip_minor_arena_la",
4558 MAXMIN32 + 1, MAXMIN64, KM_SLEEP)) == NULL) {
4559 cmn_err(CE_PANIC,
4560 "ip_ddi_init: ip_minor_arena_la creation failed\n");
4561 }
4562 #else
4563 if ((ip_minor_arena_sa = inet_minor_create("ip_minor_arena_sa",
4564 INET_MIN_DEV + 2, MAXMIN, KM_SLEEP)) == NULL) {
4565 cmn_err(CE_PANIC,
4566 "ip_ddi_init: ip_minor_arena_sa creation failed\n");
4567 }
4568 #endif
4569 ip_poll_normal_ticks = MSEC_TO_TICK_ROUNDUP(ip_poll_normal_ms);
4570
4571 cc_init();
4572
4573 ipcl_g_init();
4574 ip_ire_g_init();
4575 ip_net_g_init();
4576
4577 #ifdef DEBUG
4578 tsd_create(&ip_thread_data, ip_thread_exit);
4579 rw_init(&ip_thread_rwlock, NULL, RW_DEFAULT, NULL);
4580 list_create(&ip_thread_list, sizeof (th_hash_t),
4581 offsetof(th_hash_t, thh_link));
4582 #endif
4583 ipsec_policy_g_init();
4584 tcp_ddi_g_init();
4585 sctp_ddi_g_init();
4586 dce_g_init();
4587
4588 /*
4589 * We want to be informed each time a stack is created or
4590 * destroyed in the kernel, so we can maintain the
 * set of ip_stack_t's.
4592 */
4593 netstack_register(NS_IP, ip_stack_init, ip_stack_shutdown,
4594 ip_stack_fini);
4595
4596 tnet_init();
4597
4598 udp_ddi_g_init();
4599 rts_ddi_g_init();
4600 icmp_ddi_g_init();
4601 ilb_ddi_g_init();
4602
4603 /* This needs to be called after all transports are initialized. */
4604 mutex_enter(&cpu_lock);
4605 register_cpu_setup_func(ip_tp_cpu_update, NULL);
4606 mutex_exit(&cpu_lock);
4607 }
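
/*
 * Illustrative sketch (not part of the build) of the per-stack life
 * cycle registered above; NS_FOO, foo_stack_t, and the callbacks are
 * hypothetical names for some other netstack consumer:
 *
 *	static void *
 *	foo_stack_init(netstackid_t stackid, netstack_t *ns)
 *	{
 *		foo_stack_t *fst = kmem_zalloc(sizeof (*fst), KM_SLEEP);
 *
 *		fst->fs_netstack = ns;
 *		return (fst);
 *	}
 *
 *	netstack_register(NS_FOO, foo_stack_init, foo_stack_shutdown,
 *	    foo_stack_fini);
 *
 * The shutdown callback quiesces (nothing is destroyed) and the fini
 * callback frees, matching ip_stack_shutdown()/ip_stack_fini() above.
 */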
4608
4609 /*
4610 * Initialize the IP stack instance.
4611 */
4612 static void *
4613 ip_stack_init(netstackid_t stackid, netstack_t *ns)
4614 {
4615 ip_stack_t *ipst;
4616 size_t arrsz;
4617 major_t major;
4618
4619 #ifdef NS_DEBUG
4620 printf("ip_stack_init(stack %d)\n", stackid);
4621 #endif
4622
4623 ipst = (ip_stack_t *)kmem_zalloc(sizeof (*ipst), KM_SLEEP);
4624 ipst->ips_netstack = ns;
4625
4626 ipst->ips_ill_g_heads = kmem_zalloc(sizeof (ill_g_head_t) * MAX_G_HEADS,
4627 KM_SLEEP);
4628 ipst->ips_phyint_g_list = kmem_zalloc(sizeof (phyint_list_t),
4629 KM_SLEEP);
4630 ipst->ips_ndp4 = kmem_zalloc(sizeof (ndp_g_t), KM_SLEEP);
4631 ipst->ips_ndp6 = kmem_zalloc(sizeof (ndp_g_t), KM_SLEEP);
4632 mutex_init(&ipst->ips_ndp4->ndp_g_lock, NULL, MUTEX_DEFAULT, NULL);
4633 mutex_init(&ipst->ips_ndp6->ndp_g_lock, NULL, MUTEX_DEFAULT, NULL);
4634
4635 mutex_init(&ipst->ips_igmp_timer_lock, NULL, MUTEX_DEFAULT, NULL);
4636 ipst->ips_igmp_deferred_next = INFINITY;
4637 mutex_init(&ipst->ips_mld_timer_lock, NULL, MUTEX_DEFAULT, NULL);
4638 ipst->ips_mld_deferred_next = INFINITY;
4639 mutex_init(&ipst->ips_igmp_slowtimeout_lock, NULL, MUTEX_DEFAULT, NULL);
4640 mutex_init(&ipst->ips_mld_slowtimeout_lock, NULL, MUTEX_DEFAULT, NULL);
4641 mutex_init(&ipst->ips_ip_mi_lock, NULL, MUTEX_DEFAULT, NULL);
4642 mutex_init(&ipst->ips_ip_addr_avail_lock, NULL, MUTEX_DEFAULT, NULL);
4643 rw_init(&ipst->ips_ill_g_lock, NULL, RW_DEFAULT, NULL);
4644 rw_init(&ipst->ips_ill_g_usesrc_lock, NULL, RW_DEFAULT, NULL);
4645
4646 ipcl_init(ipst);
4647 ip_ire_init(ipst);
4648 ip6_asp_init(ipst);
4649 ipif_init(ipst);
4650 conn_drain_init(ipst);
4651 ip_mrouter_stack_init(ipst);
4652 dce_stack_init(ipst);
4653
4654 ipst->ips_ip_multirt_log_interval = 1000;
4655
4656 ipst->ips_ill_index = 1;
4657
4658 ipst->ips_saved_ip_forwarding = -1;
4659 ipst->ips_reg_vif_num = ALL_VIFS; /* Index to Register vif */
4660
4661 arrsz = ip_propinfo_count * sizeof (mod_prop_info_t);
4662 ipst->ips_propinfo_tbl = (mod_prop_info_t *)kmem_alloc(arrsz, KM_SLEEP);
4663 bcopy(ip_propinfo_tbl, ipst->ips_propinfo_tbl, arrsz);
4664
4665 ipst->ips_ip_mibkp = ip_kstat_init(stackid, ipst);
4666 ipst->ips_icmp_mibkp = icmp_kstat_init(stackid);
4667 ipst->ips_ip_kstat = ip_kstat2_init(stackid, &ipst->ips_ip_statistics);
4668 ipst->ips_ip6_kstat =
4669 ip6_kstat_init(stackid, &ipst->ips_ip6_statistics);
4670
4671 ipst->ips_ip_src_id = 1;
4672 rw_init(&ipst->ips_srcid_lock, NULL, RW_DEFAULT, NULL);
4673
4674 ipst->ips_src_generation = SRC_GENERATION_INITIAL;
4675
4676 ip_net_init(ipst, ns);
4677 ipv4_hook_init(ipst);
4678 ipv6_hook_init(ipst);
4679 arp_hook_init(ipst);
4680 ipmp_init(ipst);
4681 ipobs_init(ipst);
4682
4683 /*
4684 * Create the taskq dispatcher thread and initialize related stuff.
4685 */
4686 mutex_init(&ipst->ips_capab_taskq_lock, NULL, MUTEX_DEFAULT, NULL);
4687 cv_init(&ipst->ips_capab_taskq_cv, NULL, CV_DEFAULT, NULL);
4688 ipst->ips_capab_taskq_thread = thread_create(NULL, 0,
4689 ill_taskq_dispatch, ipst, 0, &p0, TS_RUN, minclsyspri);
4690
4691 major = mod_name_to_major(INET_NAME);
4692 (void) ldi_ident_from_major(major, &ipst->ips_ldi_ident);
4693 return (ipst);
4694 }
4695
4696 /*
4697 * Allocate and initialize a DLPI template of the specified length. (May be
4698 * called as writer.)
4699 */
4700 mblk_t *
4701 ip_dlpi_alloc(size_t len, t_uscalar_t prim)
4702 {
4703 mblk_t *mp;
4704
4705 mp = allocb(len, BPRI_MED);
4706 if (!mp)
4707 return (NULL);
4708
4709 /*
4710 * DLPIv2 says that DL_INFO_REQ and DL_TOKEN_REQ (the latter
4711 * of which we don't seem to use) are sent with M_PCPROTO, and
 * that other DLPI messages are M_PROTO.
4713 */
4714 if (prim == DL_INFO_REQ) {
4715 mp->b_datap->db_type = M_PCPROTO;
4716 } else {
4717 mp->b_datap->db_type = M_PROTO;
4718 }
4719
4720 mp->b_wptr = mp->b_rptr + len;
4721 bzero(mp->b_rptr, len);
4722 ((dl_unitdata_req_t *)mp->b_rptr)->dl_primitive = prim;
4723 return (mp);
4724 }
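
/*
 * Illustrative sketch (not part of the build): allocating a
 * DL_BIND_REQ template for transmission downstream; sap is assumed to
 * come from the caller:
 *
 *	dl_bind_req_t	*dlbr;
 *	mblk_t		*mp;
 *
 *	mp = ip_dlpi_alloc(sizeof (dl_bind_req_t), DL_BIND_REQ);
 *	if (mp == NULL)
 *		return (ENOMEM);
 *	dlbr = (dl_bind_req_t *)mp->b_rptr;
 *	dlbr->dl_sap = sap;
 *	dlbr->dl_service_mode = DL_CLDLS;
 */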
4725
4726 /*
4727 * Allocate and initialize a DLPI notification. (May be called as writer.)
4728 */
4729 mblk_t *
4730 ip_dlnotify_alloc(uint_t notification, uint_t data)
4731 {
4732 dl_notify_ind_t *notifyp;
4733 mblk_t *mp;
4734
4735 if ((mp = ip_dlpi_alloc(DL_NOTIFY_IND_SIZE, DL_NOTIFY_IND)) == NULL)
4736 return (NULL);
4737
4738 notifyp = (dl_notify_ind_t *)mp->b_rptr;
4739 notifyp->dl_notification = notification;
4740 notifyp->dl_data = data;
4741 return (mp);
4742 }
4743
4744 mblk_t *
4745 ip_dlnotify_alloc2(uint_t notification, uint_t data1, uint_t data2)
4746 {
4747 dl_notify_ind_t *notifyp;
4748 mblk_t *mp;
4749
4750 if ((mp = ip_dlpi_alloc(DL_NOTIFY_IND_SIZE, DL_NOTIFY_IND)) == NULL)
4751 return (NULL);
4752
4753 notifyp = (dl_notify_ind_t *)mp->b_rptr;
4754 notifyp->dl_notification = notification;
4755 notifyp->dl_data1 = data1;
4756 notifyp->dl_data2 = data2;
4757 return (mp);
4758 }
4759
4760 /*
4761 * Debug formatting routine. Returns a character string representation of the
4762 * addr in buf, of the form xxx.xxx.xxx.xxx. This routine takes the address
 * in the form of an ipaddr_t and calls ip_dot_saddr with a pointer.
4764 *
4765 * Once the ndd table-printing interfaces are removed, this can be changed to
4766 * standard dotted-decimal form.
4767 */
4768 char *
4769 ip_dot_addr(ipaddr_t addr, char *buf)
4770 {
4771 uint8_t *ap = (uint8_t *)&addr;
4772
4773 (void) mi_sprintf(buf, "%03d.%03d.%03d.%03d",
4774 ap[0] & 0xFF, ap[1] & 0xFF, ap[2] & 0xFF, ap[3] & 0xFF);
4775 return (buf);
4776 }
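
/*
 * Illustrative sketch (not part of the build): the zero-padded format
 * above is at most 15 characters plus the terminating NUL, so a
 * 16-byte buffer always suffices:
 *
 *	char buf[16];
 *
 *	ip1dbg(("dst %s\n", ip_dot_addr(ipha->ipha_dst, buf)));
 */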
4777
4778 /*
4779 * Write the given MAC address as a printable string in the usual colon-
4780 * separated format.
4781 */
4782 const char *
4783 mac_colon_addr(const uint8_t *addr, size_t alen, char *buf, size_t buflen)
4784 {
4785 char *bp;
4786
4787 if (alen == 0 || buflen < 4)
4788 return ("?");
4789 bp = buf;
4790 for (;;) {
4791 /*
4792 * If there are more MAC address bytes available, but we won't
4793 * have any room to print them, then add "..." to the string
4794 * instead. See below for the 'magic number' explanation.
4795 */
4796 if ((alen == 2 && buflen < 6) || (alen > 2 && buflen < 7)) {
4797 (void) strcpy(bp, "...");
4798 break;
4799 }
4800 (void) sprintf(bp, "%02x", *addr++);
4801 bp += 2;
4802 if (--alen == 0)
4803 break;
4804 *bp++ = ':';
4805 buflen -= 3;
4806 /*
4807 * At this point, based on the first 'if' statement above,
4808 * either alen == 1 and buflen >= 3, or alen > 1 and
4809 * buflen >= 4. The first case leaves room for the final "xx"
4810 * number and trailing NUL byte. The second leaves room for at
4811 * least "...". Thus the apparently 'magic' numbers chosen for
4812 * that statement.
4813 */
4814 }
4815 return (buf);
4816 }
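
/*
 * Illustrative sketch (not part of the build): each address byte
 * expands to "xx:" (the last one to "xx\0"), so 3 * alen bytes are
 * always sufficient; lladdr is assumed to be an Ethernet address:
 *
 *	char buf[3 * ETHERADDRL];
 *
 *	ip1dbg(("mac %s\n", mac_colon_addr(lladdr, ETHERADDRL,
 *	    buf, sizeof (buf))));
 */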
4817
4818 /*
 * Called when it is conceptually a ULP that would have sent the packet,
4820 * e.g., port unreachable and protocol unreachable. Check that the packet
4821 * would have passed the IPsec global policy before sending the error.
4822 *
4823 * Send an ICMP error after patching up the packet appropriately.
4824 * Uses ip_drop_input and bumps the appropriate MIB.
4825 */
4826 void
4827 ip_fanout_send_icmp_v4(mblk_t *mp, uint_t icmp_type, uint_t icmp_code,
4828 ip_recv_attr_t *ira)
4829 {
4830 ipha_t *ipha;
4831 boolean_t secure;
4832 ill_t *ill = ira->ira_ill;
4833 ip_stack_t *ipst = ill->ill_ipst;
4834 netstack_t *ns = ipst->ips_netstack;
4835 ipsec_stack_t *ipss = ns->netstack_ipsec;
4836
4837 secure = ira->ira_flags & IRAF_IPSEC_SECURE;
4838
4839 /*
4840 * We are generating an icmp error for some inbound packet.
4841 * Called from all ip_fanout_(udp, tcp, proto) functions.
4842 * Before we generate an error, check with global policy
4843 * to see whether this is allowed to enter the system. As
4844 * there is no "conn", we are checking with global policy.
4845 */
4846 ipha = (ipha_t *)mp->b_rptr;
4847 if (secure || ipss->ipsec_inbound_v4_policy_present) {
4848 mp = ipsec_check_global_policy(mp, NULL, ipha, NULL, ira, ns);
4849 if (mp == NULL)
4850 return;
4851 }
4852
4853 /* We never send errors for protocols that we do implement */
4854 if (ira->ira_protocol == IPPROTO_ICMP ||
4855 ira->ira_protocol == IPPROTO_IGMP) {
4856 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
4857 ip_drop_input("ip_fanout_send_icmp_v4", mp, ill);
4858 freemsg(mp);
4859 return;
4860 }
4861 /*
 * Have to correct the checksum since the packet might have
 * been fragmented and the reassembly code in ip_rput does not
 * restore the IP checksum.
4866 */
4867 ipha->ipha_hdr_checksum = 0;
4868 ipha->ipha_hdr_checksum = ip_csum_hdr(ipha);
4869
4870 switch (icmp_type) {
4871 case ICMP_DEST_UNREACHABLE:
4872 switch (icmp_code) {
4873 case ICMP_PROTOCOL_UNREACHABLE:
4874 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInUnknownProtos);
4875 ip_drop_input("ipIfStatsInUnknownProtos", mp, ill);
4876 break;
4877 case ICMP_PORT_UNREACHABLE:
4878 BUMP_MIB(ill->ill_ip_mib, udpIfStatsNoPorts);
4879 ip_drop_input("ipIfStatsNoPorts", mp, ill);
4880 break;
4881 }
4882
4883 icmp_unreachable(mp, icmp_code, ira);
4884 break;
4885 default:
4886 #ifdef DEBUG
4887 panic("ip_fanout_send_icmp_v4: wrong type");
4888 /*NOTREACHED*/
4889 #else
4890 freemsg(mp);
4891 break;
4892 #endif
4893 }
4894 }
4895
4896 /*
4897 * Used to send an ICMP error message when a packet is received for
4898 * a protocol that is not supported. The mblk passed as argument
4899 * is consumed by this function.
4900 */
4901 void
4902 ip_proto_not_sup(mblk_t *mp, ip_recv_attr_t *ira)
4903 {
4904 ipha_t *ipha;
4905
4906 ipha = (ipha_t *)mp->b_rptr;
4907 if (ira->ira_flags & IRAF_IS_IPV4) {
4908 ASSERT(IPH_HDR_VERSION(ipha) == IP_VERSION);
4909 ip_fanout_send_icmp_v4(mp, ICMP_DEST_UNREACHABLE,
4910 ICMP_PROTOCOL_UNREACHABLE, ira);
4911 } else {
4912 ASSERT(IPH_HDR_VERSION(ipha) == IPV6_VERSION);
4913 ip_fanout_send_icmp_v6(mp, ICMP6_PARAM_PROB,
4914 ICMP6_PARAMPROB_NEXTHEADER, ira);
4915 }
4916 }
4917
4918 /*
4919 * Deliver a rawip packet to the given conn, possibly applying ipsec policy.
4920 * Handles IPv4 and IPv6.
4921 * We are responsible for disposing of mp, such as by freemsg() or putnext()
4922 * Caller is responsible for dropping references to the conn.
4923 */
4924 void
4925 ip_fanout_proto_conn(conn_t *connp, mblk_t *mp, ipha_t *ipha, ip6_t *ip6h,
4926 ip_recv_attr_t *ira)
4927 {
4928 ill_t *ill = ira->ira_ill;
4929 ip_stack_t *ipst = ill->ill_ipst;
4930 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
4931 boolean_t secure;
4932 uint_t protocol = ira->ira_protocol;
4933 iaflags_t iraflags = ira->ira_flags;
4934 queue_t *rq;
4935
4936 secure = iraflags & IRAF_IPSEC_SECURE;
4937
4938 rq = connp->conn_rq;
4939 if (IPCL_IS_NONSTR(connp) ? connp->conn_flow_cntrld : !canputnext(rq)) {
4940 switch (protocol) {
4941 case IPPROTO_ICMPV6:
4942 BUMP_MIB(ill->ill_icmp6_mib, ipv6IfIcmpInOverflows);
4943 break;
4944 case IPPROTO_ICMP:
4945 BUMP_MIB(&ipst->ips_icmp_mib, icmpInOverflows);
4946 break;
4947 default:
4948 BUMP_MIB(ill->ill_ip_mib, rawipIfStatsInOverflows);
4949 break;
4950 }
4951 freemsg(mp);
4952 return;
4953 }
4954
4955 ASSERT(!(IPCL_IS_IPTUN(connp)));
4956
4957 if (((iraflags & IRAF_IS_IPV4) ?
4958 CONN_INBOUND_POLICY_PRESENT(connp, ipss) :
4959 CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) ||
4960 secure) {
4961 mp = ipsec_check_inbound_policy(mp, connp, ipha,
4962 ip6h, ira);
4963 if (mp == NULL) {
4964 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
4965 /* Note that mp is NULL */
4966 ip_drop_input("ipIfStatsInDiscards", mp, ill);
4967 return;
4968 }
4969 }
4970
4971 if (iraflags & IRAF_ICMP_ERROR) {
4972 (connp->conn_recvicmp)(connp, mp, NULL, ira);
4973 } else {
4974 ill_t *rill = ira->ira_rill;
4975
4976 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCInDelivers);
4977 ira->ira_ill = ira->ira_rill = NULL;
4978 /* Send it upstream */
4979 (connp->conn_recv)(connp, mp, NULL, ira);
4980 ira->ira_ill = ill;
4981 ira->ira_rill = rill;
4982 }
4983 }
4984
4985 /*
4986 * Handle protocols with which IP is less intimate. There
4987 * can be more than one stream bound to a particular
4988 * protocol. When this is the case, normally each one gets a copy
4989 * of any incoming packets.
4990 *
4991 * IPsec NOTE :
4992 *
4993 * Don't allow a secure packet going up a non-secure connection.
4994 * We don't allow this because
4995 *
4996 * 1) Reply might go out in clear which will be dropped at
4997 * the sending side.
4998 * 2) If the reply goes out in clear it will give the
4999 * adversary enough information for getting the key in
5000 * most of the cases.
5001 *
5002 * Moreover getting a secure packet when we expect clear
5003 * implies that SA's were added without checking for
5004 * policy on both ends. This should not happen once ISAKMP
5005 * is used to negotiate SAs as SAs will be added only after
5006 * verifying the policy.
5007 *
5008 * Zones notes:
5009 * Earlier in ip_input on a system with multiple shared-IP zones we
5010 * duplicate the multicast and broadcast packets and send them up
5011 * with each explicit zoneid that exists on that ill.
5012 * This means that here we can match the zoneid with SO_ALLZONES being special.
5013 */
5014 void
5015 ip_fanout_proto_v4(mblk_t *mp, ipha_t *ipha, ip_recv_attr_t *ira)
5016 {
5017 mblk_t *mp1;
5018 ipaddr_t laddr;
5019 conn_t *connp, *first_connp, *next_connp;
5020 connf_t *connfp;
5021 ill_t *ill = ira->ira_ill;
5022 ip_stack_t *ipst = ill->ill_ipst;
5023
5024 laddr = ipha->ipha_dst;
5025
5026 connfp = &ipst->ips_ipcl_proto_fanout_v4[ira->ira_protocol];
5027 mutex_enter(&connfp->connf_lock);
	for (connp = connfp->connf_head; connp != NULL;
	    connp = connp->conn_next) {
5031 /* Note: IPCL_PROTO_MATCH includes conn_wantpacket */
5032 if (IPCL_PROTO_MATCH(connp, ira, ipha) &&
5033 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5034 tsol_receive_local(mp, &laddr, IPV4_VERSION, ira, connp))) {
5035 break;
5036 }
5037 }
5038
5039 if (connp == NULL) {
5040 /*
5041 * No one bound to these addresses. Is
5042 * there a client that wants all
5043 * unclaimed datagrams?
5044 */
5045 mutex_exit(&connfp->connf_lock);
5046 ip_fanout_send_icmp_v4(mp, ICMP_DEST_UNREACHABLE,
5047 ICMP_PROTOCOL_UNREACHABLE, ira);
5048 return;
5049 }
5050
5051 ASSERT(IPCL_IS_NONSTR(connp) || connp->conn_rq != NULL);
5052
5053 CONN_INC_REF(connp);
5054 first_connp = connp;
5055 connp = connp->conn_next;
5056
5057 for (;;) {
5058 while (connp != NULL) {
5059 /* Note: IPCL_PROTO_MATCH includes conn_wantpacket */
5060 if (IPCL_PROTO_MATCH(connp, ira, ipha) &&
5061 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5062 tsol_receive_local(mp, &laddr, IPV4_VERSION,
5063 ira, connp)))
5064 break;
5065 connp = connp->conn_next;
5066 }
5067
5068 if (connp == NULL) {
5069 /* No more interested clients */
5070 connp = first_connp;
5071 break;
5072 }
5073 if (((mp1 = dupmsg(mp)) == NULL) &&
5074 ((mp1 = copymsg(mp)) == NULL)) {
5075 /* Memory allocation failed */
5076 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
5077 ip_drop_input("ipIfStatsInDiscards", mp, ill);
5078 connp = first_connp;
5079 break;
5080 }
5081
5082 CONN_INC_REF(connp);
5083 mutex_exit(&connfp->connf_lock);
5084
5085 ip_fanout_proto_conn(connp, mp1, (ipha_t *)mp1->b_rptr, NULL,
5086 ira);
5087
5088 mutex_enter(&connfp->connf_lock);
5089 /* Follow the next pointer before releasing the conn. */
5090 next_connp = connp->conn_next;
5091 CONN_DEC_REF(connp);
5092 connp = next_connp;
5093 }
5094
5095 /* Last one. Send it upstream. */
5096 mutex_exit(&connfp->connf_lock);
5097
5098 ip_fanout_proto_conn(connp, mp, ipha, NULL, ira);
5099
5100 CONN_DEC_REF(connp);
5101 }
5102
5103 /*
5104 * If we have a IPsec NAT-Traversal packet, strip the zero-SPI or
5105 * pass it along to ESP if the SPI is non-zero. Returns the mblk if the mblk
5106 * is not consumed.
5107 *
5108 * One of three things can happen, all of which affect the passed-in mblk:
5109 *
5110 * 1.) The packet is stock UDP and gets its zero-SPI stripped. Return mblk..
5111 *
5112 * 2.) The packet is ESP-in-UDP, gets transformed into an equivalent
5113 * ESP packet, and is passed along to ESP for consumption. Return NULL.
5114 *
5115 * 3.) The packet is an ESP-in-UDP Keepalive. Drop it and return NULL.
5116 */
5117 mblk_t *
5118 zero_spi_check(mblk_t *mp, ip_recv_attr_t *ira)
5119 {
5120 int shift, plen, iph_len;
5121 ipha_t *ipha;
5122 udpha_t *udpha;
5123 uint32_t *spi;
5124 uint32_t esp_ports;
5125 uint8_t *orptr;
5126 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
5127 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
5128
5129 ipha = (ipha_t *)mp->b_rptr;
5130 iph_len = ira->ira_ip_hdr_length;
5131 plen = ira->ira_pktlen;
5132
5133 if (plen - iph_len - sizeof (udpha_t) < sizeof (uint32_t)) {
5134 /*
5135 * Most likely a keepalive for the benefit of an intervening
5136 * NAT. These aren't for us, per se, so drop it.
5137 *
5138 * RFC 3947/8 doesn't say for sure what to do for 2-3
5139 * byte packets (keepalives are 1-byte), but we'll drop them
5140 * also.
5141 */
5142 ip_drop_packet(mp, B_TRUE, ira->ira_ill,
5143 DROPPER(ipss, ipds_esp_nat_t_ka), &ipss->ipsec_dropper);
5144 return (NULL);
5145 }
5146
5147 if (MBLKL(mp) < iph_len + sizeof (udpha_t) + sizeof (*spi)) {
5148 /* might as well pull it all up - it might be ESP. */
5149 if (!pullupmsg(mp, -1)) {
5150 ip_drop_packet(mp, B_TRUE, ira->ira_ill,
5151 DROPPER(ipss, ipds_esp_nomem),
5152 &ipss->ipsec_dropper);
5153 return (NULL);
5154 }
5155
5156 ipha = (ipha_t *)mp->b_rptr;
5157 }
5158 spi = (uint32_t *)(mp->b_rptr + iph_len + sizeof (udpha_t));
5159 if (*spi == 0) {
5160 /* UDP packet - remove 0-spi. */
5161 shift = sizeof (uint32_t);
5162 } else {
5163 /* ESP-in-UDP packet - reduce to ESP. */
5164 ipha->ipha_protocol = IPPROTO_ESP;
5165 shift = sizeof (udpha_t);
5166 }
5167
5168 /* Fix IP header */
5169 ira->ira_pktlen = (plen - shift);
5170 ipha->ipha_length = htons(ira->ira_pktlen);
5171 ipha->ipha_hdr_checksum = 0;
5172
5173 orptr = mp->b_rptr;
5174 mp->b_rptr += shift;
5175
5176 udpha = (udpha_t *)(orptr + iph_len);
5177 if (*spi == 0) {
5178 ASSERT((uint8_t *)ipha == orptr);
5179 udpha->uha_length = htons(plen - shift - iph_len);
5180 iph_len += sizeof (udpha_t); /* For the call to ovbcopy(). */
5181 esp_ports = 0;
5182 } else {
5183 esp_ports = *((uint32_t *)udpha);
5184 ASSERT(esp_ports != 0);
5185 }
5186 ovbcopy(orptr, orptr + shift, iph_len);
5187 if (esp_ports != 0) /* Punt up for ESP processing. */ {
5188 ipha = (ipha_t *)(orptr + shift);
5189
5190 ira->ira_flags |= IRAF_ESP_UDP_PORTS;
5191 ira->ira_esp_udp_ports = esp_ports;
5192 ip_fanout_v4(mp, ipha, ira);
5193 return (NULL);
5194 }
5195 return (mp);
5196 }
5197
5198 /*
5199 * Deliver a udp packet to the given conn, possibly applying ipsec policy.
5200 * Handles IPv4 and IPv6.
5201 * We are responsible for disposing of mp, such as by freemsg() or putnext()
5202 * Caller is responsible for dropping references to the conn.
5203 */
5204 void
5205 ip_fanout_udp_conn(conn_t *connp, mblk_t *mp, ipha_t *ipha, ip6_t *ip6h,
5206 ip_recv_attr_t *ira)
5207 {
5208 ill_t *ill = ira->ira_ill;
5209 ip_stack_t *ipst = ill->ill_ipst;
5210 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
5211 boolean_t secure;
5212 iaflags_t iraflags = ira->ira_flags;
5213
5214 secure = iraflags & IRAF_IPSEC_SECURE;
5215
5216 if (IPCL_IS_NONSTR(connp) ? connp->conn_flow_cntrld :
5217 !canputnext(connp->conn_rq)) {
5218 BUMP_MIB(ill->ill_ip_mib, udpIfStatsInOverflows);
5219 freemsg(mp);
5220 return;
5221 }
5222
5223 if (((iraflags & IRAF_IS_IPV4) ?
5224 CONN_INBOUND_POLICY_PRESENT(connp, ipss) :
5225 CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) ||
5226 secure) {
5227 mp = ipsec_check_inbound_policy(mp, connp, ipha,
5228 ip6h, ira);
5229 if (mp == NULL) {
5230 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
5231 /* Note that mp is NULL */
5232 ip_drop_input("ipIfStatsInDiscards", mp, ill);
5233 return;
5234 }
5235 }
5236
5237 /*
5238 * Since this code is not used for UDP unicast we don't need a NAT_T
5239 * check. Only ip_fanout_v4 has that check.
5240 */
5241 if (ira->ira_flags & IRAF_ICMP_ERROR) {
5242 (connp->conn_recvicmp)(connp, mp, NULL, ira);
5243 } else {
5244 ill_t *rill = ira->ira_rill;
5245
5246 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCInDelivers);
5247 ira->ira_ill = ira->ira_rill = NULL;
5248 /* Send it upstream */
5249 (connp->conn_recv)(connp, mp, NULL, ira);
5250 ira->ira_ill = ill;
5251 ira->ira_rill = rill;
5252 }
5253 }
5254
5255 /*
5256 * Fanout for UDP packets that are multicast or broadcast, and ICMP errors.
5257 * (Unicast fanout is handled in ip_input_v4.)
5258 *
5259 * If SO_REUSEADDR is set all multicast and broadcast packets
5260 * will be delivered to all conns bound to the same port.
5261 *
5262 * If there is at least one matching AF_INET receiver, then we will
5263 * ignore any AF_INET6 receivers.
5264 * In the special case where an AF_INET socket binds to 0.0.0.0/<port> and an
5265 * AF_INET6 socket binds to ::/<port>, only the AF_INET socket receives the IPv4
5266 * packets.
5267 *
 * Zones notes:
 * Earlier in ip_input, on a system with multiple shared-IP zones, we
 * duplicate the multicast and broadcast packets and send them up
 * with each explicit zoneid that exists on that ill.
 * This means that here we can match the zoneid, with SO_ALLZONES being
 * special.
5273 */
5274 void
5275 ip_fanout_udp_multi_v4(mblk_t *mp, ipha_t *ipha, uint16_t lport, uint16_t fport,
5276 ip_recv_attr_t *ira)
5277 {
5278 ipaddr_t laddr;
5279 in6_addr_t v6faddr;
5280 conn_t *connp;
5281 connf_t *connfp;
5282 ipaddr_t faddr;
5283 ill_t *ill = ira->ira_ill;
5284 ip_stack_t *ipst = ill->ill_ipst;
5285
5286 ASSERT(ira->ira_flags & (IRAF_MULTIBROADCAST|IRAF_ICMP_ERROR));
5287
5288 laddr = ipha->ipha_dst;
5289 faddr = ipha->ipha_src;
5290
5291 connfp = &ipst->ips_ipcl_udp_fanout[IPCL_UDP_HASH(lport, ipst)];
5292 mutex_enter(&connfp->connf_lock);
5293 connp = connfp->connf_head;
5294
5295 /*
5296 * If SO_REUSEADDR has been set on the first we send the
5297 * packet to all clients that have joined the group and
5298 * match the port.
5299 */
5300 while (connp != NULL) {
5301 if ((IPCL_UDP_MATCH(connp, lport, laddr, fport, faddr)) &&
5302 conn_wantpacket(connp, ira, ipha) &&
5303 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5304 tsol_receive_local(mp, &laddr, IPV4_VERSION, ira, connp)))
5305 break;
5306 connp = connp->conn_next;
5307 }
5308
5309 if (connp == NULL)
5310 goto notfound;
5311
5312 CONN_INC_REF(connp);
5313
5314 if (connp->conn_reuseaddr) {
5315 conn_t *first_connp = connp;
5316 conn_t *next_connp;
5317 mblk_t *mp1;
5318
5319 connp = connp->conn_next;
5320 for (;;) {
5321 while (connp != NULL) {
5322 if (IPCL_UDP_MATCH(connp, lport, laddr,
5323 fport, faddr) &&
5324 conn_wantpacket(connp, ira, ipha) &&
5325 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5326 tsol_receive_local(mp, &laddr, IPV4_VERSION,
5327 ira, connp)))
5328 break;
5329 connp = connp->conn_next;
5330 }
5331 if (connp == NULL) {
5332 /* No more interested clients */
5333 connp = first_connp;
5334 break;
5335 }
5336 if (((mp1 = dupmsg(mp)) == NULL) &&
5337 ((mp1 = copymsg(mp)) == NULL)) {
5338 /* Memory allocation failed */
5339 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
5340 ip_drop_input("ipIfStatsInDiscards", mp, ill);
5341 connp = first_connp;
5342 break;
5343 }
5344 CONN_INC_REF(connp);
5345 mutex_exit(&connfp->connf_lock);
5346
5347 IP_STAT(ipst, ip_udp_fanmb);
5348 ip_fanout_udp_conn(connp, mp1, (ipha_t *)mp1->b_rptr,
5349 NULL, ira);
5350 mutex_enter(&connfp->connf_lock);
5351 /* Follow the next pointer before releasing the conn */
5352 next_connp = connp->conn_next;
5353 CONN_DEC_REF(connp);
5354 connp = next_connp;
5355 }
5356 }
5357
5358 /* Last one. Send it upstream. */
5359 mutex_exit(&connfp->connf_lock);
5360 IP_STAT(ipst, ip_udp_fanmb);
5361 ip_fanout_udp_conn(connp, mp, ipha, NULL, ira);
5362 CONN_DEC_REF(connp);
5363 return;
5364
5365 notfound:
5366 mutex_exit(&connfp->connf_lock);
5367 /*
5368 * IPv6 endpoints bound to multicast IPv4-mapped addresses
5369 * have already been matched above, since they live in the IPv4
5370 * fanout tables. This implies we only need to
5371 * check for IPv6 in6addr_any endpoints here.
5372 * Thus we compare using ipv6_all_zeros instead of the destination
5373 * address, except for the multicast group membership lookup which
5374 * uses the IPv4 destination.
5375 */
5376 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &v6faddr);
5377 connfp = &ipst->ips_ipcl_udp_fanout[IPCL_UDP_HASH(lport, ipst)];
5378 mutex_enter(&connfp->connf_lock);
5379 connp = connfp->connf_head;
5380 /*
5381 * IPv4 multicast packet being delivered to an AF_INET6
5382 * in6addr_any endpoint.
5383 * Need to check conn_wantpacket(). Note that we use conn_wantpacket()
5384 * and not conn_wantpacket_v6() since any multicast membership is
5385 * for an IPv4-mapped multicast address.
5386 */
5387 while (connp != NULL) {
5388 if (IPCL_UDP_MATCH_V6(connp, lport, ipv6_all_zeros,
5389 fport, v6faddr) &&
5390 conn_wantpacket(connp, ira, ipha) &&
5391 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5392 tsol_receive_local(mp, &laddr, IPV4_VERSION, ira, connp)))
5393 break;
5394 connp = connp->conn_next;
5395 }
5396
5397 if (connp == NULL) {
5398 /*
5399 * No one bound to this port. Is
5400 * there a client that wants all
5401 * unclaimed datagrams?
5402 */
5403 mutex_exit(&connfp->connf_lock);
5404
5405 if (ipst->ips_ipcl_proto_fanout_v4[IPPROTO_UDP].connf_head !=
5406 NULL) {
5407 ASSERT(ira->ira_protocol == IPPROTO_UDP);
5408 ip_fanout_proto_v4(mp, ipha, ira);
5409 } else {
5410 /*
5411 * We used to attempt to send an icmp error here, but
5412 * since this is known to be a multicast packet
5413 * and we don't send icmp errors in response to
5414 * multicast, just drop the packet and give up sooner.
5415 */
5416 BUMP_MIB(ill->ill_ip_mib, udpIfStatsNoPorts);
5417 freemsg(mp);
5418 }
5419 return;
5420 }
5421 CONN_INC_REF(connp);
5422 ASSERT(IPCL_IS_NONSTR(connp) || connp->conn_rq != NULL);
5423
5424 /*
5425 * If SO_REUSEADDR has been set on the first we send the
5426 * packet to all clients that have joined the group and
5427 * match the port.
5428 */
5429 if (connp->conn_reuseaddr) {
5430 conn_t *first_connp = connp;
5431 conn_t *next_connp;
5432 mblk_t *mp1;
5433
5434 connp = connp->conn_next;
5435 for (;;) {
5436 while (connp != NULL) {
5437 if (IPCL_UDP_MATCH_V6(connp, lport,
5438 ipv6_all_zeros, fport, v6faddr) &&
5439 conn_wantpacket(connp, ira, ipha) &&
5440 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) ||
5441 tsol_receive_local(mp, &laddr, IPV4_VERSION,
5442 ira, connp)))
5443 break;
5444 connp = connp->conn_next;
5445 }
5446 if (connp == NULL) {
5447 /* No more interested clients */
5448 connp = first_connp;
5449 break;
5450 }
5451 if (((mp1 = dupmsg(mp)) == NULL) &&
5452 ((mp1 = copymsg(mp)) == NULL)) {
5453 /* Memory allocation failed */
5454 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
5455 ip_drop_input("ipIfStatsInDiscards", mp, ill);
5456 connp = first_connp;
5457 break;
5458 }
5459 CONN_INC_REF(connp);
5460 mutex_exit(&connfp->connf_lock);
5461
5462 IP_STAT(ipst, ip_udp_fanmb);
5463 ip_fanout_udp_conn(connp, mp1, (ipha_t *)mp1->b_rptr,
5464 NULL, ira);
5465 mutex_enter(&connfp->connf_lock);
5466 /* Follow the next pointer before releasing the conn */
5467 next_connp = connp->conn_next;
5468 CONN_DEC_REF(connp);
5469 connp = next_connp;
5470 }
5471 }
5472
5473 /* Last one. Send it upstream. */
5474 mutex_exit(&connfp->connf_lock);
5475 IP_STAT(ipst, ip_udp_fanmb);
5476 ip_fanout_udp_conn(connp, mp, ipha, NULL, ira);
5477 CONN_DEC_REF(connp);
5478 }
5479
5480 /*
5481 * Split an incoming packet's IPv4 options into the label and the other options.
5482 * If 'allocate' is set it does memory allocation for the ip_pkt_t, including
5483 * clearing out any leftover label or options.
5484 * Otherwise it just makes ipp point into the packet.
5485 *
5486 * Returns zero if ok; ENOMEM if the buffer couldn't be allocated.
5487 */
5488 int
5489 ip_find_hdr_v4(ipha_t *ipha, ip_pkt_t *ipp, boolean_t allocate)
5490 {
5491 uchar_t *opt;
5492 uint32_t totallen;
5493 uint32_t optval;
5494 uint32_t optlen;
5495
5496 ipp->ipp_fields |= IPPF_HOPLIMIT | IPPF_TCLASS | IPPF_ADDR;
5497 ipp->ipp_hoplimit = ipha->ipha_ttl;
5498 ipp->ipp_type_of_service = ipha->ipha_type_of_service;
5499 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &ipp->ipp_addr);
5500
5501 /*
5502 * Get length (in 4 byte octets) of IP header options.
5503 */
5504 totallen = ipha->ipha_version_and_hdr_length -
5505 (uint8_t)((IP_VERSION << 4) + IP_SIMPLE_HDR_LENGTH_IN_WORDS);
5506
5507 if (totallen == 0) {
5508 if (!allocate)
5509 return (0);
5510
5511 /* Clear out anything from a previous packet */
5512 if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) {
5513 kmem_free(ipp->ipp_ipv4_options,
5514 ipp->ipp_ipv4_options_len);
5515 ipp->ipp_ipv4_options = NULL;
5516 ipp->ipp_ipv4_options_len = 0;
5517 ipp->ipp_fields &= ~IPPF_IPV4_OPTIONS;
5518 }
5519 if (ipp->ipp_fields & IPPF_LABEL_V4) {
5520 kmem_free(ipp->ipp_label_v4, ipp->ipp_label_len_v4);
5521 ipp->ipp_label_v4 = NULL;
5522 ipp->ipp_label_len_v4 = 0;
5523 ipp->ipp_fields &= ~IPPF_LABEL_V4;
5524 }
5525 return (0);
5526 }
5527
5528 totallen <<= 2;
5529 opt = (uchar_t *)&ipha[1];
5530 if (!is_system_labeled()) {
5531
5532 copyall:
5533 if (!allocate) {
5534 if (totallen != 0) {
5535 ipp->ipp_ipv4_options = opt;
5536 ipp->ipp_ipv4_options_len = totallen;
5537 ipp->ipp_fields |= IPPF_IPV4_OPTIONS;
5538 }
5539 return (0);
5540 }
5541 /* Just copy all of options */
5542 if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) {
5543 if (totallen == ipp->ipp_ipv4_options_len) {
5544 bcopy(opt, ipp->ipp_ipv4_options, totallen);
5545 return (0);
5546 }
5547 kmem_free(ipp->ipp_ipv4_options,
5548 ipp->ipp_ipv4_options_len);
5549 ipp->ipp_ipv4_options = NULL;
5550 ipp->ipp_ipv4_options_len = 0;
5551 ipp->ipp_fields &= ~IPPF_IPV4_OPTIONS;
5552 }
5553 if (totallen == 0)
5554 return (0);
5555
5556 ipp->ipp_ipv4_options = kmem_alloc(totallen, KM_NOSLEEP);
5557 if (ipp->ipp_ipv4_options == NULL)
5558 return (ENOMEM);
5559 ipp->ipp_ipv4_options_len = totallen;
5560 ipp->ipp_fields |= IPPF_IPV4_OPTIONS;
5561 bcopy(opt, ipp->ipp_ipv4_options, totallen);
5562 return (0);
5563 }
5564
5565 if (allocate && (ipp->ipp_fields & IPPF_LABEL_V4)) {
5566 kmem_free(ipp->ipp_label_v4, ipp->ipp_label_len_v4);
5567 ipp->ipp_label_v4 = NULL;
5568 ipp->ipp_label_len_v4 = 0;
5569 ipp->ipp_fields &= ~IPPF_LABEL_V4;
5570 }
5571
5572 /*
5573 * Search for CIPSO option.
5574 * We assume CIPSO is first in options if it is present.
5575 * If it isn't, then ipp_opt_ipv4_options will not include the options
5576 * prior to the CIPSO option.
5577 */
5578 while (totallen != 0) {
5579 switch (optval = opt[IPOPT_OPTVAL]) {
5580 case IPOPT_EOL:
5581 return (0);
5582 case IPOPT_NOP:
5583 optlen = 1;
5584 break;
5585 default:
5586 if (totallen <= IPOPT_OLEN)
5587 return (EINVAL);
5588 optlen = opt[IPOPT_OLEN];
5589 if (optlen < 2)
5590 return (EINVAL);
5591 }
5592 if (optlen > totallen)
5593 return (EINVAL);
5594
5595 switch (optval) {
5596 case IPOPT_COMSEC:
5597 if (!allocate) {
5598 ipp->ipp_label_v4 = opt;
5599 ipp->ipp_label_len_v4 = optlen;
5600 ipp->ipp_fields |= IPPF_LABEL_V4;
5601 } else {
5602 ipp->ipp_label_v4 = kmem_alloc(optlen,
5603 KM_NOSLEEP);
5604 if (ipp->ipp_label_v4 == NULL)
5605 return (ENOMEM);
5606 ipp->ipp_label_len_v4 = optlen;
5607 ipp->ipp_fields |= IPPF_LABEL_V4;
5608 bcopy(opt, ipp->ipp_label_v4, optlen);
5609 }
5610 totallen -= optlen;
5611 opt += optlen;
5612
5613 /* Skip padding bytes until we get to a multiple of 4 */
5614 while ((totallen & 3) != 0 && opt[0] == IPOPT_NOP) {
5615 totallen--;
5616 opt++;
5617 }
5618 /* Remaining as ipp_ipv4_options */
5619 goto copyall;
5620 }
5621 totallen -= optlen;
5622 opt += optlen;
5623 }
5624 /* No CIPSO found; return everything as ipp_ipv4_options */
5625 totallen = ipha->ipha_version_and_hdr_length -
5626 (uint8_t)((IP_VERSION << 4) + IP_SIMPLE_HDR_LENGTH_IN_WORDS);
5627 totallen <<= 2;
5628 opt = (uchar_t *)&ipha[1];
5629 goto copyall;
5630 }
5631
5632 /*
5633 * Efficient versions of lookup for an IRE when we only
5634 * match the address.
5635 * For RTF_REJECT or BLACKHOLE we return IRE_NOROUTE.
5636 * Does not handle multicast addresses.
5637 */
5638 uint_t
5639 ip_type_v4(ipaddr_t addr, ip_stack_t *ipst)
5640 {
5641 ire_t *ire;
5642 uint_t result;
5643
5644 ire = ire_ftable_lookup_simple_v4(addr, 0, ipst, NULL);
5645 ASSERT(ire != NULL);
5646 if (ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))
5647 result = IRE_NOROUTE;
5648 else
5649 result = ire->ire_type;
5650 ire_refrele(ire);
5651 return (result);
5652 }
5653
5654 /*
5655 * Efficient versions of lookup for an IRE when we only
5656 * match the address.
5657 * For RTF_REJECT or BLACKHOLE we return IRE_NOROUTE.
5658 * Does not handle multicast addresses.
5659 */
5660 uint_t
5661 ip_type_v6(const in6_addr_t *addr, ip_stack_t *ipst)
5662 {
5663 ire_t *ire;
5664 uint_t result;
5665
5666 ire = ire_ftable_lookup_simple_v6(addr, 0, ipst, NULL);
5667 ASSERT(ire != NULL);
5668 if (ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))
5669 result = IRE_NOROUTE;
5670 else
5671 result = ire->ire_type;
5672 ire_refrele(ire);
5673 return (result);
5674 }
5675
5676 /*
5677 * Nobody should be sending
5678 * packets up this stream
5679 */
5680 static void
5681 ip_lrput(queue_t *q, mblk_t *mp)
5682 {
5683 switch (mp->b_datap->db_type) {
5684 case M_FLUSH:
5685 /* Turn around */
5686 if (*mp->b_rptr & FLUSHW) {
5687 *mp->b_rptr &= ~FLUSHR;
5688 qreply(q, mp);
5689 return;
5690 }
5691 break;
5692 }
5693 freemsg(mp);
5694 }
5695
5696 /* Nobody should be sending packets down this stream */
5697 /* ARGSUSED */
5698 void
5699 ip_lwput(queue_t *q, mblk_t *mp)
5700 {
5701 freemsg(mp);
5702 }
5703
5704 /*
5705 * Move the first hop in any source route to ipha_dst and remove that part of
5706 * the source route. Called by other protocols. Errors in option formatting
5707 * are ignored - will be handled by ip_output_options. Return the final
5708 * destination (either ipha_dst or the last entry in a source route.)
5709 */
5710 ipaddr_t
5711 ip_massage_options(ipha_t *ipha, netstack_t *ns)
5712 {
5713 ipoptp_t opts;
5714 uchar_t *opt;
5715 uint8_t optval;
5716 uint8_t optlen;
5717 ipaddr_t dst;
5718 int i;
5719 ip_stack_t *ipst = ns->netstack_ip;
5720
5721 ip2dbg(("ip_massage_options\n"));
5722 dst = ipha->ipha_dst;
5723 for (optval = ipoptp_first(&opts, ipha);
5724 optval != IPOPT_EOL;
5725 optval = ipoptp_next(&opts)) {
5726 opt = opts.ipoptp_cur;
5727 switch (optval) {
5728 uint8_t off;
5729 case IPOPT_SSRR:
5730 case IPOPT_LSRR:
5731 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
5732 ip1dbg(("ip_massage_options: bad src route\n"));
5733 break;
5734 }
5735 optlen = opts.ipoptp_len;
5736 off = opt[IPOPT_OFFSET];
5737 off--;
5738 redo_srr:
5739 if (optlen < IP_ADDR_LEN ||
5740 off > optlen - IP_ADDR_LEN) {
5741 /* End of source route */
5742 ip1dbg(("ip_massage_options: end of SR\n"));
5743 break;
5744 }
5745 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
5746 ip1dbg(("ip_massage_options: next hop 0x%x\n",
5747 ntohl(dst)));
5748 /*
5749 * Check if our address is present more than
5750 * once as consecutive hops in source route.
5751 * XXX verify per-interface ip_forwarding
5752 * for source route?
5753 */
5754 if (ip_type_v4(dst, ipst) == IRE_LOCAL) {
5755 off += IP_ADDR_LEN;
5756 goto redo_srr;
5757 }
5758 if (dst == htonl(INADDR_LOOPBACK)) {
5759 ip1dbg(("ip_massage_options: loopback addr in "
5760 "source route!\n"));
5761 break;
5762 }
5763 /*
5764 * Update ipha_dst to be the first hop and remove the
5765 * first hop from the source route (by overwriting
5766 * part of the option with NOP options).
5767 */
5768 ipha->ipha_dst = dst;
5769 /* Put the last entry in dst */
5770 off = ((optlen - IP_ADDR_LEN - 3) & ~(IP_ADDR_LEN-1)) +
5771 3;
5772 bcopy(&opt[off], &dst, IP_ADDR_LEN);
5773
5774 ip1dbg(("ip_massage_options: last hop 0x%x\n",
5775 ntohl(dst)));
5776 /* Move down and overwrite */
5777 opt[IP_ADDR_LEN] = opt[0];
5778 opt[IP_ADDR_LEN+1] = opt[IPOPT_OLEN] - IP_ADDR_LEN;
5779 opt[IP_ADDR_LEN+2] = opt[IPOPT_OFFSET];
5780 for (i = 0; i < IP_ADDR_LEN; i++)
5781 opt[i] = IPOPT_NOP;
5782 break;
5783 }
5784 }
5785 return (dst);
5786 }
5787
5788 /*
5789 * Return the network mask
5790 * associated with the specified address.
5791 */
5792 ipaddr_t
5793 ip_net_mask(ipaddr_t addr)
5794 {
5795 uchar_t *up = (uchar_t *)&addr;
5796 ipaddr_t mask = 0;
5797 uchar_t *maskp = (uchar_t *)&mask;
5798
5799 #if defined(__i386) || defined(__amd64)
5800 #define TOTALLY_BRAIN_DAMAGED_C_COMPILER
5801 #endif
5802 #ifdef TOTALLY_BRAIN_DAMAGED_C_COMPILER
5803 maskp[0] = maskp[1] = maskp[2] = maskp[3] = 0;
5804 #endif
5805 if (CLASSD(addr)) {
5806 maskp[0] = 0xF0;
5807 return (mask);
5808 }
5809
/* We assume a Class E default netmask of all ones (/32) */
5811 if (CLASSE(addr))
5812 return (0xffffffffU);
5813
5814 if (addr == 0)
5815 return (0);
5816 maskp[0] = 0xFF;
5817 if ((up[0] & 0x80) == 0)
5818 return (mask);
5819
5820 maskp[1] = 0xFF;
5821 if ((up[0] & 0xC0) == 0x80)
5822 return (mask);
5823
5824 maskp[2] = 0xFF;
5825 if ((up[0] & 0xE0) == 0xC0)
5826 return (mask);
5827
5828 /* Otherwise return no mask */
5829 return ((ipaddr_t)0);
5830 }
5831
5832 /* Name/Value Table Lookup Routine */
5833 char *
5834 ip_nv_lookup(nv_t *nv, int value)
5835 {
5836 if (!nv)
5837 return (NULL);
5838 for (; nv->nv_name; nv++) {
5839 if (nv->nv_value == value)
5840 return (nv->nv_name);
5841 }
5842 return ("unknown");
5843 }
5844
5845 static int
5846 ip_wait_for_info_ack(ill_t *ill)
5847 {
5848 int err;
5849
5850 mutex_enter(&ill->ill_lock);
5851 while (ill->ill_state_flags & ILL_LL_SUBNET_PENDING) {
5852 /*
5853 * Return value of 0 indicates a pending signal.
5854 */
5855 err = cv_wait_sig(&ill->ill_cv, &ill->ill_lock);
5856 if (err == 0) {
5857 mutex_exit(&ill->ill_lock);
5858 return (EINTR);
5859 }
5860 }
5861 mutex_exit(&ill->ill_lock);
5862 /*
5863 * ip_rput_other could have set an error in ill_error on
5864 * receipt of M_ERROR.
5865 */
5866 return (ill->ill_error);
5867 }
5868
5869 /*
5870 * This is a module open, i.e. this is a control stream for access
5871 * to a DLPI device. We allocate an ill_t as the instance data in
5872 * this case.
5873 */
5874 static int
5875 ip_modopen(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
5876 {
5877 ill_t *ill;
5878 int err;
5879 zoneid_t zoneid;
5880 netstack_t *ns;
5881 ip_stack_t *ipst;
5882
5883 /*
5884 * Prevent unprivileged processes from pushing IP so that
5885 * they can't send raw IP.
5886 */
5887 if (secpolicy_net_rawaccess(credp) != 0)
5888 return (EPERM);
5889
5890 ns = netstack_find_by_cred(credp);
5891 ASSERT(ns != NULL);
5892 ipst = ns->netstack_ip;
5893 ASSERT(ipst != NULL);
5894
5895 /*
5896 * For exclusive stacks we set the zoneid to zero
5897 * to make IP operate as if in the global zone.
5898 */
5899 if (ipst->ips_netstack->netstack_stackid != GLOBAL_NETSTACKID)
5900 zoneid = GLOBAL_ZONEID;
5901 else
5902 zoneid = crgetzoneid(credp);
5903
5904 ill = (ill_t *)mi_open_alloc_sleep(sizeof (ill_t));
5905 q->q_ptr = WR(q)->q_ptr = ill;
5906 ill->ill_ipst = ipst;
5907 ill->ill_zoneid = zoneid;
5908
5909 /*
5910 * ill_init initializes the ill fields and then sends down
5911 * down a DL_INFO_REQ after calling qprocson.
5912 */
5913 err = ill_init(q, ill);
5914
5915 if (err != 0) {
5916 mi_free(ill);
5917 netstack_rele(ipst->ips_netstack);
5918 q->q_ptr = NULL;
5919 WR(q)->q_ptr = NULL;
5920 return (err);
5921 }
5922
5923 /*
5924 * Wait for the DL_INFO_ACK if a DL_INFO_REQ was sent.
5925 *
5926 * ill_init initializes the ipsq marking this thread as
5927 * writer
5928 */
5929 ipsq_exit(ill->ill_phyint->phyint_ipsq);
5930 err = ip_wait_for_info_ack(ill);
5931 if (err == 0)
5932 ill->ill_credp = credp;
5933 else
5934 goto fail;
5935
5936 crhold(credp);
5937
5938 mutex_enter(&ipst->ips_ip_mi_lock);
5939 err = mi_open_link(&ipst->ips_ip_g_head, (IDP)q->q_ptr, devp, flag,
5940 sflag, credp);
5941 mutex_exit(&ipst->ips_ip_mi_lock);
5942 fail:
5943 if (err) {
5944 (void) ip_close(q, 0);
5945 return (err);
5946 }
5947 return (0);
5948 }
5949
5950 /* For /dev/ip aka AF_INET open */
5951 int
5952 ip_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
5953 {
5954 return (ip_open(q, devp, flag, sflag, credp, B_FALSE));
5955 }
5956
5957 /* For /dev/ip6 aka AF_INET6 open */
5958 int
5959 ip_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
5960 {
5961 return (ip_open(q, devp, flag, sflag, credp, B_TRUE));
5962 }
5963
5964 /* IP open routine. */
5965 int
5966 ip_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp,
5967 boolean_t isv6)
5968 {
5969 conn_t *connp;
5970 major_t maj;
5971 zoneid_t zoneid;
5972 netstack_t *ns;
5973 ip_stack_t *ipst;
5974
5975 /* Allow reopen. */
5976 if (q->q_ptr != NULL)
5977 return (0);
5978
5979 if (sflag & MODOPEN) {
5980 /* This is a module open */
5981 return (ip_modopen(q, devp, flag, sflag, credp));
5982 }
5983
5984 if ((flag & ~(FKLYR)) == IP_HELPER_STR) {
5985 /*
5986 * Non streams based socket looking for a stream
5987 * to access IP
5988 */
5989 return (ip_helper_stream_setup(q, devp, flag, sflag,
5990 credp, isv6));
5991 }
5992
5993 ns = netstack_find_by_cred(credp);
5994 ASSERT(ns != NULL);
5995 ipst = ns->netstack_ip;
5996 ASSERT(ipst != NULL);
5997
5998 /*
5999 * For exclusive stacks we set the zoneid to zero
6000 * to make IP operate as if in the global zone.
6001 */
6002 if (ipst->ips_netstack->netstack_stackid != GLOBAL_NETSTACKID)
6003 zoneid = GLOBAL_ZONEID;
6004 else
6005 zoneid = crgetzoneid(credp);
6006
6007 /*
6008 * We are opening as a device. This is an IP client stream, and we
6009 * allocate an conn_t as the instance data.
6010 */
6011 connp = ipcl_conn_create(IPCL_IPCCONN, KM_SLEEP, ipst->ips_netstack);
6012
6013 /*
6014 * ipcl_conn_create did a netstack_hold. Undo the hold that was
6015 * done by netstack_find_by_cred()
6016 */
6017 netstack_rele(ipst->ips_netstack);
6018
6019 connp->conn_ixa->ixa_flags |= IXAF_MULTICAST_LOOP | IXAF_SET_ULP_CKSUM;
6020 /* conn_allzones can not be set this early, hence no IPCL_ZONEID */
6021 connp->conn_ixa->ixa_zoneid = zoneid;
6022 connp->conn_zoneid = zoneid;
6023
6024 connp->conn_rq = q;
6025 q->q_ptr = WR(q)->q_ptr = connp;
6026
6027 /* Minor tells us which /dev entry was opened */
6028 if (isv6) {
6029 connp->conn_family = AF_INET6;
6030 connp->conn_ipversion = IPV6_VERSION;
6031 connp->conn_ixa->ixa_flags &= ~IXAF_IS_IPV4;
6032 connp->conn_ixa->ixa_src_preferences = IPV6_PREFER_SRC_DEFAULT;
6033 } else {
6034 connp->conn_family = AF_INET;
6035 connp->conn_ipversion = IPV4_VERSION;
6036 connp->conn_ixa->ixa_flags |= IXAF_IS_IPV4;
6037 }
6038
6039 if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) &&
6040 ((connp->conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) {
6041 connp->conn_minor_arena = ip_minor_arena_la;
6042 } else {
6043 /*
6044 * Either minor numbers in the large arena were exhausted
6045 * or a non socket application is doing the open.
6046 * Try to allocate from the small arena.
6047 */
6048 if ((connp->conn_dev =
6049 inet_minor_alloc(ip_minor_arena_sa)) == 0) {
6050 /* CONN_DEC_REF takes care of netstack_rele() */
6051 q->q_ptr = WR(q)->q_ptr = NULL;
6052 CONN_DEC_REF(connp);
6053 return (EBUSY);
6054 }
6055 connp->conn_minor_arena = ip_minor_arena_sa;
6056 }
6057
6058 maj = getemajor(*devp);
6059 *devp = makedevice(maj, (minor_t)connp->conn_dev);
6060
6061 /*
6062 * connp->conn_cred is crfree()ed in ipcl_conn_destroy()
6063 */
6064 connp->conn_cred = credp;
6065 connp->conn_cpid = curproc->p_pid;
6066 /* Cache things in ixa without an extra refhold */
6067 ASSERT(!(connp->conn_ixa->ixa_free_flags & IXA_FREE_CRED));
6068 connp->conn_ixa->ixa_cred = connp->conn_cred;
6069 connp->conn_ixa->ixa_cpid = connp->conn_cpid;
6070 if (is_system_labeled())
6071 connp->conn_ixa->ixa_tsl = crgetlabel(connp->conn_cred);
6072
6073 /*
6074 * Handle IP_IOC_RTS_REQUEST and other ioctls which use conn_recv
6075 */
6076 connp->conn_recv = ip_conn_input;
6077 connp->conn_recvicmp = ip_conn_input_icmp;
6078
6079 crhold(connp->conn_cred);
6080
6081 /*
6082 * If the caller has the process-wide flag set, then default to MAC
6083 * exempt mode. This allows read-down to unlabeled hosts.
6084 */
6085 if (getpflags(NET_MAC_AWARE, credp) != 0)
6086 connp->conn_mac_mode = CONN_MAC_AWARE;
6087
6088 connp->conn_zone_is_global = (crgetzoneid(credp) == GLOBAL_ZONEID);
6089
6090 connp->conn_rq = q;
6091 connp->conn_wq = WR(q);
6092
6093 /* Non-zero default values */
6094 connp->conn_ixa->ixa_flags |= IXAF_MULTICAST_LOOP;
6095
6096 /*
6097 * Make the conn globally visible to walkers
6098 */
6099 ASSERT(connp->conn_ref == 1);
6100 mutex_enter(&connp->conn_lock);
6101 connp->conn_state_flags &= ~CONN_INCIPIENT;
6102 mutex_exit(&connp->conn_lock);
6103
6104 qprocson(q);
6105
6106 return (0);
6107 }
6108
6109 /*
6110 * Set IPsec policy from an ipsec_req_t. If the req is not "zero" and valid,
6111 * all of them are copied to the conn_t. If the req is "zero", the policy is
6112 * zeroed out. A "zero" policy has zero ipsr_{ah,req,self_encap}_req
6113 * fields.
6114 * We keep only the latest setting of the policy and thus policy setting
6115 * is not incremental/cumulative.
6116 *
6117 * Requests to set policies with multiple alternative actions will
6118 * go through a different API.
6119 */
6120 int
6121 ipsec_set_req(cred_t *cr, conn_t *connp, ipsec_req_t *req)
6122 {
6123 uint_t ah_req = 0;
6124 uint_t esp_req = 0;
6125 uint_t se_req = 0;
6126 ipsec_act_t *actp = NULL;
6127 uint_t nact;
6128 ipsec_policy_head_t *ph;
6129 boolean_t is_pol_reset, is_pol_inserted = B_FALSE;
6130 int error = 0;
6131 netstack_t *ns = connp->conn_netstack;
6132 ip_stack_t *ipst = ns->netstack_ip;
6133 ipsec_stack_t *ipss = ns->netstack_ipsec;
6134
6135 #define REQ_MASK (IPSEC_PREF_REQUIRED|IPSEC_PREF_NEVER)
6136
6137 /*
6138 * The IP_SEC_OPT option does not allow variable length parameters,
6139 * hence a request cannot be NULL.
6140 */
6141 if (req == NULL)
6142 return (EINVAL);
6143
6144 ah_req = req->ipsr_ah_req;
6145 esp_req = req->ipsr_esp_req;
6146 se_req = req->ipsr_self_encap_req;
6147
6148 /* Don't allow setting self-encap without one or more of AH/ESP. */
6149 if (se_req != 0 && esp_req == 0 && ah_req == 0)
6150 return (EINVAL);
6151
6152 /*
6153 * Are we dealing with a request to reset the policy (i.e.
6154 * zero requests).
6155 */
6156 is_pol_reset = ((ah_req & REQ_MASK) == 0 &&
6157 (esp_req & REQ_MASK) == 0 &&
6158 (se_req & REQ_MASK) == 0);
6159
6160 if (!is_pol_reset) {
6161 /*
6162 * If we couldn't load IPsec, fail with "protocol
6163 * not supported".
6164 * IPsec may not have been loaded for a request with zero
6165 * policies, so we don't fail in this case.
6166 */
6167 mutex_enter(&ipss->ipsec_loader_lock);
6168 if (ipss->ipsec_loader_state != IPSEC_LOADER_SUCCEEDED) {
6169 mutex_exit(&ipss->ipsec_loader_lock);
6170 return (EPROTONOSUPPORT);
6171 }
6172 mutex_exit(&ipss->ipsec_loader_lock);
6173
6174 /*
6175 * Test for valid requests. Invalid algorithms
6176 * need to be tested by IPsec code because new
6177 * algorithms can be added dynamically.
6178 */
6179 if ((ah_req & ~(REQ_MASK|IPSEC_PREF_UNIQUE)) != 0 ||
6180 (esp_req & ~(REQ_MASK|IPSEC_PREF_UNIQUE)) != 0 ||
6181 (se_req & ~(REQ_MASK|IPSEC_PREF_UNIQUE)) != 0) {
6182 return (EINVAL);
6183 }
6184
6185 /*
6186 * Only privileged users can issue these
6187 * requests.
6188 */
6189 if (((ah_req & IPSEC_PREF_NEVER) ||
6190 (esp_req & IPSEC_PREF_NEVER) ||
6191 (se_req & IPSEC_PREF_NEVER)) &&
6192 secpolicy_ip_config(cr, B_FALSE) != 0) {
6193 return (EPERM);
6194 }
6195
6196 /*
6197 * The IPSEC_PREF_REQUIRED and IPSEC_PREF_NEVER
6198 * are mutually exclusive.
6199 */
6200 if (((ah_req & REQ_MASK) == REQ_MASK) ||
6201 ((esp_req & REQ_MASK) == REQ_MASK) ||
6202 ((se_req & REQ_MASK) == REQ_MASK)) {
6203 /* Both of them are set */
6204 return (EINVAL);
6205 }
6206 }
6207
6208 ASSERT(MUTEX_HELD(&connp->conn_lock));
6209
6210 /*
6211 * If we have already cached policies in conn_connect(), don't
6212 * let them change now. We cache policies for connections
6213 * whose src,dst [addr, port] is known.
6214 */
6215 if (connp->conn_policy_cached) {
6216 return (EINVAL);
6217 }
6218
6219 /*
6220 * We have a zero policies, reset the connection policy if already
6221 * set. This will cause the connection to inherit the
6222 * global policy, if any.
6223 */
6224 if (is_pol_reset) {
6225 if (connp->conn_policy != NULL) {
6226 IPPH_REFRELE(connp->conn_policy, ipst->ips_netstack);
6227 connp->conn_policy = NULL;
6228 }
6229 connp->conn_in_enforce_policy = B_FALSE;
6230 connp->conn_out_enforce_policy = B_FALSE;
6231 return (0);
6232 }
6233
6234 ph = connp->conn_policy = ipsec_polhead_split(connp->conn_policy,
6235 ipst->ips_netstack);
6236 if (ph == NULL)
6237 goto enomem;
6238
6239 ipsec_actvec_from_req(req, &actp, &nact, ipst->ips_netstack);
6240 if (actp == NULL)
6241 goto enomem;
6242
6243 /*
6244 * Always insert IPv4 policy entries, since they can also apply to
6245 * ipv6 sockets being used in ipv4-compat mode.
6246 */
6247 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V4,
6248 IPSEC_TYPE_INBOUND, ns))
6249 goto enomem;
6250 is_pol_inserted = B_TRUE;
6251 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V4,
6252 IPSEC_TYPE_OUTBOUND, ns))
6253 goto enomem;
6254
6255 /*
6256 * We're looking at a v6 socket, also insert the v6-specific
6257 * entries.
6258 */
6259 if (connp->conn_family == AF_INET6) {
6260 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V6,
6261 IPSEC_TYPE_INBOUND, ns))
6262 goto enomem;
6263 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V6,
6264 IPSEC_TYPE_OUTBOUND, ns))
6265 goto enomem;
6266 }
6267
6268 ipsec_actvec_free(actp, nact);
6269
6270 /*
6271 * If the requests need security, set enforce_policy.
6272 * If the requests are IPSEC_PREF_NEVER, one should
6273 * still set conn_out_enforce_policy so that ip_set_destination
6274 * marks the ip_xmit_attr_t appropriatly. This is needed so that
6275 * for connections that we don't cache policy in at connect time,
6276 * if global policy matches in ip_output_attach_policy, we
6277 * don't wrongly inherit global policy. Similarly, we need
6278 * to set conn_in_enforce_policy also so that we don't verify
6279 * policy wrongly.
6280 */
6281 if ((ah_req & REQ_MASK) != 0 ||
6282 (esp_req & REQ_MASK) != 0 ||
6283 (se_req & REQ_MASK) != 0) {
6284 connp->conn_in_enforce_policy = B_TRUE;
6285 connp->conn_out_enforce_policy = B_TRUE;
6286 }
6287
6288 return (error);
6289 #undef REQ_MASK
6290
6291 /*
6292 * Common memory-allocation-failure exit path.
6293 */
6294 enomem:
6295 if (actp != NULL)
6296 ipsec_actvec_free(actp, nact);
6297 if (is_pol_inserted)
6298 ipsec_polhead_flush(ph, ns);
6299 return (ENOMEM);
6300 }
6301
6302 /*
6303 * Set socket options for joining and leaving multicast groups.
6304 * Common to IPv4 and IPv6; inet6 indicates the type of socket.
6305 * The caller has already check that the option name is consistent with
6306 * the address family of the socket.
6307 */
6308 int
6309 ip_opt_set_multicast_group(conn_t *connp, t_scalar_t name,
6310 uchar_t *invalp, boolean_t inet6, boolean_t checkonly)
6311 {
6312 int *i1 = (int *)invalp;
6313 int error = 0;
6314 ip_stack_t *ipst = connp->conn_netstack->netstack_ip;
6315 struct ip_mreq *v4_mreqp;
6316 struct ipv6_mreq *v6_mreqp;
6317 struct group_req *greqp;
6318 ire_t *ire;
6319 boolean_t done = B_FALSE;
6320 ipaddr_t ifaddr;
6321 in6_addr_t v6group;
6322 uint_t ifindex;
6323 boolean_t mcast_opt = B_TRUE;
6324 mcast_record_t fmode;
6325 int (*optfn)(conn_t *, boolean_t, const in6_addr_t *,
6326 ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *);
6327
6328 switch (name) {
6329 case IP_ADD_MEMBERSHIP:
6330 case IPV6_JOIN_GROUP:
6331 mcast_opt = B_FALSE;
6332 /* FALLTHRU */
6333 case MCAST_JOIN_GROUP:
6334 fmode = MODE_IS_EXCLUDE;
6335 optfn = ip_opt_add_group;
6336 break;
6337
6338 case IP_DROP_MEMBERSHIP:
6339 case IPV6_LEAVE_GROUP:
6340 mcast_opt = B_FALSE;
6341 /* FALLTHRU */
6342 case MCAST_LEAVE_GROUP:
6343 fmode = MODE_IS_INCLUDE;
6344 optfn = ip_opt_delete_group;
6345 break;
6346 default:
6347 ASSERT(0);
6348 }
6349
6350 if (mcast_opt) {
6351 struct sockaddr_in *sin;
6352 struct sockaddr_in6 *sin6;
6353
6354 greqp = (struct group_req *)i1;
6355 if (greqp->gr_group.ss_family == AF_INET) {
6356 sin = (struct sockaddr_in *)&(greqp->gr_group);
6357 IN6_INADDR_TO_V4MAPPED(&sin->sin_addr, &v6group);
6358 } else {
6359 if (!inet6)
6360 return (EINVAL); /* Not on INET socket */
6361
6362 sin6 = (struct sockaddr_in6 *)&(greqp->gr_group);
6363 v6group = sin6->sin6_addr;
6364 }
6365 ifaddr = INADDR_ANY;
6366 ifindex = greqp->gr_interface;
6367 } else if (inet6) {
6368 v6_mreqp = (struct ipv6_mreq *)i1;
6369 v6group = v6_mreqp->ipv6mr_multiaddr;
6370 ifaddr = INADDR_ANY;
6371 ifindex = v6_mreqp->ipv6mr_interface;
6372 } else {
6373 v4_mreqp = (struct ip_mreq *)i1;
6374 IN6_INADDR_TO_V4MAPPED(&v4_mreqp->imr_multiaddr, &v6group);
6375 ifaddr = (ipaddr_t)v4_mreqp->imr_interface.s_addr;
6376 ifindex = 0;
6377 }
6378
6379 /*
6380 * In the multirouting case, we need to replicate
6381 * the request on all interfaces that will take part
6382 * in replication. We do so because multirouting is
6383 * reflective, thus we will probably receive multi-
6384 * casts on those interfaces.
6385 * The ip_multirt_apply_membership() succeeds if
6386 * the operation succeeds on at least one interface.
6387 */
6388 if (IN6_IS_ADDR_V4MAPPED(&v6group)) {
6389 ipaddr_t group;
6390
6391 IN6_V4MAPPED_TO_IPADDR(&v6group, group);
6392
6393 ire = ire_ftable_lookup_v4(group, IP_HOST_MASK, 0,
6394 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL,
6395 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL);
6396 } else {
6397 ire = ire_ftable_lookup_v6(&v6group, &ipv6_all_ones, 0,
6398 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL,
6399 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL);
6400 }
6401 if (ire != NULL) {
6402 if (ire->ire_flags & RTF_MULTIRT) {
6403 error = ip_multirt_apply_membership(optfn, ire, connp,
6404 checkonly, &v6group, fmode, &ipv6_all_zeros);
6405 done = B_TRUE;
6406 }
6407 ire_refrele(ire);
6408 }
6409
6410 if (!done) {
6411 error = optfn(connp, checkonly, &v6group, ifaddr, ifindex,
6412 fmode, &ipv6_all_zeros);
6413 }
6414 return (error);
6415 }
6416
6417 /*
6418 * Set socket options for joining and leaving multicast groups
6419 * for specific sources.
6420 * Common to IPv4 and IPv6; inet6 indicates the type of socket.
6421 * The caller has already check that the option name is consistent with
6422 * the address family of the socket.
6423 */
6424 int
6425 ip_opt_set_multicast_sources(conn_t *connp, t_scalar_t name,
6426 uchar_t *invalp, boolean_t inet6, boolean_t checkonly)
6427 {
6428 int *i1 = (int *)invalp;
6429 int error = 0;
6430 ip_stack_t *ipst = connp->conn_netstack->netstack_ip;
6431 struct ip_mreq_source *imreqp;
6432 struct group_source_req *gsreqp;
6433 in6_addr_t v6group, v6src;
6434 uint32_t ifindex;
6435 ipaddr_t ifaddr;
6436 boolean_t mcast_opt = B_TRUE;
6437 mcast_record_t fmode;
6438 ire_t *ire;
6439 boolean_t done = B_FALSE;
6440 int (*optfn)(conn_t *, boolean_t, const in6_addr_t *,
6441 ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *);
6442
6443 switch (name) {
6444 case IP_BLOCK_SOURCE:
6445 mcast_opt = B_FALSE;
6446 /* FALLTHRU */
6447 case MCAST_BLOCK_SOURCE:
6448 fmode = MODE_IS_EXCLUDE;
6449 optfn = ip_opt_add_group;
6450 break;
6451
6452 case IP_UNBLOCK_SOURCE:
6453 mcast_opt = B_FALSE;
6454 /* FALLTHRU */
6455 case MCAST_UNBLOCK_SOURCE:
6456 fmode = MODE_IS_EXCLUDE;
6457 optfn = ip_opt_delete_group;
6458 break;
6459
6460 case IP_ADD_SOURCE_MEMBERSHIP:
6461 mcast_opt = B_FALSE;
6462 /* FALLTHRU */
6463 case MCAST_JOIN_SOURCE_GROUP:
6464 fmode = MODE_IS_INCLUDE;
6465 optfn = ip_opt_add_group;
6466 break;
6467
6468 case IP_DROP_SOURCE_MEMBERSHIP:
6469 mcast_opt = B_FALSE;
6470 /* FALLTHRU */
6471 case MCAST_LEAVE_SOURCE_GROUP:
6472 fmode = MODE_IS_INCLUDE;
6473 optfn = ip_opt_delete_group;
6474 break;
6475 default:
6476 ASSERT(0);
6477 }
6478
6479 if (mcast_opt) {
6480 gsreqp = (struct group_source_req *)i1;
6481 ifindex = gsreqp->gsr_interface;
6482 if (gsreqp->gsr_group.ss_family == AF_INET) {
6483 struct sockaddr_in *s;
6484 s = (struct sockaddr_in *)&gsreqp->gsr_group;
6485 IN6_INADDR_TO_V4MAPPED(&s->sin_addr, &v6group);
6486 s = (struct sockaddr_in *)&gsreqp->gsr_source;
6487 IN6_INADDR_TO_V4MAPPED(&s->sin_addr, &v6src);
6488 } else {
6489 struct sockaddr_in6 *s6;
6490
6491 if (!inet6)
6492 return (EINVAL); /* Not on INET socket */
6493
6494 s6 = (struct sockaddr_in6 *)&gsreqp->gsr_group;
6495 v6group = s6->sin6_addr;
6496 s6 = (struct sockaddr_in6 *)&gsreqp->gsr_source;
6497 v6src = s6->sin6_addr;
6498 }
6499 ifaddr = INADDR_ANY;
6500 } else {
6501 imreqp = (struct ip_mreq_source *)i1;
6502 IN6_INADDR_TO_V4MAPPED(&imreqp->imr_multiaddr, &v6group);
6503 IN6_INADDR_TO_V4MAPPED(&imreqp->imr_sourceaddr, &v6src);
6504 ifaddr = (ipaddr_t)imreqp->imr_interface.s_addr;
6505 ifindex = 0;
6506 }
6507
6508 /*
6509 * Handle src being mapped INADDR_ANY by changing it to unspecified.
6510 */
6511 if (IN6_IS_ADDR_V4MAPPED_ANY(&v6src))
6512 v6src = ipv6_all_zeros;
6513
6514 /*
6515 * In the multirouting case, we need to replicate
6516 * the request as noted in the mcast cases above.
6517 */
6518 if (IN6_IS_ADDR_V4MAPPED(&v6group)) {
6519 ipaddr_t group;
6520
6521 IN6_V4MAPPED_TO_IPADDR(&v6group, group);
6522
6523 ire = ire_ftable_lookup_v4(group, IP_HOST_MASK, 0,
6524 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL,
6525 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL);
6526 } else {
6527 ire = ire_ftable_lookup_v6(&v6group, &ipv6_all_ones, 0,
6528 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL,
6529 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL);
6530 }
6531 if (ire != NULL) {
6532 if (ire->ire_flags & RTF_MULTIRT) {
6533 error = ip_multirt_apply_membership(optfn, ire, connp,
6534 checkonly, &v6group, fmode, &v6src);
6535 done = B_TRUE;
6536 }
6537 ire_refrele(ire);
6538 }
6539 if (!done) {
6540 error = optfn(connp, checkonly, &v6group, ifaddr, ifindex,
6541 fmode, &v6src);
6542 }
6543 return (error);
6544 }
6545
6546 /*
6547 * Given a destination address and a pointer to where to put the information
6548 * this routine fills in the mtuinfo.
6549 * The socket must be connected.
6550 * For sctp conn_faddr is the primary address.
6551 */
6552 int
6553 ip_fill_mtuinfo(conn_t *connp, ip_xmit_attr_t *ixa, struct ip6_mtuinfo *mtuinfo)
6554 {
6555 uint32_t pmtu = IP_MAXPACKET;
6556 uint_t scopeid;
6557
6558 if (IN6_IS_ADDR_UNSPECIFIED(&connp->conn_faddr_v6))
6559 return (-1);
6560
6561 /* In case we never sent or called ip_set_destination_v4/v6 */
6562 if (ixa->ixa_ire != NULL)
6563 pmtu = ip_get_pmtu(ixa);
6564
6565 if (ixa->ixa_flags & IXAF_SCOPEID_SET)
6566 scopeid = ixa->ixa_scopeid;
6567 else
6568 scopeid = 0;
6569
6570 bzero(mtuinfo, sizeof (*mtuinfo));
6571 mtuinfo->ip6m_addr.sin6_family = AF_INET6;
6572 mtuinfo->ip6m_addr.sin6_port = connp->conn_fport;
6573 mtuinfo->ip6m_addr.sin6_addr = connp->conn_faddr_v6;
6574 mtuinfo->ip6m_addr.sin6_scope_id = scopeid;
6575 mtuinfo->ip6m_mtu = pmtu;
6576
6577 return (sizeof (struct ip6_mtuinfo));
6578 }
6579
6580 /*
6581 * When the src multihoming is changed from weak to [strong, preferred]
6582 * ip_ire_rebind_walker is called to walk the list of all ire_t entries
6583 * and identify routes that were created by user-applications in the
6584 * unbound state (i.e., without RTA_IFP), and for which an ire_ill is not
6585 * currently defined. These routes are then 'rebound', i.e., their ire_ill
6586 * is selected by finding an interface route for the gateway.
6587 */
6588 /* ARGSUSED */
6589 void
6590 ip_ire_rebind_walker(ire_t *ire, void *notused)
6591 {
6592 if (!ire->ire_unbound || ire->ire_ill != NULL)
6593 return;
6594 ire_rebind(ire);
6595 ire_delete(ire);
6596 }
6597
6598 /*
6599 * When the src multihoming is changed from [strong, preferred] to weak,
6600 * ip_ire_unbind_walker is called to walk the list of all ire_t entries, and
6601 * set any entries that were created by user-applications in the unbound state
6602 * (i.e., without RTA_IFP) back to having a NULL ire_ill.
6603 */
6604 /* ARGSUSED */
6605 void
6606 ip_ire_unbind_walker(ire_t *ire, void *notused)
6607 {
6608 ire_t *new_ire;
6609
6610 if (!ire->ire_unbound || ire->ire_ill == NULL)
6611 return;
6612 if (ire->ire_ipversion == IPV6_VERSION) {
6613 new_ire = ire_create_v6(&ire->ire_addr_v6, &ire->ire_mask_v6,
6614 &ire->ire_gateway_addr_v6, ire->ire_type, NULL,
6615 ire->ire_zoneid, ire->ire_flags, NULL, ire->ire_ipst);
6616 } else {
6617 new_ire = ire_create((uchar_t *)&ire->ire_addr,
6618 (uchar_t *)&ire->ire_mask,
6619 (uchar_t *)&ire->ire_gateway_addr, ire->ire_type, NULL,
6620 ire->ire_zoneid, ire->ire_flags, NULL, ire->ire_ipst);
6621 }
6622 if (new_ire == NULL)
6623 return;
6624 new_ire->ire_unbound = B_TRUE;
6625 /*
6626 * The bound ire must first be deleted so that we don't return
6627 * the existing one on the attempt to add the unbound new_ire.
6628 */
6629 ire_delete(ire);
6630 new_ire = ire_add(new_ire);
6631 if (new_ire != NULL)
6632 ire_refrele(new_ire);
6633 }
6634
6635 /*
6636 * When the settings of ip*_strict_src_multihoming tunables are changed,
6637 * all cached routes need to be recomputed. This recomputation needs to be
6638 * done when going from weaker to stronger modes so that the cached ire
6639 * for the connection does not violate the current ip*_strict_src_multihoming
6640 * setting. It also needs to be done when going from stronger to weaker modes,
6641 * so that we fall back to matching on the longest-matching-route (as opposed
6642 * to a shorter match that may have been selected in the strong mode
6643 * to satisfy src_multihoming settings).
6644 *
 * The cached ixa_ire entries for all conn_t entries are marked as
6646 * "verify" so that they will be recomputed for the next packet.
6647 */
6648 void
6649 conn_ire_revalidate(conn_t *connp, void *arg)
6650 {
6651 boolean_t isv6 = (boolean_t)arg;
6652
6653 if ((isv6 && connp->conn_ipversion != IPV6_VERSION) ||
6654 (!isv6 && connp->conn_ipversion != IPV4_VERSION))
6655 return;
6656 connp->conn_ixa->ixa_ire_generation = IRE_GENERATION_VERIFY;
6657 }
6658
6659 /*
6660 * Handles both IPv4 and IPv6 reassembly - doing the out-of-order cases,
6661 * When an ipf is passed here for the first time, if
6662 * we already have in-order fragments on the queue, we convert from the fast-
6663 * path reassembly scheme to the hard-case scheme. From then on, additional
6664 * fragments are reassembled here. We keep track of the start and end offsets
6665 * of each piece, and the number of holes in the chain. When the hole count
6666 * goes to zero, we are done!
6667 *
6668 * The ipf_count will be updated to account for any mblk(s) added (pointed to
6669 * by mp) or subtracted (freeb()ed dups), upon return the caller must update
6670 * ipfb_count and ill_frag_count by the difference of ipf_count before and
6671 * after the call to ip_reassemble().
6672 */
6673 int
6674 ip_reassemble(mblk_t *mp, ipf_t *ipf, uint_t start, boolean_t more, ill_t *ill,
6675 size_t msg_len)
6676 {
6677 uint_t end;
6678 mblk_t *next_mp;
6679 mblk_t *mp1;
6680 uint_t offset;
6681 boolean_t incr_dups = B_TRUE;
6682 boolean_t offset_zero_seen = B_FALSE;
6683 boolean_t pkt_boundary_checked = B_FALSE;
6684
6685 /* If start == 0 then ipf_nf_hdr_len has to be set. */
6686 ASSERT(start != 0 || ipf->ipf_nf_hdr_len != 0);
6687
6688 /* Add in byte count */
6689 ipf->ipf_count += msg_len;
6690 if (ipf->ipf_end) {
6691 /*
6692 * We were part way through in-order reassembly, but now there
6693 * is a hole. We walk through messages already queued, and
6694 * mark them for hard case reassembly. We know that up till
6695 * now they were in order starting from offset zero.
6696 */
6697 offset = 0;
6698 for (mp1 = ipf->ipf_mp->b_cont; mp1; mp1 = mp1->b_cont) {
6699 IP_REASS_SET_START(mp1, offset);
6700 if (offset == 0) {
6701 ASSERT(ipf->ipf_nf_hdr_len != 0);
6702 offset = -ipf->ipf_nf_hdr_len;
6703 }
6704 offset += mp1->b_wptr - mp1->b_rptr;
6705 IP_REASS_SET_END(mp1, offset);
6706 }
6707 /* One hole at the end. */
6708 ipf->ipf_hole_cnt = 1;
6709 /* Brand it as a hard case, forever. */
6710 ipf->ipf_end = 0;
6711 }
6712 /* Walk through all the new pieces. */
6713 do {
6714 end = start + (mp->b_wptr - mp->b_rptr);
6715 /*
6716 * If start is 0, decrease 'end' only for the first mblk of
6717 * the fragment. Otherwise 'end' can get wrong value in the
6718 * second pass of the loop if first mblk is exactly the
6719 * size of ipf_nf_hdr_len.
6720 */
6721 if (start == 0 && !offset_zero_seen) {
6722 /* First segment */
6723 ASSERT(ipf->ipf_nf_hdr_len != 0);
6724 end -= ipf->ipf_nf_hdr_len;
6725 offset_zero_seen = B_TRUE;
6726 }
6727 next_mp = mp->b_cont;
6728 /*
6729 * We are checking to see if there is any interesing data
6730 * to process. If there isn't and the mblk isn't the
6731 * one which carries the unfragmentable header then we
6732 * drop it. It's possible to have just the unfragmentable
6733 * header come through without any data. That needs to be
6734 * saved.
6735 *
6736 * If the assert at the top of this function holds then the
6737 * term "ipf->ipf_nf_hdr_len != 0" isn't needed. This code
6738 * is infrequently traveled enough that the test is left in
6739 * to protect against future code changes which break that
6740 * invariant.
6741 */
6742 if (start == end && start != 0 && ipf->ipf_nf_hdr_len != 0) {
6743 /* Empty. Blast it. */
6744 IP_REASS_SET_START(mp, 0);
6745 IP_REASS_SET_END(mp, 0);
6746 /*
6747 * If the ipf points to the mblk we are about to free,
6748 * update ipf to point to the next mblk (or NULL
6749 * if none).
6750 */
6751 if (ipf->ipf_mp->b_cont == mp)
6752 ipf->ipf_mp->b_cont = next_mp;
6753 freeb(mp);
6754 continue;
6755 }
6756 mp->b_cont = NULL;
6757 IP_REASS_SET_START(mp, start);
6758 IP_REASS_SET_END(mp, end);
6759 if (!ipf->ipf_tail_mp) {
6760 ipf->ipf_tail_mp = mp;
6761 ipf->ipf_mp->b_cont = mp;
6762 if (start == 0 || !more) {
6763 ipf->ipf_hole_cnt = 1;
6764 /*
6765 * if the first fragment comes in more than one
6766 * mblk, this loop will be executed for each
6767 * mblk. Need to adjust hole count so exiting
6768 * this routine will leave hole count at 1.
6769 */
6770 if (next_mp)
6771 ipf->ipf_hole_cnt++;
6772 } else
6773 ipf->ipf_hole_cnt = 2;
6774 continue;
6775 } else if (ipf->ipf_last_frag_seen && !more &&
6776 !pkt_boundary_checked) {
6777 /*
6778 * We check datagram boundary only if this fragment
6779 * claims to be the last fragment and we have seen a
6780 * last fragment in the past too. We do this only
6781 * once for a given fragment.
6782 *
6783 * start cannot be 0 here as fragments with start=0
6784 * and MF=0 gets handled as a complete packet. These
6785 * fragments should not reach here.
6786 */
6787
6788 if (start + msgdsize(mp) !=
6789 IP_REASS_END(ipf->ipf_tail_mp)) {
6790 /*
6791 * We have two fragments both of which claim
6792 * to be the last fragment but gives conflicting
6793 * information about the whole datagram size.
6794 * Something fishy is going on. Drop the
6795 * fragment and free up the reassembly list.
6796 */
6797 return (IP_REASS_FAILED);
6798 }
6799
6800 /*
6801 * We shouldn't come to this code block again for this
6802 * particular fragment.
6803 */
6804 pkt_boundary_checked = B_TRUE;
6805 }
6806
6807 /* New stuff at or beyond tail? */
6808 offset = IP_REASS_END(ipf->ipf_tail_mp);
6809 if (start >= offset) {
6810 if (ipf->ipf_last_frag_seen) {
6811 /* current fragment is beyond last fragment */
6812 return (IP_REASS_FAILED);
6813 }
6814 /* Link it on end. */
6815 ipf->ipf_tail_mp->b_cont = mp;
6816 ipf->ipf_tail_mp = mp;
6817 if (more) {
6818 if (start != offset)
6819 ipf->ipf_hole_cnt++;
6820 } else if (start == offset && next_mp == NULL)
6821 ipf->ipf_hole_cnt--;
6822 continue;
6823 }
6824 mp1 = ipf->ipf_mp->b_cont;
6825 offset = IP_REASS_START(mp1);
6826 /* New stuff at the front? */
6827 if (start < offset) {
6828 if (start == 0) {
6829 if (end >= offset) {
					/* Nailed the hole at the beginning. */
6831 ipf->ipf_hole_cnt--;
6832 }
6833 } else if (end < offset) {
6834 /*
6835 * A hole, stuff, and a hole where there used
6836 * to be just a hole.
6837 */
6838 ipf->ipf_hole_cnt++;
6839 }
6840 mp->b_cont = mp1;
6841 /* Check for overlap. */
6842 while (end > offset) {
6843 if (end < IP_REASS_END(mp1)) {
6844 mp->b_wptr -= end - offset;
6845 IP_REASS_SET_END(mp, offset);
6846 BUMP_MIB(ill->ill_ip_mib,
6847 ipIfStatsReasmPartDups);
6848 break;
6849 }
6850 /* Did we cover another hole? */
6851 if ((mp1->b_cont &&
6852 IP_REASS_END(mp1) !=
6853 IP_REASS_START(mp1->b_cont) &&
6854 end >= IP_REASS_START(mp1->b_cont)) ||
6855 (!ipf->ipf_last_frag_seen && !more)) {
6856 ipf->ipf_hole_cnt--;
6857 }
6858 /* Clip out mp1. */
6859 if ((mp->b_cont = mp1->b_cont) == NULL) {
6860 /*
6861 * After clipping out mp1, this guy
6862 * is now hanging off the end.
6863 */
6864 ipf->ipf_tail_mp = mp;
6865 }
6866 IP_REASS_SET_START(mp1, 0);
6867 IP_REASS_SET_END(mp1, 0);
6868 /* Subtract byte count */
6869 ipf->ipf_count -= mp1->b_datap->db_lim -
6870 mp1->b_datap->db_base;
6871 freeb(mp1);
6872 BUMP_MIB(ill->ill_ip_mib,
6873 ipIfStatsReasmPartDups);
6874 mp1 = mp->b_cont;
6875 if (!mp1)
6876 break;
6877 offset = IP_REASS_START(mp1);
6878 }
6879 ipf->ipf_mp->b_cont = mp;
6880 continue;
6881 }
	/*
	 * The new piece starts somewhere between the start of the head
	 * and the end of the tail.
	 */
6886 for (; mp1; mp1 = mp1->b_cont) {
6887 offset = IP_REASS_END(mp1);
6888 if (start < offset) {
6889 if (end <= offset) {
6890 /* Nothing new. */
6891 IP_REASS_SET_START(mp, 0);
6892 IP_REASS_SET_END(mp, 0);
6893 /* Subtract byte count */
6894 ipf->ipf_count -= mp->b_datap->db_lim -
6895 mp->b_datap->db_base;
6896 if (incr_dups) {
6897 ipf->ipf_num_dups++;
6898 incr_dups = B_FALSE;
6899 }
6900 freeb(mp);
6901 BUMP_MIB(ill->ill_ip_mib,
6902 ipIfStatsReasmDuplicates);
6903 break;
6904 }
6905 /*
6906 * Trim redundant stuff off beginning of new
6907 * piece.
6908 */
6909 IP_REASS_SET_START(mp, offset);
6910 mp->b_rptr += offset - start;
6911 BUMP_MIB(ill->ill_ip_mib,
6912 ipIfStatsReasmPartDups);
6913 start = offset;
6914 if (!mp1->b_cont) {
6915 /*
6916 * After trimming, this guy is now
6917 * hanging off the end.
6918 */
6919 mp1->b_cont = mp;
6920 ipf->ipf_tail_mp = mp;
6921 if (!more) {
6922 ipf->ipf_hole_cnt--;
6923 }
6924 break;
6925 }
6926 }
6927 if (start >= IP_REASS_START(mp1->b_cont))
6928 continue;
6929 /* Fill a hole */
6930 if (start > offset)
6931 ipf->ipf_hole_cnt++;
6932 mp->b_cont = mp1->b_cont;
6933 mp1->b_cont = mp;
6934 mp1 = mp->b_cont;
6935 offset = IP_REASS_START(mp1);
6936 if (end >= offset) {
6937 ipf->ipf_hole_cnt--;
6938 /* Check for overlap. */
6939 while (end > offset) {
6940 if (end < IP_REASS_END(mp1)) {
6941 mp->b_wptr -= end - offset;
6942 IP_REASS_SET_END(mp, offset);
6943 /*
6944 * TODO we might bump
6945 * this up twice if there is
6946 * overlap at both ends.
6947 */
6948 BUMP_MIB(ill->ill_ip_mib,
6949 ipIfStatsReasmPartDups);
6950 break;
6951 }
6952 /* Did we cover another hole? */
6953 if ((mp1->b_cont &&
6954 IP_REASS_END(mp1)
6955 != IP_REASS_START(mp1->b_cont) &&
6956 end >=
6957 IP_REASS_START(mp1->b_cont)) ||
6958 (!ipf->ipf_last_frag_seen &&
6959 !more)) {
6960 ipf->ipf_hole_cnt--;
6961 }
6962 /* Clip out mp1. */
6963 if ((mp->b_cont = mp1->b_cont) ==
6964 NULL) {
6965 /*
6966 * After clipping out mp1,
6967 * this guy is now hanging
6968 * off the end.
6969 */
6970 ipf->ipf_tail_mp = mp;
6971 }
6972 IP_REASS_SET_START(mp1, 0);
6973 IP_REASS_SET_END(mp1, 0);
6974 /* Subtract byte count */
6975 ipf->ipf_count -=
6976 mp1->b_datap->db_lim -
6977 mp1->b_datap->db_base;
6978 freeb(mp1);
6979 BUMP_MIB(ill->ill_ip_mib,
6980 ipIfStatsReasmPartDups);
6981 mp1 = mp->b_cont;
6982 if (!mp1)
6983 break;
6984 offset = IP_REASS_START(mp1);
6985 }
6986 }
6987 break;
6988 }
6989 } while (start = end, mp = next_mp);
6990
6991 /* Fragment just processed could be the last one. Remember this fact */
6992 if (!more)
6993 ipf->ipf_last_frag_seen = B_TRUE;
6994
6995 /* Still got holes? */
6996 if (ipf->ipf_hole_cnt)
6997 return (IP_REASS_PARTIAL);
6998 /* Clean up overloaded fields to avoid upstream disasters. */
6999 for (mp1 = ipf->ipf_mp->b_cont; mp1; mp1 = mp1->b_cont) {
7000 IP_REASS_SET_START(mp1, 0);
7001 IP_REASS_SET_END(mp1, 0);
7002 }
7003 return (IP_REASS_COMPLETE);
7004 }
7005
7006 /*
7007 * Fragmentation reassembly. Each ILL has a hash table for
7008 * queuing packets undergoing reassembly for all IPIFs
7009 * associated with the ILL. The hash is based on the packet
7010 * IP ident field. The ILL frag hash table was allocated
7011 * as a timer block at the time the ILL was created. Whenever
7012 * there is anything on the reassembly queue, the timer will
7013 * be running. Returns the reassembled packet if reassembly completes.
7014 */
7015 mblk_t *
7016 ip_input_fragment(mblk_t *mp, ipha_t *ipha, ip_recv_attr_t *ira)
7017 {
7018 uint32_t frag_offset_flags;
7019 mblk_t *t_mp;
7020 ipaddr_t dst;
7021 uint8_t proto = ipha->ipha_protocol;
7022 uint32_t sum_val;
7023 uint16_t sum_flags;
7024 ipf_t *ipf;
7025 ipf_t **ipfp;
7026 ipfb_t *ipfb;
7027 uint16_t ident;
7028 uint32_t offset;
7029 ipaddr_t src;
7030 uint_t hdr_length;
7031 uint32_t end;
7032 mblk_t *mp1;
7033 mblk_t *tail_mp;
7034 size_t count;
7035 size_t msg_len;
7036 uint8_t ecn_info = 0;
7037 uint32_t packet_size;
7038 boolean_t pruned = B_FALSE;
7039 ill_t *ill = ira->ira_ill;
7040 ip_stack_t *ipst = ill->ill_ipst;
7041
	/*
	 * Drop the fragment as early as possible if we don't have
	 * the resources to reassemble it.
	 */
7046 if (ipst->ips_ip_reass_queue_bytes == 0) {
7047 freemsg(mp);
7048 return (NULL);
7049 }
7050
7051 /* Check for fragmentation offset; return if there's none */
7052 if ((frag_offset_flags = ntohs(ipha->ipha_fragment_offset_and_flags) &
7053 (IPH_MF | IPH_OFFSET)) == 0)
7054 return (mp);
7055
7056 /*
7057 * We utilize hardware computed checksum info only for UDP since
7058 * IP fragmentation is a normal occurrence for the protocol. In
7059 * addition, checksum offload support for IP fragments carrying
7060 * UDP payload is commonly implemented across network adapters.
7061 */
7062 ASSERT(ira->ira_rill != NULL);
7063 if (proto == IPPROTO_UDP && dohwcksum &&
7064 ILL_HCKSUM_CAPABLE(ira->ira_rill) &&
7065 (DB_CKSUMFLAGS(mp) & (HCK_FULLCKSUM | HCK_PARTIALCKSUM))) {
7066 mblk_t *mp1 = mp->b_cont;
7067 int32_t len;
7068
7069 /* Record checksum information from the packet */
7070 sum_val = (uint32_t)DB_CKSUM16(mp);
7071 sum_flags = DB_CKSUMFLAGS(mp);
7072
7073 /* IP payload offset from beginning of mblk */
7074 offset = ((uchar_t *)ipha + IPH_HDR_LENGTH(ipha)) - mp->b_rptr;
7075
7076 if ((sum_flags & HCK_PARTIALCKSUM) &&
7077 (mp1 == NULL || mp1->b_cont == NULL) &&
7078 offset >= DB_CKSUMSTART(mp) &&
7079 ((len = offset - DB_CKSUMSTART(mp)) & 1) == 0) {
7080 uint32_t adj;
7081 /*
7082 * Partial checksum has been calculated by hardware
7083 * and attached to the packet; in addition, any
7084 * prepended extraneous data is even byte aligned.
7085 * If any such data exists, we adjust the checksum;
7086 * this would also handle any postpended data.
7087 */
7088 IP_ADJCKSUM_PARTIAL(mp->b_rptr + DB_CKSUMSTART(mp),
7089 mp, mp1, len, adj);
7090
7091 /* One's complement subtract extraneous checksum */
7092 if (adj >= sum_val)
7093 sum_val = ~(adj - sum_val) & 0xFFFF;
7094 else
7095 sum_val -= adj;
7096 }
7097 } else {
7098 sum_val = 0;
7099 sum_flags = 0;
7100 }
7101
7102 /* Clear hardware checksumming flag */
7103 DB_CKSUMFLAGS(mp) = 0;
7104
7105 ident = ipha->ipha_ident;
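	/*
	 * The 13-bit fragment offset field is in units of 8 bytes;
	 * shifting left by 3 converts it to a byte offset while the
	 * IPH_MF bit is shifted out of the low 16 bits kept by the mask.
	 */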
7106 offset = (frag_offset_flags << 3) & 0xFFFF;
7107 src = ipha->ipha_src;
7108 dst = ipha->ipha_dst;
7109 hdr_length = IPH_HDR_LENGTH(ipha);
7110 end = ntohs(ipha->ipha_length) - hdr_length;
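	/*
	 * At this point 'end' is just the payload length; for a non-first
	 * fragment the byte offset is added below so that [offset, end)
	 * describes this fragment's range within the reassembled datagram.
	 */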
7111
7112 /* If end == 0 then we have a packet with no data, so just free it */
7113 if (end == 0) {
7114 freemsg(mp);
7115 return (NULL);
7116 }
7117
7118 /* Record the ECN field info. */
7119 ecn_info = (ipha->ipha_type_of_service & 0x3);
7120 if (offset != 0) {
7121 /*
7122 * If this isn't the first piece, strip the header, and
7123 * add the offset to the end value.
7124 */
7125 mp->b_rptr += hdr_length;
7126 end += offset;
7127 }
7128
	/*
	 * Handle vnic loopback of fragments: a shared dblk (db_ref > 2)
	 * is not charged against the reassembly byte quota.
	 */
7130 if (mp->b_datap->db_ref > 2)
7131 msg_len = 0;
7132 else
7133 msg_len = MBLKSIZE(mp);
7134
7135 tail_mp = mp;
7136 while (tail_mp->b_cont != NULL) {
7137 tail_mp = tail_mp->b_cont;
7138 if (tail_mp->b_datap->db_ref <= 2)
7139 msg_len += MBLKSIZE(tail_mp);
7140 }
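	/*
	 * Note that msg_len accumulates the allocated buffer sizes
	 * (MBLKSIZE) rather than the data lengths, since the reassembly
	 * quota below bounds memory consumption.
	 */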
7141
7142 /* If the reassembly list for this ILL will get too big, prune it */
7143 if ((msg_len + sizeof (*ipf) + ill->ill_frag_count) >=
7144 ipst->ips_ip_reass_queue_bytes) {
7145 DTRACE_PROBE3(ip_reass_queue_bytes, uint_t, msg_len,
7146 uint_t, ill->ill_frag_count,
7147 uint_t, ipst->ips_ip_reass_queue_bytes);
7148 ill_frag_prune(ill,
7149 (ipst->ips_ip_reass_queue_bytes < msg_len) ? 0 :
7150 (ipst->ips_ip_reass_queue_bytes - msg_len));
7151 pruned = B_TRUE;
7152 }
7153
7154 ipfb = &ill->ill_frag_hash_tbl[ILL_FRAG_HASH(src, ident)];
7155 mutex_enter(&ipfb->ipfb_lock);
7156
7157 ipfp = &ipfb->ipfb_ipf;
7158 /* Try to find an existing fragment queue for this packet. */
7159 for (;;) {
7160 ipf = ipfp[0];
7161 if (ipf != NULL) {
			/*
			 * It has to match on ident, src/dst address and
			 * protocol.
			 */
7165 if (ipf->ipf_ident == ident &&
7166 ipf->ipf_src == src &&
7167 ipf->ipf_dst == dst &&
7168 ipf->ipf_protocol == proto) {
				/*
				 * If we have received too many duplicate
				 * fragments for this packet, free it.
				 */
7174 if (ipf->ipf_num_dups > ip_max_frag_dups) {
7175 ill_frag_free_pkts(ill, ipfb, ipf, 1);
7176 freemsg(mp);
7177 mutex_exit(&ipfb->ipfb_lock);
7178 return (NULL);
7179 }
7180 /* Found it. */
7181 break;
7182 }
7183 ipfp = &ipf->ipf_hash_next;
7184 continue;
7185 }
7186
		/*
		 * If we pruned the list, do we want to store this new
		 * fragment? We apply an optimization here based on the
		 * fact that most fragments will be received in order.
		 * So if the offset of this incoming fragment is zero,
		 * it is the first fragment of a new packet. We will
		 * keep it. Otherwise drop the fragment, as we have
		 * probably pruned the packet already (since the
		 * packet cannot be found).
		 */
7197 if (pruned && offset != 0) {
7198 mutex_exit(&ipfb->ipfb_lock);
7199 freemsg(mp);
7200 return (NULL);
7201 }
7202
7203 if (ipfb->ipfb_frag_pkts >= MAX_FRAG_PKTS(ipst)) {
7204 /*
7205 * Too many fragmented packets in this hash
7206 * bucket. Free the oldest.
7207 */
7208 ill_frag_free_pkts(ill, ipfb, ipfb->ipfb_ipf, 1);
7209 }
7210
7211 /* New guy. Allocate a frag message. */
7212 mp1 = allocb(sizeof (*ipf), BPRI_MED);
7213 if (mp1 == NULL) {
7214 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
7215 ip_drop_input("ipIfStatsInDiscards", mp, ill);
7216 freemsg(mp);
7217 reass_done:
7218 mutex_exit(&ipfb->ipfb_lock);
7219 return (NULL);
7220 }
7221
7222 BUMP_MIB(ill->ill_ip_mib, ipIfStatsReasmReqds);
7223 mp1->b_cont = mp;
7224
7225 /* Initialize the fragment header. */
7226 ipf = (ipf_t *)mp1->b_rptr;
7227 ipf->ipf_mp = mp1;
7228 ipf->ipf_ptphn = ipfp;
7229 ipfp[0] = ipf;
7230 ipf->ipf_hash_next = NULL;
7231 ipf->ipf_ident = ident;
7232 ipf->ipf_protocol = proto;
7233 ipf->ipf_src = src;
7234 ipf->ipf_dst = dst;
7235 ipf->ipf_nf_hdr_len = 0;
7236 /* Record reassembly start time. */
7237 ipf->ipf_timestamp = gethrestime_sec();
7238 /* Record ipf generation and account for frag header */
7239 ipf->ipf_gen = ill->ill_ipf_gen++;
7240 ipf->ipf_count = MBLKSIZE(mp1);
7241 ipf->ipf_last_frag_seen = B_FALSE;
7242 ipf->ipf_ecn = ecn_info;
7243 ipf->ipf_num_dups = 0;
7244 ipfb->ipfb_frag_pkts++;
7245 ipf->ipf_checksum = 0;
7246 ipf->ipf_checksum_flags = 0;
7247
7248 /* Store checksum value in fragment header */
7249 if (sum_flags != 0) {
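			/*
			 * Fold the 32-bit sum into 16 bits; the second pass
			 * folds any carry generated by the first.
			 */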
7250 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16);
7251 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16);
7252 ipf->ipf_checksum = sum_val;
7253 ipf->ipf_checksum_flags = sum_flags;
7254 }
7255
7256 /*
7257 * We handle reassembly two ways. In the easy case,
7258 * where all the fragments show up in order, we do
7259 * minimal bookkeeping, and just clip new pieces on
7260 * the end. If we ever see a hole, then we go off
7261 * to ip_reassemble which has to mark the pieces and
7262 * keep track of the number of holes, etc. Obviously,
7263 * the point of having both mechanisms is so we can
7264 * handle the easy case as efficiently as possible.
7265 */
7266 if (offset == 0) {
7267 /* Easy case, in-order reassembly so far. */
7268 ipf->ipf_count += msg_len;
7269 ipf->ipf_tail_mp = tail_mp;
7270 /*
7271 * Keep track of next expected offset in
7272 * ipf_end.
7273 */
7274 ipf->ipf_end = end;
7275 ipf->ipf_nf_hdr_len = hdr_length;
7276 } else {
7277 /* Hard case, hole at the beginning. */
7278 ipf->ipf_tail_mp = NULL;
7279 /*
7280 * ipf_end == 0 means that we have given up
7281 * on easy reassembly.
7282 */
7283 ipf->ipf_end = 0;
7284
7285 /* Forget checksum offload from now on */
7286 ipf->ipf_checksum_flags = 0;
7287
7288 /*
7289 * ipf_hole_cnt is set by ip_reassemble.
7290 * ipf_count is updated by ip_reassemble.
7291 * No need to check for return value here
7292 * as we don't expect reassembly to complete
7293 * or fail for the first fragment itself.
7294 */
7295 (void) ip_reassemble(mp, ipf,
7296 (frag_offset_flags & IPH_OFFSET) << 3,
7297 (frag_offset_flags & IPH_MF), ill, msg_len);
7298 }
7299 /* Update per ipfb and ill byte counts */
7300 ipfb->ipfb_count += ipf->ipf_count;
7301 ASSERT(ipfb->ipfb_count > 0); /* Wraparound */
7302 atomic_add_32(&ill->ill_frag_count, ipf->ipf_count);
7303 /* If the frag timer wasn't already going, start it. */
7304 mutex_enter(&ill->ill_lock);
7305 ill_frag_timer_start(ill);
7306 mutex_exit(&ill->ill_lock);
7307 goto reass_done;
7308 }
7309
	/*
	 * If the packet's checksum flags have changed (it could be coming
	 * up from an interface different from the previous one, and
	 * therefore with a different checksum capability), then forget
	 * about any stored checksum state. Otherwise add the value to the
	 * existing one stored in the fragment header.
	 */
7317 if (sum_flags != 0 && sum_flags == ipf->ipf_checksum_flags) {
7318 sum_val += ipf->ipf_checksum;
7319 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16);
7320 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16);
7321 ipf->ipf_checksum = sum_val;
7322 } else if (ipf->ipf_checksum_flags != 0) {
7323 /* Forget checksum offload from now on */
7324 ipf->ipf_checksum_flags = 0;
7325 }
7326
7327 /*
7328 * We have a new piece of a datagram which is already being
7329 * reassembled. Update the ECN info if all IP fragments
7330 * are ECN capable. If there is one which is not, clear
7331 * all the info. If there is at least one which has CE
7332 * code point, IP needs to report that up to transport.
7333 */
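	/*
	 * In summary (this fragment's ECN vs. the state so far):
	 * NECT + anything -> NECT; CE + ECT -> CE; ECT + ECT -> ECT.
	 */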
7334 if (ecn_info != IPH_ECN_NECT && ipf->ipf_ecn != IPH_ECN_NECT) {
7335 if (ecn_info == IPH_ECN_CE)
7336 ipf->ipf_ecn = IPH_ECN_CE;
7337 } else {
7338 ipf->ipf_ecn = IPH_ECN_NECT;
7339 }
7340 if (offset && ipf->ipf_end == offset) {
7341 /* The new fragment fits at the end */
7342 ipf->ipf_tail_mp->b_cont = mp;
7343 /* Update the byte count */
7344 ipf->ipf_count += msg_len;
7345 /* Update per ipfb and ill byte counts */
7346 ipfb->ipfb_count += msg_len;
7347 ASSERT(ipfb->ipfb_count > 0); /* Wraparound */
7348 atomic_add_32(&ill->ill_frag_count, msg_len);
7349 if (frag_offset_flags & IPH_MF) {
7350 /* More to come. */
7351 ipf->ipf_end = end;
7352 ipf->ipf_tail_mp = tail_mp;
7353 goto reass_done;
7354 }
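		/*
		 * MF is clear and the fragment fit at the end: in-order
		 * reassembly has just completed; fall through to the
		 * completion code below.
		 */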
7355 } else {
7356 /* Go do the hard cases. */
7357 int ret;
7358
7359 if (offset == 0)
7360 ipf->ipf_nf_hdr_len = hdr_length;
7361
7362 /* Save current byte count */
7363 count = ipf->ipf_count;
7364 ret = ip_reassemble(mp, ipf,
7365 (frag_offset_flags & IPH_OFFSET) << 3,
7366 (frag_offset_flags & IPH_MF), ill, msg_len);
7367 /* Count of bytes added and subtracted (freeb()ed) */
7368 count = ipf->ipf_count - count;
7369 if (count) {
7370 /* Update per ipfb and ill byte counts */
7371 ipfb->ipfb_count += count;
7372 ASSERT(ipfb->ipfb_count > 0); /* Wraparound */
7373 atomic_add_32(&ill->ill_frag_count, count);
7374 }
7375 if (ret == IP_REASS_PARTIAL) {
7376 goto reass_done;
7377 } else if (ret == IP_REASS_FAILED) {
7378 /* Reassembly failed. Free up all resources */
7379 ill_frag_free_pkts(ill, ipfb, ipf, 1);
7380 for (t_mp = mp; t_mp != NULL; t_mp = t_mp->b_cont) {
7381 IP_REASS_SET_START(t_mp, 0);
7382 IP_REASS_SET_END(t_mp, 0);
7383 }
7384 freemsg(mp);
7385 goto reass_done;
7386 }
7387 /* We will reach here iff 'ret' is IP_REASS_COMPLETE */
7388 }
7389 /*
7390 * We have completed reassembly. Unhook the frag header from
7391 * the reassembly list.
7392 *
7393 * Before we free the frag header, record the ECN info
7394 * to report back to the transport.
7395 */
7396 ecn_info = ipf->ipf_ecn;
7397 BUMP_MIB(ill->ill_ip_mib, ipIfStatsReasmOKs);
7398 ipfp = ipf->ipf_ptphn;
7399
	/* We need to supply these to the caller */
7401 if ((sum_flags = ipf->ipf_checksum_flags) != 0)
7402 sum_val = ipf->ipf_checksum;
7403 else
7404 sum_val = 0;
7405
7406 mp1 = ipf->ipf_mp;
7407 count = ipf->ipf_count;
7408 ipf = ipf->ipf_hash_next;
7409 if (ipf != NULL)
7410 ipf->ipf_ptphn = ipfp;
7411 ipfp[0] = ipf;
7412 atomic_add_32(&ill->ill_frag_count, -count);
7413 ASSERT(ipfb->ipfb_count >= count);
7414 ipfb->ipfb_count -= count;
7415 ipfb->ipfb_frag_pkts--;
7416 mutex_exit(&ipfb->ipfb_lock);
7417 /* Ditch the frag header. */
7418 mp = mp1->b_cont;
7419
7420 freeb(mp1);
7421
7422 /* Restore original IP length in header. */
7423 packet_size = (uint32_t)msgdsize(mp);
7424 if (packet_size > IP_MAXPACKET) {
7425 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7426 ip_drop_input("Reassembled packet too large", mp, ill);
7427 freemsg(mp);
7428 return (NULL);
7429 }
7430
7431 if (DB_REF(mp) > 1) {
7432 mblk_t *mp2 = copymsg(mp);
7433
7434 if (mp2 == NULL) {
7435 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
7436 ip_drop_input("ipIfStatsInDiscards", mp, ill);
7437 freemsg(mp);
7438 return (NULL);
7439 }
7440 freemsg(mp);
7441 mp = mp2;
7442 }
7443 ipha = (ipha_t *)mp->b_rptr;
7444
7445 ipha->ipha_length = htons((uint16_t)packet_size);
7446 /* We're now complete, zip the frag state */
7447 ipha->ipha_fragment_offset_and_flags = 0;
7448 /* Record the ECN info. */
7449 ipha->ipha_type_of_service &= 0xFC;
7450 ipha->ipha_type_of_service |= ecn_info;
7451
7452 /* Update the receive attributes */
7453 ira->ira_pktlen = packet_size;
7454 ira->ira_ip_hdr_length = IPH_HDR_LENGTH(ipha);
7455
7456 /* Reassembly is successful; set checksum information in packet */
7457 DB_CKSUM16(mp) = (uint16_t)sum_val;
7458 DB_CKSUMFLAGS(mp) = sum_flags;
7459 DB_CKSUMSTART(mp) = ira->ira_ip_hdr_length;
7460
7461 return (mp);
7462 }
7463
/*
 * Pullup function that should be used for IP input in order to
 * ensure we do not lose the L2 source address; we need the L2 source
 * address for IP_RECVSLLA and for ndp_input.
 *
 * We return either NULL or b_rptr.
 */
7471 void *
7472 ip_pullup(mblk_t *mp, ssize_t len, ip_recv_attr_t *ira)
7473 {
7474 ill_t *ill = ira->ira_ill;
7475
7476 if (ip_rput_pullups++ == 0) {
7477 (void) mi_strlog(ill->ill_rq, 1, SL_ERROR|SL_TRACE,
7478 "ip_pullup: %s forced us to "
7479 " pullup pkt, hdr len %ld, hdr addr %p",
7480 ill->ill_name, len, (void *)mp->b_rptr);
7481 }
7482 if (!(ira->ira_flags & IRAF_L2SRC_SET))
7483 ip_setl2src(mp, ira, ira->ira_rill);
7484 ASSERT(ira->ira_flags & IRAF_L2SRC_SET);
7485 if (!pullupmsg(mp, len))
7486 return (NULL);
7487 else
7488 return (mp->b_rptr);
7489 }
7490
/*
 * Make sure ira_l2src has an address. If we don't have one, fill it with
 * zeros. When called from the ULP, ira_rill will be NULL, hence the caller
 * has to pass in the ill.
 */
7496 /* ARGSUSED */
7497 void
7498 ip_setl2src(mblk_t *mp, ip_recv_attr_t *ira, ill_t *ill)
7499 {
7500 const uchar_t *addr;
7501 int alen;
7502
7503 if (ira->ira_flags & IRAF_L2SRC_SET)
7504 return;
7505
7506 ASSERT(ill != NULL);
7507 alen = ill->ill_phys_addr_length;
7508 ASSERT(alen <= sizeof (ira->ira_l2src));
7509 if (ira->ira_mhip != NULL &&
7510 (addr = ira->ira_mhip->mhi_saddr) != NULL) {
7511 bcopy(addr, ira->ira_l2src, alen);
7512 } else if ((ira->ira_flags & IRAF_L2SRC_LOOPBACK) &&
7513 (addr = ill->ill_phys_addr) != NULL) {
7514 bcopy(addr, ira->ira_l2src, alen);
7515 } else {
7516 bzero(ira->ira_l2src, alen);
7517 }
7518 ira->ira_flags |= IRAF_L2SRC_SET;
7519 }
7520
/*
 * Check the IP header length and alignment; pull up the message if needed.
 */
7524 mblk_t *
7525 ip_check_and_align_header(mblk_t *mp, uint_t min_size, ip_recv_attr_t *ira)
7526 {
7527 ill_t *ill = ira->ira_ill;
7528 ssize_t len;
7529
7530 len = MBLKL(mp);
7531
7532 if (!OK_32PTR(mp->b_rptr))
7533 IP_STAT(ill->ill_ipst, ip_notaligned);
7534 else
7535 IP_STAT(ill->ill_ipst, ip_recv_pullup);
7536
7537 /* Guard against bogus device drivers */
7538 if (len < 0) {
7539 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7540 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7541 freemsg(mp);
7542 return (NULL);
7543 }
7544
7545 if (len == 0) {
7546 /* GLD sometimes sends up mblk with b_rptr == b_wptr! */
7547 mblk_t *mp1 = mp->b_cont;
7548
7549 if (!(ira->ira_flags & IRAF_L2SRC_SET))
7550 ip_setl2src(mp, ira, ira->ira_rill);
7551 ASSERT(ira->ira_flags & IRAF_L2SRC_SET);
7552
7553 freeb(mp);
7554 mp = mp1;
7555 if (mp == NULL)
7556 return (NULL);
7557
7558 if (OK_32PTR(mp->b_rptr) && MBLKL(mp) >= min_size)
7559 return (mp);
7560 }
7561 if (ip_pullup(mp, min_size, ira) == NULL) {
7562 if (msgdsize(mp) < min_size) {
7563 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7564 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7565 } else {
7566 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
7567 ip_drop_input("ipIfStatsInDiscards", mp, ill);
7568 }
7569 freemsg(mp);
7570 return (NULL);
7571 }
7572 return (mp);
7573 }
7574
/*
 * Common code for IPv4 and IPv6 to check that the data length is consistent
 * with the IP header, trimming any trailing pad and rejecting truncated
 * packets.
 */
7578 mblk_t *
7579 ip_check_length(mblk_t *mp, uchar_t *rptr, ssize_t len, uint_t pkt_len,
7580 uint_t min_size, ip_recv_attr_t *ira)
7581 {
7582 ill_t *ill = ira->ira_ill;
7583
	/*
	 * Make sure the data length is consistent with the IP header.
	 * On entry 'len' is the first mblk's data length minus pkt_len;
	 * once any b_cont data is added in, a negative value means the
	 * packet is truncated and a positive value means trailing pad.
	 */
7588 if (mp->b_cont == NULL) {
7589 /* pkt_len is based on ipha_len, not the mblk length */
7590 if (pkt_len < min_size) {
7591 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7592 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7593 freemsg(mp);
7594 return (NULL);
7595 }
7596 if (len < 0) {
7597 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
7598 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
7599 freemsg(mp);
7600 return (NULL);
7601 }
7602 /* Drop any pad */
7603 mp->b_wptr = rptr + pkt_len;
7604 } else if ((len += msgdsize(mp->b_cont)) != 0) {
7605 ASSERT(pkt_len >= min_size);
7606 if (pkt_len < min_size) {
7607 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7608 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7609 freemsg(mp);
7610 return (NULL);
7611 }
7612 if (len < 0) {
7613 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
7614 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
7615 freemsg(mp);
7616 return (NULL);
7617 }
7618 /* Drop any pad */
7619 (void) adjmsg(mp, -len);
7620 /*
7621 * adjmsg may have freed an mblk from the chain, hence
7622 * invalidate any hw checksum here. This will force IP to
7623 * calculate the checksum in sw, but only for this packet.
7624 */
7625 DB_CKSUMFLAGS(mp) = 0;
7626 IP_STAT(ill->ill_ipst, ip_multimblk);
7627 }
7628 return (mp);
7629 }
7630
/*
 * Check that the IPv4 opt_len is consistent with the packet and pull up
 * the options.
 */
7635 mblk_t *
7636 ip_check_optlen(mblk_t *mp, ipha_t *ipha, uint_t opt_len, uint_t pkt_len,
7637 ip_recv_attr_t *ira)
7638 {
7639 ill_t *ill = ira->ira_ill;
7640 ssize_t len;
7641
7642 /* Assume no IPv6 packets arrive over the IPv4 queue */
7643 if (IPH_HDR_VERSION(ipha) != IPV4_VERSION) {
7644 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7645 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInWrongIPVersion);
7646 ip_drop_input("IPvN packet on IPv4 ill", mp, ill);
7647 freemsg(mp);
7648 return (NULL);
7649 }
7650
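	/*
	 * The IPv4 header-length field is 4 bits, so a header is at most
	 * 15 words; opt_len counts the option words beyond the 5-word
	 * base header, hence at most 10.
	 */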
7651 if (opt_len > (15 - IP_SIMPLE_HDR_LENGTH_IN_WORDS)) {
7652 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7653 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7654 freemsg(mp);
7655 return (NULL);
7656 }
7657 /*
7658 * Recompute complete header length and make sure we
7659 * have access to all of it.
7660 */
7661 len = ((size_t)opt_len + IP_SIMPLE_HDR_LENGTH_IN_WORDS) << 2;
7662 if (len > (mp->b_wptr - mp->b_rptr)) {
7663 if (len > pkt_len) {
7664 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors);
7665 ip_drop_input("ipIfStatsInHdrErrors", mp, ill);
7666 freemsg(mp);
7667 return (NULL);
7668 }
7669 if (ip_pullup(mp, len, ira) == NULL) {
7670 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
7671 ip_drop_input("ipIfStatsInDiscards", mp, ill);
7672 freemsg(mp);
7673 return (NULL);
7674 }
7675 }
7676 return (mp);
7677 }
7678
7679 /*
7680 * Returns a new ire, or the same ire, or NULL.
7681 * If a different IRE is returned, then it is held; the caller
7682 * needs to release it.
7683 * In no case is there any hold/release on the ire argument.
7684 */
7685 ire_t *
7686 ip_check_multihome(void *addr, ire_t *ire, ill_t *ill)
7687 {
7688 ire_t *new_ire;
7689 ill_t *ire_ill;
7690 uint_t ifindex;
7691 ip_stack_t *ipst = ill->ill_ipst;
7692 boolean_t strict_check = B_FALSE;
7693
7694 /*
7695 * IPMP common case: if IRE and ILL are in the same group, there's no
7696 * issue (e.g. packet received on an underlying interface matched an
7697 * IRE_LOCAL on its associated group interface).
7698 */
7699 ASSERT(ire->ire_ill != NULL);
7700 if (IS_IN_SAME_ILLGRP(ill, ire->ire_ill))
7701 return (ire);
7702
7703 /*
7704 * Do another ire lookup here, using the ingress ill, to see if the
7705 * interface is in a usesrc group.
7706 * As long as the ills belong to the same group, we don't consider
7707 * them to be arriving on the wrong interface. Thus, if the switch
7708 * is doing inbound load spreading, we won't drop packets when the
7709 * ip*_strict_dst_multihoming switch is on.
7710 * We also need to check for IPIF_UNNUMBERED point2point interfaces
7711 * where the local address may not be unique. In this case we were
7712 * at the mercy of the initial ire lookup and the IRE_LOCAL it
7713 * actually returned. The new lookup, which is more specific, should
7714 * only find the IRE_LOCAL associated with the ingress ill if one
7715 * exists.
7716 */
7717 if (ire->ire_ipversion == IPV4_VERSION) {
7718 if (ipst->ips_ip_strict_dst_multihoming)
7719 strict_check = B_TRUE;
7720 new_ire = ire_ftable_lookup_v4(*((ipaddr_t *)addr), 0, 0,
7721 IRE_LOCAL, ill, ALL_ZONES, NULL,
7722 (MATCH_IRE_TYPE|MATCH_IRE_ILL), 0, ipst, NULL);
7723 } else {
7724 ASSERT(!IN6_IS_ADDR_MULTICAST((in6_addr_t *)addr));
7725 if (ipst->ips_ipv6_strict_dst_multihoming)
7726 strict_check = B_TRUE;
7727 new_ire = ire_ftable_lookup_v6((in6_addr_t *)addr, NULL, NULL,
7728 IRE_LOCAL, ill, ALL_ZONES, NULL,
7729 (MATCH_IRE_TYPE|MATCH_IRE_ILL), 0, ipst, NULL);
7730 }
7731 /*
7732 * If the same ire that was returned in ip_input() is found then this
7733 * is an indication that usesrc groups are in use. The packet
7734 * arrived on a different ill in the group than the one associated with
7735 * the destination address. If a different ire was found then the same
7736 * IP address must be hosted on multiple ills. This is possible with
7737 * unnumbered point2point interfaces. We switch to use this new ire in
7738 * order to have accurate interface statistics.
7739 */
7740 if (new_ire != NULL) {
		/*
		 * If a different IRE was found it is returned held and the
		 * caller must release it; if it is unchanged we drop our
		 * extra hold here.
		 */
7742 if (new_ire != ire)
7743 return (new_ire);
7744 /* Unchanged */
7745 ire_refrele(new_ire);
7746 return (ire);
7747 }
7748
7749 /*
7750 * Chase pointers once and store locally.
7751 */
7752 ASSERT(ire->ire_ill != NULL);
7753 ire_ill = ire->ire_ill;
7754 ifindex = ill->ill_usesrc_ifindex;
7755
7756 /*
7757 * Check if it's a legal address on the 'usesrc' interface.
7758 * For IPMP data addresses the IRE_LOCAL is the upper, hence we
7759 * can just check phyint_ifindex.
7760 */
7761 if (ifindex != 0 && ifindex == ire_ill->ill_phyint->phyint_ifindex) {
7762 return (ire);
7763 }
7764
	/*
	 * If the ip*_strict_dst_multihoming switch is on then we can
	 * only accept this packet if both the receiving and destination
	 * interfaces are marked as routers (ILLF_ROUTER).
	 */
7769 if (!(strict_check))
7770 return (ire);
7771
7772 if ((ill->ill_flags & ire->ire_ill->ill_flags & ILLF_ROUTER) != 0) {
7773 return (ire);
7774 }
7775 return (NULL);
7776 }
7777
/*
 * This function is used to construct a mac_header_info_s from a
 * DL_UNITDATA_IND message.
 * The address fields in the mhi structure point into the message,
 * thus the caller can't use those fields after freeing the message.
 *
 * We determine whether the packet received is a non-unicast packet
 * and in doing so, determine whether or not it is broadcast vs multicast.
 * For it to be a broadcast packet, we must have the appropriate mblk_t
 * hanging off the ill_t. If this is either not present or doesn't match
 * the destination mac address in the DL_UNITDATA_IND, the packet is deemed
 * to be multicast. Thus NICs that have no broadcast address (or no
 * capability for one, such as point to point links) can never have a
 * packet classified as broadcast.
 */
7793 void
7794 ip_dlur_to_mhi(ill_t *ill, mblk_t *mb, struct mac_header_info_s *mhip)
7795 {
7796 dl_unitdata_ind_t *ind = (dl_unitdata_ind_t *)mb->b_rptr;
7797 mblk_t *bmp;
7798 uint_t extra_offset;
7799
7800 bzero(mhip, sizeof (struct mac_header_info_s));
7801
7802 mhip->mhi_dsttype = MAC_ADDRTYPE_UNICAST;
7803
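	/*
	 * A positive ill_sap_length means the SAP precedes the physical
	 * address within the DLSAP address, so skip over it to reach the
	 * address; when it is negative the address comes first and no
	 * extra offset is needed.
	 */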
7804 if (ill->ill_sap_length < 0)
7805 extra_offset = 0;
7806 else
7807 extra_offset = ill->ill_sap_length;
7808
7809 mhip->mhi_daddr = (uchar_t *)ind + ind->dl_dest_addr_offset +
7810 extra_offset;
7811 mhip->mhi_saddr = (uchar_t *)ind + ind->dl_src_addr_offset +
7812 extra_offset;
7813
7814 if (!ind->dl_group_address)
7815 return;
7816
7817 /* Multicast or broadcast */
7818 mhip->mhi_dsttype = MAC_ADDRTYPE_MULTICAST;
7819
7820 if (ind->dl_dest_addr_offset > sizeof (*ind) &&
7821 ind->dl_dest_addr_offset + ind->dl_dest_addr_length < MBLKL(mb) &&
7822 (bmp = ill->ill_bcast_mp) != NULL) {
7823 dl_unitdata_req_t *dlur;
7824 uint8_t *bphys_addr;
7825
7826 dlur = (dl_unitdata_req_t *)bmp->b_rptr;
7827 bphys_addr = (uchar_t *)dlur + dlur->dl_dest_addr_offset +
7828 extra_offset;
7829
7830 if (bcmp(mhip->mhi_daddr, bphys_addr,
7831 ind->dl_dest_addr_length) == 0)
7832 mhip->mhi_dsttype = MAC_ADDRTYPE_BROADCAST;
7833 }
7834 }
7835
/*
 * This function is used to construct a mac_header_info_s from an
 * M_DATA fastpath message from a DLPI driver.
 * The address fields in the mhi structure point into the message,
 * thus the caller can't use those fields after freeing the message.
 *
 * We determine whether the packet received is a non-unicast packet
 * and in doing so, determine whether or not it is broadcast vs multicast.
 * For it to be a broadcast packet, we must have the appropriate mblk_t
 * hanging off the ill_t. If this is either not present or doesn't match
 * the destination mac address in the Ethernet header, the packet is deemed
 * to be multicast. Thus NICs that have no broadcast address (or no
 * capability for one, such as point to point links) can never have a
 * packet classified as broadcast.
 */
7851 void
7852 ip_mdata_to_mhi(ill_t *ill, mblk_t *mp, struct mac_header_info_s *mhip)
7853 {
7854 mblk_t *bmp;
7855 struct ether_header *pether;
7856
7857 bzero(mhip, sizeof (struct mac_header_info_s));
7858
7859 mhip->mhi_dsttype = MAC_ADDRTYPE_UNICAST;
7860
7861 pether = (struct ether_header *)((char *)mp->b_rptr
7862 - sizeof (struct ether_header));
7863
7864 /*
7865 * Make sure the interface is an ethernet type, since we don't
7866 * know the header format for anything but Ethernet. Also make
7867 * sure we are pointing correctly above db_base.
7868 */
7869 if (ill->ill_type != IFT_ETHER)
7870 return;
7871
7872 retry:
7873 if ((uchar_t *)pether < mp->b_datap->db_base)
7874 return;
7875
	/* Step back over any VLAN tags (4 bytes each) until we match. */
7877 if (ill->ill_isv6) {
7878 if (pether->ether_type != htons(ETHERTYPE_IPV6)) {
7879 pether = (struct ether_header *)((char *)pether - 4);
7880 goto retry;
7881 }
7882 } else {
7883 if (pether->ether_type != htons(ETHERTYPE_IP)) {
7884 pether = (struct ether_header *)((char *)pether - 4);
7885 goto retry;
7886 }
7887 }
7888 mhip->mhi_daddr = (uchar_t *)&pether->ether_dhost;
7889 mhip->mhi_saddr = (uchar_t *)&pether->ether_shost;
7890
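	/*
	 * The low-order bit of the first octet of an Ethernet destination
	 * address distinguishes unicast (0) from group (1) addresses.
	 */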
7891 if (!(mhip->mhi_daddr[0] & 0x01))
7892 return;
7893
7894 /* Multicast or broadcast */
7895 mhip->mhi_dsttype = MAC_ADDRTYPE_MULTICAST;
7896
7897 if ((bmp = ill->ill_bcast_mp) != NULL) {
7898 dl_unitdata_req_t *dlur;
7899 uint8_t *bphys_addr;
7900 uint_t addrlen;
7901
7902 dlur = (dl_unitdata_req_t *)bmp->b_rptr;
7903 addrlen = dlur->dl_dest_addr_length;
7904 if (ill->ill_sap_length < 0) {
7905 bphys_addr = (uchar_t *)dlur +
7906 dlur->dl_dest_addr_offset;
7907 addrlen += ill->ill_sap_length;
7908 } else {
7909 bphys_addr = (uchar_t *)dlur +
7910 dlur->dl_dest_addr_offset +
7911 ill->ill_sap_length;
7912 addrlen -= ill->ill_sap_length;
7913 }
7914 if (bcmp(mhip->mhi_daddr, bphys_addr, addrlen) == 0)
7915 mhip->mhi_dsttype = MAC_ADDRTYPE_BROADCAST;
7916 }
7917 }
7918
/*
 * Handle anything but M_DATA messages.
 * We see DL_UNITDATA_IND messages, which are part of the data path,
 * as well as the other messages from the driver.
 */
7924 void
7925 ip_rput_notdata(ill_t *ill, mblk_t *mp)
7926 {
7927 mblk_t *first_mp;
7928 struct iocblk *iocp;
7929 struct mac_header_info_s mhi;
7930
7931 switch (DB_TYPE(mp)) {
7932 case M_PROTO:
7933 case M_PCPROTO: {
7934 if (((dl_unitdata_ind_t *)mp->b_rptr)->dl_primitive !=
7935 DL_UNITDATA_IND) {
7936 /* Go handle anything other than data elsewhere. */
7937 ip_rput_dlpi(ill, mp);
7938 return;
7939 }
7940
7941 first_mp = mp;
7942 mp = first_mp->b_cont;
7943 first_mp->b_cont = NULL;
7944
7945 if (mp == NULL) {
7946 freeb(first_mp);
7947 return;
7948 }
7949 ip_dlur_to_mhi(ill, first_mp, &mhi);
7950 if (ill->ill_isv6)
7951 ip_input_v6(ill, NULL, mp, &mhi);
7952 else
7953 ip_input(ill, NULL, mp, &mhi);
7954
7955 /* Ditch the DLPI header. */
7956 freeb(first_mp);
7957 return;
7958 }
7959 case M_IOCACK:
7960 iocp = (struct iocblk *)mp->b_rptr;
7961 switch (iocp->ioc_cmd) {
7962 case DL_IOC_HDR_INFO:
7963 ill_fastpath_ack(ill, mp);
7964 return;
7965 default:
7966 putnext(ill->ill_rq, mp);
7967 return;
7968 }
7969 /* FALLTHRU */
7970 case M_ERROR:
7971 case M_HANGUP:
7972 mutex_enter(&ill->ill_lock);
7973 if (ill->ill_state_flags & ILL_CONDEMNED) {
7974 mutex_exit(&ill->ill_lock);
7975 freemsg(mp);
7976 return;
7977 }
7978 ill_refhold_locked(ill);
7979 mutex_exit(&ill->ill_lock);
7980 qwriter_ip(ill, ill->ill_rq, mp, ip_rput_other, CUR_OP,
7981 B_FALSE);
7982 return;
7983 case M_CTL:
7984 putnext(ill->ill_rq, mp);
7985 return;
7986 case M_IOCNAK:
7987 ip1dbg(("got iocnak "));
7988 iocp = (struct iocblk *)mp->b_rptr;
7989 switch (iocp->ioc_cmd) {
7990 case DL_IOC_HDR_INFO:
7991 ip_rput_other(NULL, ill->ill_rq, mp, NULL);
7992 return;
7993 default:
7994 break;
7995 }
7996 /* FALLTHRU */
7997 default:
7998 putnext(ill->ill_rq, mp);
7999 return;
8000 }
8001 }
8002
8003 /* Read side put procedure. Packets coming from the wire arrive here. */
8004 void
8005 ip_rput(queue_t *q, mblk_t *mp)
8006 {
8007 ill_t *ill;
8008 union DL_primitives *dl;
8009
8010 ill = (ill_t *)q->q_ptr;
8011
8012 if (ill->ill_state_flags & (ILL_CONDEMNED | ILL_LL_SUBNET_PENDING)) {
8013 /*
8014 * If things are opening or closing, only accept high-priority
8015 * DLPI messages. (On open ill->ill_ipif has not yet been
8016 * created; on close, things hanging off the ill may have been
8017 * freed already.)
8018 */
8019 dl = (union DL_primitives *)mp->b_rptr;
8020 if (DB_TYPE(mp) != M_PCPROTO ||
8021 dl->dl_primitive == DL_UNITDATA_IND) {
8022 inet_freemsg(mp);
8023 return;
8024 }
8025 }
8026 if (DB_TYPE(mp) == M_DATA) {
8027 struct mac_header_info_s mhi;
8028
8029 ip_mdata_to_mhi(ill, mp, &mhi);
8030 ip_input(ill, NULL, mp, &mhi);
8031 } else {
8032 ip_rput_notdata(ill, mp);
8033 }
8034 }
8035
/*
 * Move the information to a copy of the message: copymsg() the packet
 * so the caller gets private dblks, preserving ira_l2src and any
 * hardware checksum state.
 */
8039 mblk_t *
8040 ip_fix_dbref(mblk_t *mp, ip_recv_attr_t *ira)
8041 {
8042 mblk_t *mp1;
8043 ill_t *ill = ira->ira_ill;
8044 ip_stack_t *ipst = ill->ill_ipst;
8045
8046 IP_STAT(ipst, ip_db_ref);
8047
	/* Make sure we have ira_l2src before we lose the original mblk */
8049 if (!(ira->ira_flags & IRAF_L2SRC_SET))
8050 ip_setl2src(mp, ira, ira->ira_rill);
8051
8052 mp1 = copymsg(mp);
8053 if (mp1 == NULL) {
8054 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
8055 ip_drop_input("ipIfStatsInDiscards", mp, ill);
8056 freemsg(mp);
8057 return (NULL);
8058 }
8059 /* preserve the hardware checksum flags and data, if present */
8060 if (DB_CKSUMFLAGS(mp) != 0) {
8061 DB_CKSUMFLAGS(mp1) = DB_CKSUMFLAGS(mp);
8062 DB_CKSUMSTART(mp1) = DB_CKSUMSTART(mp);
8063 DB_CKSUMSTUFF(mp1) = DB_CKSUMSTUFF(mp);
8064 DB_CKSUMEND(mp1) = DB_CKSUMEND(mp);
8065 DB_CKSUM16(mp1) = DB_CKSUM16(mp);
8066 }
8067 freemsg(mp);
8068 return (mp1);
8069 }
8070
8071 static void
8072 ip_dlpi_error(ill_t *ill, t_uscalar_t prim, t_uscalar_t dl_err,
8073 t_uscalar_t err)
8074 {
8075 if (dl_err == DL_SYSERR) {
8076 (void) mi_strlog(ill->ill_rq, 1, SL_CONSOLE|SL_ERROR|SL_TRACE,
8077 "%s: %s failed: DL_SYSERR (errno %u)\n",
8078 ill->ill_name, dl_primstr(prim), err);
8079 return;
8080 }
8081
8082 (void) mi_strlog(ill->ill_rq, 1, SL_CONSOLE|SL_ERROR|SL_TRACE,
8083 "%s: %s failed: %s\n", ill->ill_name, dl_primstr(prim),
8084 dl_errstr(dl_err));
8085 }
8086
8087 /*
8088 * ip_rput_dlpi is called by ip_rput to handle all DLPI messages other
8089 * than DL_UNITDATA_IND messages. If we need to process this message
8090 * exclusively, we call qwriter_ip, in which case we also need to call
8091 * ill_refhold before that, since qwriter_ip does an ill_refrele.
8092 */
8093 void
8094 ip_rput_dlpi(ill_t *ill, mblk_t *mp)
8095 {
8096 dl_ok_ack_t *dloa = (dl_ok_ack_t *)mp->b_rptr;
8097 dl_error_ack_t *dlea = (dl_error_ack_t *)dloa;
8098 queue_t *q = ill->ill_rq;
8099 t_uscalar_t prim = dloa->dl_primitive;
8100 t_uscalar_t reqprim = DL_PRIM_INVAL;
8101
8102 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi",
8103 char *, dl_primstr(prim), ill_t *, ill);
8104 ip1dbg(("ip_rput_dlpi"));
8105
8106 /*
8107 * If we received an ACK but didn't send a request for it, then it
8108 * can't be part of any pending operation; discard up-front.
8109 */
8110 switch (prim) {
8111 case DL_ERROR_ACK:
8112 reqprim = dlea->dl_error_primitive;
8113 ip2dbg(("ip_rput_dlpi(%s): DL_ERROR_ACK for %s (0x%x): %s "
8114 "(0x%x), unix %u\n", ill->ill_name, dl_primstr(reqprim),
8115 reqprim, dl_errstr(dlea->dl_errno), dlea->dl_errno,
8116 dlea->dl_unix_errno));
8117 break;
8118 case DL_OK_ACK:
8119 reqprim = dloa->dl_correct_primitive;
8120 break;
8121 case DL_INFO_ACK:
8122 reqprim = DL_INFO_REQ;
8123 break;
8124 case DL_BIND_ACK:
8125 reqprim = DL_BIND_REQ;
8126 break;
8127 case DL_PHYS_ADDR_ACK:
8128 reqprim = DL_PHYS_ADDR_REQ;
8129 break;
8130 case DL_NOTIFY_ACK:
8131 reqprim = DL_NOTIFY_REQ;
8132 break;
8133 case DL_CAPABILITY_ACK:
8134 reqprim = DL_CAPABILITY_REQ;
8135 break;
8136 }
8137
8138 if (prim != DL_NOTIFY_IND) {
8139 if (reqprim == DL_PRIM_INVAL ||
8140 !ill_dlpi_pending(ill, reqprim)) {
8141 /* Not a DLPI message we support or expected */
8142 freemsg(mp);
8143 return;
8144 }
8145 ip1dbg(("ip_rput: received %s for %s\n", dl_primstr(prim),
8146 dl_primstr(reqprim)));
8147 }
8148
8149 switch (reqprim) {
8150 case DL_UNBIND_REQ:
8151 /*
8152 * NOTE: we mark the unbind as complete even if we got a
8153 * DL_ERROR_ACK, since there's not much else we can do.
8154 */
8155 mutex_enter(&ill->ill_lock);
8156 ill->ill_state_flags &= ~ILL_DL_UNBIND_IN_PROGRESS;
8157 cv_signal(&ill->ill_cv);
8158 mutex_exit(&ill->ill_lock);
8159 break;
8160
8161 case DL_ENABMULTI_REQ:
8162 if (prim == DL_OK_ACK) {
8163 if (ill->ill_dlpi_multicast_state == IDS_INPROGRESS)
8164 ill->ill_dlpi_multicast_state = IDS_OK;
8165 }
8166 break;
8167 }
8168
8169 /*
8170 * The message is one we're waiting for (or DL_NOTIFY_IND), but we
8171 * need to become writer to continue to process it. Because an
8172 * exclusive operation doesn't complete until replies to all queued
8173 * DLPI messages have been received, we know we're in the middle of an
8174 * exclusive operation and pass CUR_OP (except for DL_NOTIFY_IND).
8175 *
8176 * As required by qwriter_ip(), we refhold the ill; it will refrele.
8177 * Since this is on the ill stream we unconditionally bump up the
8178 * refcount without doing ILL_CAN_LOOKUP().
8179 */
8180 ill_refhold(ill);
8181 if (prim == DL_NOTIFY_IND)
8182 qwriter_ip(ill, q, mp, ip_rput_dlpi_writer, NEW_OP, B_FALSE);
8183 else
8184 qwriter_ip(ill, q, mp, ip_rput_dlpi_writer, CUR_OP, B_FALSE);
8185 }
8186
/*
 * Handling of DLPI messages that require exclusive access to the ipsq.
 *
 * Need to do ipsq_pending_mp_get on ioctl completion, which could
 * happen here (along with mi_copy_done).
 */
8193 /* ARGSUSED */
8194 static void
8195 ip_rput_dlpi_writer(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy_arg)
8196 {
8197 dl_ok_ack_t *dloa = (dl_ok_ack_t *)mp->b_rptr;
8198 dl_error_ack_t *dlea = (dl_error_ack_t *)dloa;
8199 int err = 0;
8200 ill_t *ill = (ill_t *)q->q_ptr;
8201 ipif_t *ipif = NULL;
8202 mblk_t *mp1 = NULL;
8203 conn_t *connp = NULL;
8204 t_uscalar_t paddrreq;
8205 mblk_t *mp_hw;
8206 boolean_t success;
8207 boolean_t ioctl_aborted = B_FALSE;
8208 boolean_t log = B_TRUE;
8209
8210 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi_writer",
8211 char *, dl_primstr(dloa->dl_primitive), ill_t *, ill);
8212
8213 ip1dbg(("ip_rput_dlpi_writer .."));
8214 ASSERT(ipsq->ipsq_xop == ill->ill_phyint->phyint_ipsq->ipsq_xop);
8215 ASSERT(IAM_WRITER_ILL(ill));
8216
8217 ipif = ipsq->ipsq_xop->ipx_pending_ipif;
8218 /*
8219 * The current ioctl could have been aborted by the user and a new
8220 * ioctl to bring up another ill could have started. We could still
8221 * get a response from the driver later.
8222 */
8223 if (ipif != NULL && ipif->ipif_ill != ill)
8224 ioctl_aborted = B_TRUE;
8225
8226 switch (dloa->dl_primitive) {
8227 case DL_ERROR_ACK:
8228 ip1dbg(("ip_rput_dlpi_writer: got DL_ERROR_ACK for %s\n",
8229 dl_primstr(dlea->dl_error_primitive)));
8230
8231 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi_writer error",
8232 char *, dl_primstr(dlea->dl_error_primitive),
8233 ill_t *, ill);
8234
8235 switch (dlea->dl_error_primitive) {
8236 case DL_DISABMULTI_REQ:
8237 ill_dlpi_done(ill, dlea->dl_error_primitive);
8238 break;
8239 case DL_PROMISCON_REQ:
8240 case DL_PROMISCOFF_REQ:
8241 case DL_UNBIND_REQ:
8242 case DL_ATTACH_REQ:
8243 case DL_INFO_REQ:
8244 ill_dlpi_done(ill, dlea->dl_error_primitive);
8245 break;
8246 case DL_NOTIFY_REQ:
8247 ill_dlpi_done(ill, DL_NOTIFY_REQ);
8248 log = B_FALSE;
8249 break;
8250 case DL_PHYS_ADDR_REQ:
8251 /*
8252 * For IPv6 only, there are two additional
8253 * phys_addr_req's sent to the driver to get the
8254 * IPv6 token and lla. This allows IP to acquire
8255 * the hardware address format for a given interface
8256 * without having built in knowledge of the hardware
8257 * address. ill_phys_addr_pend keeps track of the last
8258 * DL_PAR sent so we know which response we are
8259 * dealing with. ill_dlpi_done will update
8260 * ill_phys_addr_pend when it sends the next req.
8261 * We don't complete the IOCTL until all three DL_PARs
8262 * have been attempted, so set *_len to 0 and break.
8263 */
8264 paddrreq = ill->ill_phys_addr_pend;
8265 ill_dlpi_done(ill, DL_PHYS_ADDR_REQ);
8266 if (paddrreq == DL_IPV6_TOKEN) {
8267 ill->ill_token_length = 0;
8268 log = B_FALSE;
8269 break;
8270 } else if (paddrreq == DL_IPV6_LINK_LAYER_ADDR) {
8271 ill->ill_nd_lla_len = 0;
8272 log = B_FALSE;
8273 break;
8274 }
8275 /*
8276 * Something went wrong with the DL_PHYS_ADDR_REQ.
8277 * We presumably have an IOCTL hanging out waiting
8278 * for completion. Find it and complete the IOCTL
8279 * with the error noted.
8280 * However, ill_dl_phys was called on an ill queue
8281 * (from SIOCSLIFNAME), thus conn_pending_ill is not
8282 * set. But the ioctl is known to be pending on ill_wq.
8283 */
8284 if (!ill->ill_ifname_pending)
8285 break;
8286 ill->ill_ifname_pending = 0;
8287 if (!ioctl_aborted)
8288 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8289 if (mp1 != NULL) {
8290 /*
8291 * This operation (SIOCSLIFNAME) must have
8292 * happened on the ill. Assert there is no conn
8293 */
8294 ASSERT(connp == NULL);
8295 q = ill->ill_wq;
8296 }
8297 break;
8298 case DL_BIND_REQ:
8299 ill_dlpi_done(ill, DL_BIND_REQ);
8300 if (ill->ill_ifname_pending)
8301 break;
8302 mutex_enter(&ill->ill_lock);
8303 ill->ill_state_flags &= ~ILL_DOWN_IN_PROGRESS;
8304 mutex_exit(&ill->ill_lock);
8305 /*
8306 * Something went wrong with the bind. We presumably
8307 * have an IOCTL hanging out waiting for completion.
8308 * Find it, take down the interface that was coming
8309 * up, and complete the IOCTL with the error noted.
8310 */
8311 if (!ioctl_aborted)
8312 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8313 if (mp1 != NULL) {
8314 /*
8315 * This might be a result of a DL_NOTE_REPLUMB
8316 * notification. In that case, connp is NULL.
8317 */
8318 if (connp != NULL)
8319 q = CONNP_TO_WQ(connp);
8320
8321 (void) ipif_down(ipif, NULL, NULL);
8322 /* error is set below the switch */
8323 }
8324 break;
8325 case DL_ENABMULTI_REQ:
8326 ill_dlpi_done(ill, DL_ENABMULTI_REQ);
8327
8328 if (ill->ill_dlpi_multicast_state == IDS_INPROGRESS)
8329 ill->ill_dlpi_multicast_state = IDS_FAILED;
8330 if (ill->ill_dlpi_multicast_state == IDS_FAILED) {
8331
8332 printf("ip: joining multicasts failed (%d)"
8333 " on %s - will use link layer "
8334 "broadcasts for multicast\n",
8335 dlea->dl_errno, ill->ill_name);
8336
8337 /*
8338 * Set up for multi_bcast; We are the
8339 * writer, so ok to access ill->ill_ipif
8340 * without any lock.
8341 */
8342 mutex_enter(&ill->ill_phyint->phyint_lock);
8343 ill->ill_phyint->phyint_flags |=
8344 PHYI_MULTI_BCAST;
8345 mutex_exit(&ill->ill_phyint->phyint_lock);
8346
8347 }
8348 freemsg(mp); /* Don't want to pass this up */
8349 return;
8350 case DL_CAPABILITY_REQ:
8351 ip1dbg(("ip_rput_dlpi_writer: got DL_ERROR_ACK for "
8352 "DL_CAPABILITY REQ\n"));
8353 if (ill->ill_dlpi_capab_state == IDCS_PROBE_SENT)
8354 ill->ill_dlpi_capab_state = IDCS_FAILED;
8355 ill_capability_done(ill);
8356 freemsg(mp);
8357 return;
8358 }
		/*
		 * Note the error for IOCTL completion (mp1 is set when
		 * ready to complete ioctl). If ill_ifname_pending_err is
		 * set, an error occurred during plumbing (ill_ifname_pending),
		 * so we want to report that error.
		 *
		 * NOTE: there are two additional DL_PHYS_ADDR_REQ's
		 * (DL_IPV6_TOKEN and DL_IPV6_LINK_LAYER_ADDR) that are
		 * expected to get errack'd if the driver doesn't support
		 * these flags (e.g. ethernet). log will be set to B_FALSE
		 * if these error conditions are encountered.
		 */
8371 if (mp1 != NULL) {
8372 if (ill->ill_ifname_pending_err != 0) {
8373 err = ill->ill_ifname_pending_err;
8374 ill->ill_ifname_pending_err = 0;
8375 } else {
8376 err = dlea->dl_unix_errno ?
8377 dlea->dl_unix_errno : ENXIO;
8378 }
8379 /*
8380 * If we're plumbing an interface and an error hasn't already
8381 * been saved, set ill_ifname_pending_err to the error passed
8382 * up. Ignore the error if log is B_FALSE (see comment above).
8383 */
8384 } else if (log && ill->ill_ifname_pending &&
8385 ill->ill_ifname_pending_err == 0) {
8386 ill->ill_ifname_pending_err = dlea->dl_unix_errno ?
8387 dlea->dl_unix_errno : ENXIO;
8388 }
8389
8390 if (log)
8391 ip_dlpi_error(ill, dlea->dl_error_primitive,
8392 dlea->dl_errno, dlea->dl_unix_errno);
8393 break;
8394 case DL_CAPABILITY_ACK:
8395 ill_capability_ack(ill, mp);
8396 /*
8397 * The message has been handed off to ill_capability_ack
8398 * and must not be freed below
8399 */
8400 mp = NULL;
8401 break;
8402
8403 case DL_INFO_ACK:
8404 /* Call a routine to handle this one. */
8405 ill_dlpi_done(ill, DL_INFO_REQ);
8406 ip_ll_subnet_defaults(ill, mp);
8407 ASSERT(!MUTEX_HELD(&ill->ill_phyint->phyint_ipsq->ipsq_lock));
8408 return;
8409 case DL_BIND_ACK:
8410 /*
8411 * We should have an IOCTL waiting on this unless
8412 * sent by ill_dl_phys, in which case just return
8413 */
8414 ill_dlpi_done(ill, DL_BIND_REQ);
8415
8416 if (ill->ill_ifname_pending) {
8417 DTRACE_PROBE2(ip__rput__dlpi__ifname__pending,
8418 ill_t *, ill, mblk_t *, mp);
8419 break;
8420 }
8421 mutex_enter(&ill->ill_lock);
8422 ill->ill_dl_up = 1;
8423 ill->ill_state_flags &= ~ILL_DOWN_IN_PROGRESS;
8424 mutex_exit(&ill->ill_lock);
8425
8426 if (!ioctl_aborted)
8427 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8428 if (mp1 == NULL) {
8429 DTRACE_PROBE1(ip__rput__dlpi__no__mblk, ill_t *, ill);
8430 break;
8431 }
8432 /*
8433 * mp1 was added by ill_dl_up(). if that is a result of
8434 * a DL_NOTE_REPLUMB notification, connp could be NULL.
8435 */
8436 if (connp != NULL)
8437 q = CONNP_TO_WQ(connp);
8438 /*
8439 * We are exclusive. So nothing can change even after
8440 * we get the pending mp.
8441 */
8442 ip1dbg(("ip_rput_dlpi: bind_ack %s\n", ill->ill_name));
8443 DTRACE_PROBE1(ip__rput__dlpi__bind__ack, ill_t *, ill);
8444 ill_nic_event_dispatch(ill, 0, NE_UP, NULL, 0);
8445
8446 /*
8447 * Now bring up the resolver; when that is complete, we'll
8448 * create IREs. Note that we intentionally mirror what
8449 * ipif_up() would have done, because we got here by way of
8450 * ill_dl_up(), which stopped ipif_up()'s processing.
8451 */
8452 if (ill->ill_isv6) {
8453 /*
8454 * v6 interfaces.
8455 * Unlike ARP which has to do another bind
8456 * and attach, once we get here we are
8457 * done with NDP
8458 */
8459 (void) ipif_resolver_up(ipif, Res_act_initial);
8460 if ((err = ipif_ndp_up(ipif, B_TRUE)) == 0)
8461 err = ipif_up_done_v6(ipif);
8462 } else if (ill->ill_net_type == IRE_IF_RESOLVER) {
8463 /*
8464 * ARP and other v4 external resolvers.
8465 * Leave the pending mblk intact so that
8466 * the ioctl completes in ip_rput().
8467 */
8468 if (connp != NULL)
8469 mutex_enter(&connp->conn_lock);
8470 mutex_enter(&ill->ill_lock);
8471 success = ipsq_pending_mp_add(connp, ipif, q, mp1, 0);
8472 mutex_exit(&ill->ill_lock);
8473 if (connp != NULL)
8474 mutex_exit(&connp->conn_lock);
8475 if (success) {
8476 err = ipif_resolver_up(ipif, Res_act_initial);
8477 if (err == EINPROGRESS) {
8478 freemsg(mp);
8479 return;
8480 }
8481 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8482 } else {
8483 /* The conn has started closing */
8484 err = EINTR;
8485 }
8486 } else {
8487 /*
8488 * This one is complete. Reply to pending ioctl.
8489 */
8490 (void) ipif_resolver_up(ipif, Res_act_initial);
8491 err = ipif_up_done(ipif);
8492 }
8493
8494 if ((err == 0) && (ill->ill_up_ipifs)) {
8495 err = ill_up_ipifs(ill, q, mp1);
8496 if (err == EINPROGRESS) {
8497 freemsg(mp);
8498 return;
8499 }
8500 }
8501
8502 /*
8503 * If we have a moved ipif to bring up, and everything has
8504 * succeeded to this point, bring it up on the IPMP ill.
8505 * Otherwise, leave it down -- the admin can try to bring it
8506 * up by hand if need be.
8507 */
8508 if (ill->ill_move_ipif != NULL) {
8509 if (err != 0) {
8510 ill->ill_move_ipif = NULL;
8511 } else {
8512 ipif = ill->ill_move_ipif;
8513 ill->ill_move_ipif = NULL;
8514 err = ipif_up(ipif, q, mp1);
8515 if (err == EINPROGRESS) {
8516 freemsg(mp);
8517 return;
8518 }
8519 }
8520 }
8521 break;
8522
8523 case DL_NOTIFY_IND: {
8524 dl_notify_ind_t *notify = (dl_notify_ind_t *)mp->b_rptr;
8525 uint_t orig_mtu, orig_mc_mtu;
8526
8527 switch (notify->dl_notification) {
8528 case DL_NOTE_PHYS_ADDR:
8529 err = ill_set_phys_addr(ill, mp);
8530 break;
8531
8532 case DL_NOTE_REPLUMB:
8533 /*
8534 * Directly return after calling ill_replumb().
8535 * Note that we should not free mp as it is reused
8536 * in the ill_replumb() function.
8537 */
8538 err = ill_replumb(ill, mp);
8539 return;
8540
8541 case DL_NOTE_FASTPATH_FLUSH:
8542 nce_flush(ill, B_FALSE);
8543 break;
8544
8545 case DL_NOTE_SDU_SIZE:
8546 case DL_NOTE_SDU_SIZE2:
8547 /*
8548 * The dce and fragmentation code can cope with
8549 * this changing while packets are being sent.
8550 * When packets are sent ip_output will discover
8551 * a change.
8552 *
8553 * Change the MTU size of the interface.
8554 */
8555 mutex_enter(&ill->ill_lock);
8556 orig_mtu = ill->ill_mtu;
8557 orig_mc_mtu = ill->ill_mc_mtu;
8558 switch (notify->dl_notification) {
8559 case DL_NOTE_SDU_SIZE:
8560 ill->ill_current_frag =
8561 (uint_t)notify->dl_data;
8562 ill->ill_mc_mtu = (uint_t)notify->dl_data;
8563 break;
8564 case DL_NOTE_SDU_SIZE2:
8565 ill->ill_current_frag =
8566 (uint_t)notify->dl_data1;
8567 ill->ill_mc_mtu = (uint_t)notify->dl_data2;
8568 break;
8569 }
8570 if (ill->ill_current_frag > ill->ill_max_frag)
8571 ill->ill_max_frag = ill->ill_current_frag;
8572
8573 if (!(ill->ill_flags & ILLF_FIXEDMTU)) {
8574 ill->ill_mtu = ill->ill_current_frag;
8575
8576 /*
8577 * If ill_user_mtu was set (via
8578 * SIOCSLIFLNKINFO), clamp ill_mtu at it.
8579 */
8580 if (ill->ill_user_mtu != 0 &&
8581 ill->ill_user_mtu < ill->ill_mtu)
8582 ill->ill_mtu = ill->ill_user_mtu;
8583
8584 if (ill->ill_user_mtu != 0 &&
8585 ill->ill_user_mtu < ill->ill_mc_mtu)
8586 ill->ill_mc_mtu = ill->ill_user_mtu;
8587
8588 if (ill->ill_isv6) {
8589 if (ill->ill_mtu < IPV6_MIN_MTU)
8590 ill->ill_mtu = IPV6_MIN_MTU;
8591 if (ill->ill_mc_mtu < IPV6_MIN_MTU)
8592 ill->ill_mc_mtu = IPV6_MIN_MTU;
8593 } else {
8594 if (ill->ill_mtu < IP_MIN_MTU)
8595 ill->ill_mtu = IP_MIN_MTU;
8596 if (ill->ill_mc_mtu < IP_MIN_MTU)
8597 ill->ill_mc_mtu = IP_MIN_MTU;
8598 }
8599 } else if (ill->ill_mc_mtu > ill->ill_mtu) {
8600 ill->ill_mc_mtu = ill->ill_mtu;
8601 }
8602
8603 mutex_exit(&ill->ill_lock);
8604 /*
8605 * Make sure all dce_generation checks find out
8606 * that ill_mtu/ill_mc_mtu has changed.
8607 */
8608 if (orig_mtu != ill->ill_mtu ||
8609 orig_mc_mtu != ill->ill_mc_mtu) {
8610 dce_increment_all_generations(ill->ill_isv6,
8611 ill->ill_ipst);
8612 }
8613
8614 /*
8615 * Refresh IPMP meta-interface MTU if necessary.
8616 */
8617 if (IS_UNDER_IPMP(ill))
8618 ipmp_illgrp_refresh_mtu(ill->ill_grp);
8619 break;
8620
8621 case DL_NOTE_LINK_UP:
8622 case DL_NOTE_LINK_DOWN: {
8623 /*
8624 * We are writer. ill / phyint / ipsq assocs stable.
8625 * The RUNNING flag reflects the state of the link.
8626 */
8627 phyint_t *phyint = ill->ill_phyint;
8628 uint64_t new_phyint_flags;
8629 boolean_t changed = B_FALSE;
8630 boolean_t went_up;
8631
8632 went_up = notify->dl_notification == DL_NOTE_LINK_UP;
8633 mutex_enter(&phyint->phyint_lock);
8634
8635 new_phyint_flags = went_up ?
8636 phyint->phyint_flags | PHYI_RUNNING :
8637 phyint->phyint_flags & ~PHYI_RUNNING;
8638
8639 if (IS_IPMP(ill)) {
8640 new_phyint_flags = went_up ?
8641 new_phyint_flags & ~PHYI_FAILED :
8642 new_phyint_flags | PHYI_FAILED;
8643 }
8644
8645 if (new_phyint_flags != phyint->phyint_flags) {
8646 phyint->phyint_flags = new_phyint_flags;
8647 changed = B_TRUE;
8648 }
8649 mutex_exit(&phyint->phyint_lock);
8650 /*
8651 * ill_restart_dad handles the DAD restart and routing
8652 * socket notification logic.
8653 */
8654 if (changed) {
8655 ill_restart_dad(phyint->phyint_illv4, went_up);
8656 ill_restart_dad(phyint->phyint_illv6, went_up);
8657 }
8658 break;
8659 }
8660 case DL_NOTE_PROMISC_ON_PHYS: {
8661 phyint_t *phyint = ill->ill_phyint;
8662
8663 mutex_enter(&phyint->phyint_lock);
8664 phyint->phyint_flags |= PHYI_PROMISC;
8665 mutex_exit(&phyint->phyint_lock);
8666 break;
8667 }
8668 case DL_NOTE_PROMISC_OFF_PHYS: {
8669 phyint_t *phyint = ill->ill_phyint;
8670
8671 mutex_enter(&phyint->phyint_lock);
8672 phyint->phyint_flags &= ~PHYI_PROMISC;
8673 mutex_exit(&phyint->phyint_lock);
8674 break;
8675 }
8676 case DL_NOTE_CAPAB_RENEG:
8677 /*
8678 * Something changed on the driver side.
8679 * It wants us to renegotiate the capabilities
8680 * on this ill. One possible cause is the aggregation
8681 * interface under us where a port got added or
8682 * went away.
8683 *
8684 * If the capability negotiation is already done
8685 * or is in progress, reset the capabilities and
8686 * set the ill's ill_capab_reneg to B_TRUE,
8687 * so that when the ack comes back, we can start
8688 * the renegotiation process.
8689 *
8690 * Note that if ill_capab_reneg is already B_TRUE
8691 * (ill_dlpi_capab_state is IDS_UNKNOWN in this case),
8692 * the capability resetting request has been sent
8693 * and the renegotiation has not been started yet;
8694 * nothing needs to be done in this case.
8695 */
8696 ipsq_current_start(ipsq, ill->ill_ipif, 0);
8697 ill_capability_reset(ill, B_TRUE);
8698 ipsq_current_finish(ipsq);
8699 break;
8700
8701 case DL_NOTE_ALLOWED_IPS:
8702 ill_set_allowed_ips(ill, mp);
8703 break;
8704 default:
8705 ip0dbg(("ip_rput_dlpi_writer: unknown notification "
8706 "type 0x%x for DL_NOTIFY_IND\n",
8707 notify->dl_notification));
8708 break;
8709 }
8710
8711 /*
8712 * As this is an asynchronous operation, we
8713 * should not call ill_dlpi_done
8714 */
8715 break;
8716 }
8717 case DL_NOTIFY_ACK: {
8718 dl_notify_ack_t *noteack = (dl_notify_ack_t *)mp->b_rptr;
8719
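/*
 * Remember whether the driver is able to generate
 * DL_NOTE_LINK_UP notifications; presumably consumers use
 * ill_note_link to decide whether link state can be tracked
 * via DLPI on this ill.
 */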
8720 if (noteack->dl_notifications & DL_NOTE_LINK_UP)
8721 ill->ill_note_link = 1;
8722 ill_dlpi_done(ill, DL_NOTIFY_REQ);
8723 break;
8724 }
8725 case DL_PHYS_ADDR_ACK: {
8726 /*
8727 * As part of plumbing the interface via SIOCSLIFNAME,
8728 * ill_dl_phys() will queue a series of DL_PHYS_ADDR_REQs,
8729 * whose answers we receive here. As each answer is received,
8730 * we call ill_dlpi_done() to dispatch the next request as
8731 * we're processing the current one. Once all answers have
8732 * been received, we use ipsq_pending_mp_get() to dequeue the
8733 * outstanding IOCTL and reply to it. (Because ill_dl_phys()
8734 * is invoked from an ill queue, conn_oper_pending_ill is not
8735 * available, but we know the ioctl is pending on ill_wq.)
8736 */
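/*
 * ill_phys_addr_pend records which DL_PHYS_ADDR_REQ is
 * outstanding; the answers handled below are DL_IPV6_TOKEN,
 * DL_IPV6_LINK_LAYER_ADDR, DL_CURR_DEST_ADDR and
 * DL_CURR_PHYS_ADDR.
 */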
8737 uint_t paddrlen, paddroff;
8738 uint8_t *addr;
8739
8740 paddrreq = ill->ill_phys_addr_pend;
8741 paddrlen = ((dl_phys_addr_ack_t *)mp->b_rptr)->dl_addr_length;
8742 paddroff = ((dl_phys_addr_ack_t *)mp->b_rptr)->dl_addr_offset;
8743 addr = mp->b_rptr + paddroff;
8744
8745 ill_dlpi_done(ill, DL_PHYS_ADDR_REQ);
8746 if (paddrreq == DL_IPV6_TOKEN) {
8747 /*
8748 * bcopy to low-order bits of ill_token
8749 *
8750 * XXX Temporary hack - currently, all known tokens
8751 * are 64 bits, so I'll cheat for the moment.
8752 */
8753 bcopy(addr, &ill->ill_token.s6_addr32[2], paddrlen);
8754 ill->ill_token_length = paddrlen;
8755 break;
8756 } else if (paddrreq == DL_IPV6_LINK_LAYER_ADDR) {
8757 ASSERT(ill->ill_nd_lla_mp == NULL);
8758 ill_set_ndmp(ill, mp, paddroff, paddrlen);
8759 mp = NULL;
8760 break;
8761 } else if (paddrreq == DL_CURR_DEST_ADDR) {
8762 ASSERT(ill->ill_dest_addr_mp == NULL);
8763 ill->ill_dest_addr_mp = mp;
8764 ill->ill_dest_addr = addr;
8765 mp = NULL;
8766 if (ill->ill_isv6) {
8767 ill_setdesttoken(ill);
8768 ipif_setdestlinklocal(ill->ill_ipif);
8769 }
8770 break;
8771 }
8772
8773 ASSERT(paddrreq == DL_CURR_PHYS_ADDR);
8774 ASSERT(ill->ill_phys_addr_mp == NULL);
8775 if (!ill->ill_ifname_pending)
8776 break;
8777 ill->ill_ifname_pending = 0;
8778 if (!ioctl_aborted)
8779 mp1 = ipsq_pending_mp_get(ipsq, &connp);
8780 if (mp1 != NULL) {
8781 ASSERT(connp == NULL);
8782 q = ill->ill_wq;
8783 }
8784 /*
8785 * If any error acks were received during the plumbing sequence,
8786 * ill_ifname_pending_err will be set. Break out and send up
8787 * the error to the pending ioctl.
8788 */
8789 if (ill->ill_ifname_pending_err != 0) {
8790 err = ill->ill_ifname_pending_err;
8791 ill->ill_ifname_pending_err = 0;
8792 break;
8793 }
8794
8795 ill->ill_phys_addr_mp = mp;
8796 ill->ill_phys_addr = (paddrlen == 0 ? NULL : addr);
8797 mp = NULL;
8798
8799 /*
8800 * If paddrlen or ill_phys_addr_length is zero, the DLPI
8801 * provider doesn't support physical addresses. We check both
8802 * paddrlen and ill_phys_addr_length because sppp (PPP) does
8803 * not have physical addresses, but has historically advertised a
8804 * physical address length of 0 in its DL_INFO_ACK and 6 in
8805 * its DL_PHYS_ADDR_ACK.
8806 */
8807 if (paddrlen == 0 || ill->ill_phys_addr_length == 0) {
8808 ill->ill_phys_addr = NULL;
8809 } else if (paddrlen != ill->ill_phys_addr_length) {
8810 ip0dbg(("DL_PHYS_ADDR_ACK: got addrlen %d, expected %d",
8811 paddrlen, ill->ill_phys_addr_length));
8812 err = EINVAL;
8813 break;
8814 }
8815
8816 if (ill->ill_nd_lla_mp == NULL) {
8817 if ((mp_hw = copyb(ill->ill_phys_addr_mp)) == NULL) {
8818 err = ENOMEM;
8819 break;
8820 }
8821 ill_set_ndmp(ill, mp_hw, paddroff, paddrlen);
8822 }
8823
8824 if (ill->ill_isv6) {
8825 ill_setdefaulttoken(ill);
8826 ipif_setlinklocal(ill->ill_ipif);
8827 }
8828 break;
8829 }
8830 case DL_OK_ACK:
8831 ip2dbg(("DL_OK_ACK %s (0x%x)\n",
8832 dl_primstr((int)dloa->dl_correct_primitive),
8833 dloa->dl_correct_primitive));
8834 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi_writer ok",
8835 char *, dl_primstr(dloa->dl_correct_primitive),
8836 ill_t *, ill);
8837
8838 switch (dloa->dl_correct_primitive) {
8839 case DL_ENABMULTI_REQ:
8840 case DL_DISABMULTI_REQ:
8841 ill_dlpi_done(ill, dloa->dl_correct_primitive);
8842 break;
8843 case DL_PROMISCON_REQ:
8844 case DL_PROMISCOFF_REQ:
8845 case DL_UNBIND_REQ:
8846 case DL_ATTACH_REQ:
8847 ill_dlpi_done(ill, dloa->dl_correct_primitive);
8848 break;
8849 }
8850 break;
8851 default:
8852 break;
8853 }
8854
8855 freemsg(mp);
8856 if (mp1 == NULL)
8857 return;
8858
8859 /*
8860 * The operation must complete without EINPROGRESS since
8861 * ipsq_pending_mp_get() has removed the mblk (mp1). Otherwise,
8862 * the operation will be stuck forever inside the IPSQ.
8863 */
8864 ASSERT(err != EINPROGRESS);
8865
8866 DTRACE_PROBE4(ipif__ioctl, char *, "ip_rput_dlpi_writer finish",
8867 int, ipsq->ipsq_xop->ipx_current_ioctl, ill_t *, ill,
8868 ipif_t *, NULL);
8869
8870 switch (ipsq->ipsq_xop->ipx_current_ioctl) {
8871 case 0:
8872 ipsq_current_finish(ipsq);
8873 break;
8874
8875 case SIOCSLIFNAME:
8876 case IF_UNITSEL: {
8877 ill_t *ill_other = ILL_OTHER(ill);
8878
8879 /*
8880 * If SIOCSLIFNAME or IF_UNITSEL is about to succeed, and the
8881 * ill has a peer which is in an IPMP group, then place ill
8882 * into the same group. One catch: although ifconfig plumbs
8883 * the appropriate IPMP meta-interface prior to plumbing this
8884 * ill, it is possible for multiple ifconfig applications to
8885 * race (or for another application to adjust plumbing), in
8886 * which case the IPMP meta-interface we need will be missing.
8887 * If so, kick the phyint out of the group.
8888 */
8889 if (err == 0 && ill_other != NULL && IS_UNDER_IPMP(ill_other)) {
8890 ipmp_grp_t *grp = ill->ill_phyint->phyint_grp;
8891 ipmp_illgrp_t *illg;
8892
8893 illg = ill->ill_isv6 ? grp->gr_v6 : grp->gr_v4;
8894 if (illg == NULL)
8895 ipmp_phyint_leave_grp(ill->ill_phyint);
8896 else
8897 ipmp_ill_join_illgrp(ill, illg);
8898 }
8899
8900 if (ipsq->ipsq_xop->ipx_current_ioctl == IF_UNITSEL)
8901 ip_ioctl_finish(q, mp1, err, NO_COPYOUT, ipsq);
8902 else
8903 ip_ioctl_finish(q, mp1, err, COPYOUT, ipsq);
8904 break;
8905 }
8906 case SIOCLIFADDIF:
8907 ip_ioctl_finish(q, mp1, err, COPYOUT, ipsq);
8908 break;
8909
8910 default:
8911 ip_ioctl_finish(q, mp1, err, NO_COPYOUT, ipsq);
8912 break;
8913 }
8914 }
8915
8916 /*
8917 * ip_rput_other is called by ip_rput to handle messages modifying the global
8918 * state in IP. If 'ipsq' is non-NULL, caller is writer on it.
8919 */
8920 /* ARGSUSED */
8921 void
8922 ip_rput_other(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy_arg)
8923 {
8924 ill_t *ill = q->q_ptr;
8925 struct iocblk *iocp;
8926
8927 ip1dbg(("ip_rput_other "));
8928 if (ipsq != NULL) {
8929 ASSERT(IAM_WRITER_IPSQ(ipsq));
8930 ASSERT(ipsq->ipsq_xop ==
8931 ill->ill_phyint->phyint_ipsq->ipsq_xop);
8932 }
8933
8934 switch (mp->b_datap->db_type) {
8935 case M_ERROR:
8936 case M_HANGUP:
8937 /*
8938 * The device has a problem. We force the ILL down. It can
8939 * be brought up again manually using SIOCSIFFLAGS (via
8940 * ifconfig or equivalent).
8941 */
8942 ASSERT(ipsq != NULL);
8943 if (mp->b_rptr < mp->b_wptr)
8944 ill->ill_error = (int)(*mp->b_rptr & 0xFF);
8945 if (ill->ill_error == 0)
8946 ill->ill_error = ENXIO;
8947 if (!ill_down_start(q, mp))
8948 return;
8949 ipif_all_down_tail(ipsq, q, mp, NULL);
8950 break;
8951 case M_IOCNAK: {
8952 iocp = (struct iocblk *)mp->b_rptr;
8953
8954 ASSERT(iocp->ioc_cmd == DL_IOC_HDR_INFO);
8955 /*
8956 * If this was the first attempt, turn off the fastpath
8957 * probing.
8958 */
8959 mutex_enter(&ill->ill_lock);
8960 if (ill->ill_dlpi_fastpath_state == IDS_INPROGRESS) {
8961 ill->ill_dlpi_fastpath_state = IDS_FAILED;
8962 mutex_exit(&ill->ill_lock);
8963 /*
8964 * don't flush the nce_t entries: we use them
8965 * as an index to the ncec itself.
8966 */
8967 ip1dbg(("ip_rput: DLPI fastpath off on interface %s\n",
8968 ill->ill_name));
8969 } else {
8970 mutex_exit(&ill->ill_lock);
8971 }
8972 freemsg(mp);
8973 break;
8974 }
8975 default:
8976 ASSERT(0);
8977 break;
8978 }
8979 }
8980
8981 /*
8982 * Update any source route, record route, or timestamp options.
8983 * On failure it has consumed the message and bumped the MIB.
8984 */
8985 boolean_t
8986 ip_forward_options(mblk_t *mp, ipha_t *ipha, ill_t *dst_ill,
8987 ip_recv_attr_t *ira)
8988 {
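/*
 * For reference, a source route option (RFC 791) is laid out as:
 *
 *	opt[0]	 type (IPOPT_SSRR or IPOPT_LSRR)
 *	opt[1]	 IPOPT_OLEN: total length of the option
 *	opt[2]	 IPOPT_OFFSET: the "pointer", 1-based from the start
 *		 of the option, minimum legal value 4
 *	opt[3..] route data: a list of 4-byte IP addresses
 *
 * The "off--" sequences below convert the 1-based pointer into a
 * zero-based index into opt[].
 */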
8989 ipoptp_t opts;
8990 uchar_t *opt;
8991 uint8_t optval;
8992 uint8_t optlen;
8993 ipaddr_t dst;
8994 ipaddr_t ifaddr;
8995 uint32_t ts;
8996 timestruc_t now;
8997 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
8998
8999 ip2dbg(("ip_forward_options\n"));
9000 dst = ipha->ipha_dst;
9001 for (optval = ipoptp_first(&opts, ipha);
9002 optval != IPOPT_EOL;
9003 optval = ipoptp_next(&opts)) {
9004 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
9005 opt = opts.ipoptp_cur;
9006 optlen = opts.ipoptp_len;
9007 ip2dbg(("ip_forward_options: opt %d, len %d\n",
9008 optval, opts.ipoptp_len));
9009 switch (optval) {
9010 uint32_t off;
9011 case IPOPT_SSRR:
9012 case IPOPT_LSRR:
9013 /* Check if administratively disabled */
9014 if (!ipst->ips_ip_forward_src_routed) {
9015 BUMP_MIB(dst_ill->ill_ip_mib,
9016 ipIfStatsForwProhibits);
9017 ip_drop_input("ICMP_SOURCE_ROUTE_FAILED",
9018 mp, dst_ill);
9019 icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED,
9020 ira);
9021 return (B_FALSE);
9022 }
9023 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
9024 /*
9025 * Must be partial since ip_input_options
9026 * checked for strict.
9027 */
9028 break;
9029 }
9030 off = opt[IPOPT_OFFSET];
9031 off--;
9032 redo_srr:
9033 if (optlen < IP_ADDR_LEN ||
9034 off > optlen - IP_ADDR_LEN) {
9035 /* End of source route */
9036 ip1dbg((
9037 "ip_forward_options: end of SR\n"));
9038 break;
9039 }
9040 /* Pick a reasonable address on the outbound if */
9041 ASSERT(dst_ill != NULL);
9042 if (ip_select_source_v4(dst_ill, INADDR_ANY, dst,
9043 INADDR_ANY, ALL_ZONES, ipst, &ifaddr, NULL,
9044 NULL) != 0) {
9045 /* No source! Shouldn't happen */
9046 ifaddr = INADDR_ANY;
9047 }
9048 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
9049 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9050 ip1dbg(("ip_forward_options: next hop 0x%x\n",
9051 ntohl(dst)));
9052
9053 /*
9054 * Check if our address is present more than
9055 * once as consecutive hops in the source route.
9056 */
9057 if (ip_type_v4(dst, ipst) == IRE_LOCAL) {
9058 off += IP_ADDR_LEN;
9059 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9060 goto redo_srr;
9061 }
9062 ipha->ipha_dst = dst;
9063 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9064 break;
9065 case IPOPT_RR:
9066 off = opt[IPOPT_OFFSET];
9067 off--;
9068 if (optlen < IP_ADDR_LEN ||
9069 off > optlen - IP_ADDR_LEN) {
9070 /* No more room - ignore */
9071 ip1dbg((
9072 "ip_forward_options: end of RR\n"));
9073 break;
9074 }
9075 /* Pick a reasonable address on the outbound if */
9076 ASSERT(dst_ill != NULL);
9077 if (ip_select_source_v4(dst_ill, INADDR_ANY, dst,
9078 INADDR_ANY, ALL_ZONES, ipst, &ifaddr, NULL,
9079 NULL) != 0) {
9080 /* No source! Shouldn't happen */
9081 ifaddr = INADDR_ANY;
9082 }
9083 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9084 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9085 break;
9086 case IPOPT_TS:
9087 /* Insert timestamp if there is room */
9088 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9089 case IPOPT_TS_TSONLY:
9090 off = IPOPT_TS_TIMELEN;
9091 break;
9092 case IPOPT_TS_PRESPEC:
9093 case IPOPT_TS_PRESPEC_RFC791:
9094 /* Verify that the address matched */
9095 off = opt[IPOPT_OFFSET] - 1;
9096 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
9097 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
9098 /* Not for us */
9099 break;
9100 }
9101 /* FALLTHRU */
9102 case IPOPT_TS_TSANDADDR:
9103 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
9104 break;
9105 default:
9106 /*
9107 * ip_*put_options should have already
9108 * dropped this packet.
9109 */
9110 cmn_err(CE_PANIC, "ip_forward_options: "
9111 "unknown IT - bug in ip_input_options?\n");
9112 return (B_TRUE); /* Keep "lint" happy */
9113 }
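/*
 * In opt[IPOPT_POS_OV_FLG] the low nibble holds the timestamp
 * flags and the high nibble the 4-bit overflow counter
 * (RFC 791), hence the masking and shifting below.
 */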
9114 if (opt[IPOPT_OFFSET] - 1 + off > optlen) {
9115 /* Increase overflow counter */
9116 off = (opt[IPOPT_POS_OV_FLG] >> 4) + 1;
9117 opt[IPOPT_POS_OV_FLG] =
9118 (uint8_t)((opt[IPOPT_POS_OV_FLG] & 0x0F) |
9119 (off << 4));
9120 break;
9121 }
9122 off = opt[IPOPT_OFFSET] - 1;
9123 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9124 case IPOPT_TS_PRESPEC:
9125 case IPOPT_TS_PRESPEC_RFC791:
9126 case IPOPT_TS_TSANDADDR:
9127 /* Pick a reasonable addr on the outbound if */
9128 ASSERT(dst_ill != NULL);
9129 if (ip_select_source_v4(dst_ill, INADDR_ANY,
9130 dst, INADDR_ANY, ALL_ZONES, ipst, &ifaddr,
9131 NULL, NULL) != 0) {
9132 /* No source! Shouldn't happen */
9133 ifaddr = INADDR_ANY;
9134 }
9135 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9136 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9137 /* FALLTHRU */
9138 case IPOPT_TS_TSONLY:
9139 off = opt[IPOPT_OFFSET] - 1;
9140 /* Compute # of milliseconds since midnight */
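/* E.g. at 01:02:03.004 UTC, ts is 3723 * 1000 + 4 = 3723004 msec. */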
9141 gethrestime(&now);
9142 ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
9143 NSEC2MSEC(now.tv_nsec);
9144 bcopy(&ts, (char *)opt + off, IPOPT_TS_TIMELEN);
9145 opt[IPOPT_OFFSET] += IPOPT_TS_TIMELEN;
9146 break;
9147 }
9148 break;
9149 }
9150 }
9151 return (B_TRUE);
9152 }
9153
9154 /*
9155 * Call ill_frag_timeout to do garbage collection. ill_frag_timeout
9156 * returns 'true' if there are still fragments left on the queue, in
9157 * which case we restart the timer.
9158 */
9159 void
9160 ill_frag_timer(void *arg)
9161 {
9162 ill_t *ill = (ill_t *)arg;
9163 boolean_t frag_pending;
9164 ip_stack_t *ipst = ill->ill_ipst;
9165 time_t timeout;
9166
9167 mutex_enter(&ill->ill_lock);
9168 ASSERT(!ill->ill_fragtimer_executing);
9169 if (ill->ill_state_flags & ILL_CONDEMNED) {
9170 ill->ill_frag_timer_id = 0;
9171 mutex_exit(&ill->ill_lock);
9172 return;
9173 }
9174 ill->ill_fragtimer_executing = 1;
9175 mutex_exit(&ill->ill_lock);
9176
9177 timeout = (ill->ill_isv6 ? ipst->ips_ipv6_reassembly_timeout :
9178 ipst->ips_ip_reassembly_timeout);
9179
9180 frag_pending = ill_frag_timeout(ill, timeout);
9181
9182 /*
9183 * Restart the timer if we have fragments pending or if someone
9184 * wanted us to be scheduled again.
9185 */
9186 mutex_enter(&ill->ill_lock);
9187 ill->ill_fragtimer_executing = 0;
9188 ill->ill_frag_timer_id = 0;
9189 if (frag_pending || ill->ill_fragtimer_needrestart)
9190 ill_frag_timer_start(ill);
9191 mutex_exit(&ill->ill_lock);
9192 }
9193
9194 void
9195 ill_frag_timer_start(ill_t *ill)
9196 {
9197 ip_stack_t *ipst = ill->ill_ipst;
9198 clock_t timeo_ms;
9199
9200 ASSERT(MUTEX_HELD(&ill->ill_lock));
9201
9202 /* If the ill is closing or opening don't proceed */
9203 if (ill->ill_state_flags & ILL_CONDEMNED)
9204 return;
9205
9206 if (ill->ill_fragtimer_executing) {
9207 /*
9208 * ill_frag_timer is currently executing. Just record
9209 * the fact that we want the timer to be restarted.
9210 * ill_frag_timer will post a timeout before it returns,
9211 * ensuring it will be called again.
9212 */
9213 ill->ill_fragtimer_needrestart = 1;
9214 return;
9215 }
9216
9217 if (ill->ill_frag_timer_id == 0) {
9218 timeo_ms = (ill->ill_isv6 ? ipst->ips_ipv6_reassembly_timeout :
9219 ipst->ips_ip_reassembly_timeout) * SECONDS;
9220
9221 /*
9222 * The timer is neither running nor is the timeout handler
9223 * executing. Post a timeout so that ill_frag_timer will be
9224 * called.
9225 */
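/*
 * The timeout is posted at half the configured reassembly
 * interval, presumably so that a fragment is reaped at most
 * roughly 1.5 times the timeout after it arrives.
 */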
9226 ill->ill_frag_timer_id = timeout(ill_frag_timer, ill,
9227 MSEC_TO_TICK(timeo_ms >> 1));
9228 ill->ill_fragtimer_needrestart = 0;
9229 }
9230 }
9231
9232 /*
9233 * Update any source route, record route or timestamp options.
9234 * Check that we are at end of strict source route.
9235 * The options have already been checked for sanity in ip_input_options().
9236 */
9237 boolean_t
9238 ip_input_local_options(mblk_t *mp, ipha_t *ipha, ip_recv_attr_t *ira)
9239 {
9240 ipoptp_t opts;
9241 uchar_t *opt;
9242 uint8_t optval;
9243 uint8_t optlen;
9244 ipaddr_t dst;
9245 ipaddr_t ifaddr;
9246 uint32_t ts;
9247 timestruc_t now;
9248 ill_t *ill = ira->ira_ill;
9249 ip_stack_t *ipst = ill->ill_ipst;
9250
9251 ip2dbg(("ip_input_local_options\n"));
9252
9253 for (optval = ipoptp_first(&opts, ipha);
9254 optval != IPOPT_EOL;
9255 optval = ipoptp_next(&opts)) {
9256 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
9257 opt = opts.ipoptp_cur;
9258 optlen = opts.ipoptp_len;
9259 ip2dbg(("ip_input_local_options: opt %d, len %d\n",
9260 optval, optlen));
9261 switch (optval) {
9262 uint32_t off;
9263 case IPOPT_SSRR:
9264 case IPOPT_LSRR:
9265 off = opt[IPOPT_OFFSET];
9266 off--;
9267 if (optlen < IP_ADDR_LEN ||
9268 off > optlen - IP_ADDR_LEN) {
9269 /* End of source route */
9270 ip1dbg(("ip_input_local_options: end of SR\n"));
9271 break;
9272 }
9273 /*
9274 * This will only happen if two consecutive entries
9275 * in the source route contain our address, or if
9276 * the packet has a loose source route which reaches
9277 * us before the whole source route is consumed.
9278 */
9279 ip1dbg(("ip_input_local_options: not end of SR\n"));
9280 if (optval == IPOPT_SSRR) {
9281 goto bad_src_route;
9282 }
9283 /*
9284 * Hack: instead of dropping the packet, truncate the
9285 * source route to what has been used by filling the
9286 * rest with IPOPT_NOP.
9287 */
9288 opt[IPOPT_OLEN] = (uint8_t)off;
9289 while (off < optlen) {
9290 opt[off++] = IPOPT_NOP;
9291 }
9292 break;
9293 case IPOPT_RR:
9294 off = opt[IPOPT_OFFSET];
9295 off--;
9296 if (optlen < IP_ADDR_LEN ||
9297 off > optlen - IP_ADDR_LEN) {
9298 /* No more room - ignore */
9299 ip1dbg((
9300 "ip_input_local_options: end of RR\n"));
9301 break;
9302 }
9303 /* Pick a reasonable address on the outbound if */
9304 if (ip_select_source_v4(ill, INADDR_ANY, ipha->ipha_dst,
9305 INADDR_ANY, ALL_ZONES, ipst, &ifaddr, NULL,
9306 NULL) != 0) {
9307 /* No source! Shouldn't happen */
9308 ifaddr = INADDR_ANY;
9309 }
9310 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9311 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9312 break;
9313 case IPOPT_TS:
9314 /* Insert timestamp if there is room */
9315 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9316 case IPOPT_TS_TSONLY:
9317 off = IPOPT_TS_TIMELEN;
9318 break;
9319 case IPOPT_TS_PRESPEC:
9320 case IPOPT_TS_PRESPEC_RFC791:
9321 /* Verify that the address matched */
9322 off = opt[IPOPT_OFFSET] - 1;
9323 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
9324 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
9325 /* Not for us */
9326 break;
9327 }
9328 /* FALLTHRU */
9329 case IPOPT_TS_TSANDADDR:
9330 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
9331 break;
9332 default:
9333 /*
9334 * ip_*put_options should have already
9335 * dropped this packet.
9336 */
9337 cmn_err(CE_PANIC, "ip_input_local_options: "
9338 "unknown IT - bug in ip_input_options?\n");
9339 return (B_TRUE); /* Keep "lint" happy */
9340 }
9341 if (opt[IPOPT_OFFSET] - 1 + off > optlen) {
9342 /* Increase overflow counter */
9343 off = (opt[IPOPT_POS_OV_FLG] >> 4) + 1;
9344 opt[IPOPT_POS_OV_FLG] =
9345 (uint8_t)((opt[IPOPT_POS_OV_FLG] & 0x0F) |
9346 (off << 4));
9347 break;
9348 }
9349 off = opt[IPOPT_OFFSET] - 1;
9350 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9351 case IPOPT_TS_PRESPEC:
9352 case IPOPT_TS_PRESPEC_RFC791:
9353 case IPOPT_TS_TSANDADDR:
9354 /* Pick a reasonable addr on the outbound if */
9355 if (ip_select_source_v4(ill, INADDR_ANY,
9356 ipha->ipha_dst, INADDR_ANY, ALL_ZONES, ipst,
9357 &ifaddr, NULL, NULL) != 0) {
9358 /* No source! Shouldn't happen */
9359 ifaddr = INADDR_ANY;
9360 }
9361 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9362 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9363 /* FALLTHRU */
9364 case IPOPT_TS_TSONLY:
9365 off = opt[IPOPT_OFFSET] - 1;
9366 /* Compute # of milliseconds since midnight */
9367 gethrestime(&now);
9368 ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
9369 NSEC2MSEC(now.tv_nsec);
9370 bcopy(&ts, (char *)opt + off, IPOPT_TS_TIMELEN);
9371 opt[IPOPT_OFFSET] += IPOPT_TS_TIMELEN;
9372 break;
9373 }
9374 break;
9375 }
9376 }
9377 return (B_TRUE);
9378
9379 bad_src_route:
9380 /* make sure we clear any indication of a hardware checksum */
9381 DB_CKSUMFLAGS(mp) = 0;
9382 ip_drop_input("ICMP_SOURCE_ROUTE_FAILED", mp, ill);
9383 icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED, ira);
9384 return (B_FALSE);
9385
9386 }
9387
9388 /*
9389 * Process IP options in an inbound packet. Always returns the nexthop.
9390 * Normally this is the passed-in nexthop, but if there is an option
9391 * that affects the nexthop (such as a source route), that one is returned.
9392 * Sets *errorp if there is an error, in which case an ICMP error has been sent
9393 * and mp freed.
9394 */
9395 ipaddr_t
9396 ip_input_options(ipha_t *ipha, ipaddr_t dst, mblk_t *mp,
9397 ip_recv_attr_t *ira, int *errorp)
9398 {
9399 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
9400 ipoptp_t opts;
9401 uchar_t *opt;
9402 uint8_t optval;
9403 uint8_t optlen;
9404 intptr_t code = 0;
9405 ire_t *ire;
9406
9407 ip2dbg(("ip_input_options\n"));
9408 *errorp = 0;
9409 for (optval = ipoptp_first(&opts, ipha);
9410 optval != IPOPT_EOL;
9411 optval = ipoptp_next(&opts)) {
9412 opt = opts.ipoptp_cur;
9413 optlen = opts.ipoptp_len;
9414 ip2dbg(("ip_input_options: opt %d, len %d\n",
9415 optval, optlen));
9416 /*
9417 * Note: we need to verify the checksum before we
9418 * modify anything; thus this routine only extracts the
9419 * next-hop dst from any source route.
9420 */
9421 switch (optval) {
9422 uint32_t off;
9423 case IPOPT_SSRR:
9424 case IPOPT_LSRR:
9425 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
9426 if (optval == IPOPT_SSRR) {
9427 ip1dbg(("ip_input_options: not next"
9428 " strict source route 0x%x\n",
9429 ntohl(dst)));
9430 code = (char *)&ipha->ipha_dst -
9431 (char *)ipha;
9432 goto param_prob; /* RouterReq's */
9433 }
9434 ip2dbg(("ip_input_options: "
9435 "not next source route 0x%x\n",
9436 ntohl(dst)));
9437 break;
9438 }
9439
9440 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
9441 ip1dbg((
9442 "ip_input_options: bad option offset\n"));
9443 code = (char *)&opt[IPOPT_OLEN] -
9444 (char *)ipha;
9445 goto param_prob;
9446 }
9447 off = opt[IPOPT_OFFSET];
9448 off--;
9449 redo_srr:
9450 if (optlen < IP_ADDR_LEN ||
9451 off > optlen - IP_ADDR_LEN) {
9452 /* End of source route */
9453 ip1dbg(("ip_input_options: end of SR\n"));
9454 break;
9455 }
9456 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
9457 ip1dbg(("ip_input_options: next hop 0x%x\n",
9458 ntohl(dst)));
9459
9460 /*
9461 * Check if our address is present more than
9462 * once as consecutive hops in the source route.
9463 * XXX verify per-interface ip_forwarding
9464 * for source route?
9465 */
9466 if (ip_type_v4(dst, ipst) == IRE_LOCAL) {
9467 off += IP_ADDR_LEN;
9468 goto redo_srr;
9469 }
9470
9471 if (dst == htonl(INADDR_LOOPBACK)) {
9472 ip1dbg(("ip_input_options: loopback addr in "
9473 "source route!\n"));
9474 goto bad_src_route;
9475 }
9476 /*
9477 * For strict: verify that dst is directly
9478 * reachable.
9479 */
9480 if (optval == IPOPT_SSRR) {
9481 ire = ire_ftable_lookup_v4(dst, 0, 0,
9482 IRE_INTERFACE, NULL, ALL_ZONES,
9483 ira->ira_tsl,
9484 MATCH_IRE_TYPE | MATCH_IRE_SECATTR, 0, ipst,
9485 NULL);
9486 if (ire == NULL) {
9487 ip1dbg(("ip_input_options: SSRR not "
9488 "directly reachable: 0x%x\n",
9489 ntohl(dst)));
9490 goto bad_src_route;
9491 }
9492 ire_refrele(ire);
9493 }
9494 /*
9495 * Defer update of the offset and the record route
9496 * until the packet is forwarded.
9497 */
9498 break;
9499 case IPOPT_RR:
9500 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
9501 ip1dbg((
9502 "ip_input_options: bad option offset\n"));
9503 code = (char *)&opt[IPOPT_OLEN] -
9504 (char *)ipha;
9505 goto param_prob;
9506 }
9507 break;
9508 case IPOPT_TS:
9509 /*
9510 * Verify that length >= 5 and that there is either
9511 * room for another timestamp or that the overflow
9512 * counter is not maxed out.
9513 */
9514 code = (char *)&opt[IPOPT_OLEN] - (char *)ipha;
9515 if (optlen < IPOPT_MINLEN_IT) {
9516 goto param_prob;
9517 }
9518 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
9519 ip1dbg((
9520 "ip_input_options: bad option offset\n"));
9521 code = (char *)&opt[IPOPT_OFFSET] -
9522 (char *)ipha;
9523 goto param_prob;
9524 }
9525 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9526 case IPOPT_TS_TSONLY:
9527 off = IPOPT_TS_TIMELEN;
9528 break;
9529 case IPOPT_TS_TSANDADDR:
9530 case IPOPT_TS_PRESPEC:
9531 case IPOPT_TS_PRESPEC_RFC791:
9532 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
9533 break;
9534 default:
9535 code = (char *)&opt[IPOPT_POS_OV_FLG] -
9536 (char *)ipha;
9537 goto param_prob;
9538 }
9539 if (opt[IPOPT_OFFSET] - 1 + off > optlen &&
9540 (opt[IPOPT_POS_OV_FLG] & 0xF0) == 0xF0) {
9541 /*
9542 * No room and the overflow counter is 15
9543 * already.
9544 */
9545 goto param_prob;
9546 }
9547 break;
9548 }
9549 }
9550
9551 if ((opts.ipoptp_flags & IPOPTP_ERROR) == 0) {
9552 return (dst);
9553 }
9554
9555 ip1dbg(("ip_input_options: error processing IP options."));
9556 code = (char *)&opt[IPOPT_OFFSET] - (char *)ipha;
9557
9558 param_prob:
9559 /* make sure we clear any indication of a hardware checksum */
9560 DB_CKSUMFLAGS(mp) = 0;
9561 ip_drop_input("ICMP_PARAM_PROBLEM", mp, ira->ira_ill);
9562 icmp_param_problem(mp, (uint8_t)code, ira);
9563 *errorp = -1;
9564 return (dst);
9565
9566 bad_src_route:
9567 /* make sure we clear any indication of a hardware checksum */
9568 DB_CKSUMFLAGS(mp) = 0;
9569 ip_drop_input("ICMP_SOURCE_ROUTE_FAILED", mp, ira->ira_ill);
9570 icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED, ira);
9571 *errorp = -1;
9572 return (dst);
9573 }
9574
9575 /*
9576 * IP & ICMP info in >= 14 msgs ...
9577 * - ip fixed part (mib2_ip_t)
9578 * - icmp fixed part (mib2_icmp_t)
9579 * - ipAddrEntryTable (ip 20) all IPv4 ipifs
9580 * - ipRouteEntryTable (ip 21) all IPv4 IREs
9581 * - ipNetToMediaEntryTable (ip 22) all IPv4 Neighbor Cache entries
9582 * - ipRouteAttributeTable (ip 102) labeled routes
9583 * - ip multicast membership (ip_member_t)
9584 * - ip multicast source filtering (ip_grpsrc_t)
9585 * - igmp fixed part (struct igmpstat)
9586 * - multicast routing stats (struct mrtstat)
9587 * - multicast routing vifs (array of struct vifctl)
9588 * - multicast routing routes (array of struct mfcctl)
9589 * - ip6 fixed part (mib2_ipv6IfStatsEntry_t)
9590 * One per ill plus one generic
9591 * - icmp6 fixed part (mib2_ipv6IfIcmpEntry_t)
9592 * One per ill plus one generic
9593 * - ipv6RouteEntry all IPv6 IREs
9594 * - ipv6RouteAttributeTable (ip6 102) labeled routes
9595 * - ipv6NetToMediaEntry all IPv6 Neighbor Cache entries
9596 * - ipv6AddrEntry all IPv6 ipifs
9597 * - ipv6 multicast membership (ipv6_member_t)
9598 * - ipv6 multicast source filtering (ipv6_grpsrc_t)
9599 *
9600 * NOTE: original mpctl is copied for msgs 2..N, since its ctl part is
9601 * already filled in by the caller.
9602 * If legacy_req is true then MIB structures need to be truncated to their
9603 * legacy sizes before being returned.
9604 * Return value of 0 indicates that no messages were sent and caller
9605 * should free mpctl.
9606 */
9607 int
9608 ip_snmp_get(queue_t *q, mblk_t *mpctl, int level, boolean_t legacy_req)
9609 {
9610 ip_stack_t *ipst;
9611 sctp_stack_t *sctps;
9612
9613 if (q->q_next != NULL) {
9614 ipst = ILLQ_TO_IPST(q);
9615 } else {
9616 ipst = CONNQ_TO_IPST(q);
9617 }
9618 ASSERT(ipst != NULL);
9619 sctps = ipst->ips_netstack->netstack_sctp;
9620
9621 if (mpctl == NULL || mpctl->b_cont == NULL) {
9622 return (0);
9623 }
9624
9625 /*
9626 * For the benefit of the (broken) packet shell's use of
9627 * the level argument, we honor MIB2_TCP/MIB2_UDP so that
9628 * TCP and UDP appear first in the list of MIB items.
9629 * TBD: We could expand this and use it in netstat so that
9630 * the kernel doesn't have to produce large tables (connections,
9631 * routes, etc) when netstat only wants the statistics or a particular
9632 * table.
9633 */
9634 if (!(level == MIB2_TCP || level == MIB2_UDP)) {
9635 if ((mpctl = icmp_snmp_get(q, mpctl)) == NULL) {
9636 return (1);
9637 }
9638 }
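/*
 * E.g. a request with level == MIB2_TCP returns only the TCP data
 * from tcp_snmp_get() and jumps to done; likewise MIB2_UDP returns
 * only the UDP data. Any other level emits the full set of messages.
 */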
9639
9640 if (level != MIB2_TCP) {
9641 if ((mpctl = udp_snmp_get(q, mpctl, legacy_req)) == NULL) {
9642 return (1);
9643 }
9644 if (level == MIB2_UDP) {
9645 goto done;
9646 }
9647 }
9648
9649 if (level != MIB2_UDP) {
9650 if ((mpctl = tcp_snmp_get(q, mpctl, legacy_req)) == NULL) {
9651 return (1);
9652 }
9653 if (level == MIB2_TCP) {
9654 goto done;
9655 }
9656 }
9657
9658 if ((mpctl = ip_snmp_get_mib2_ip_traffic_stats(q, mpctl,
9659 ipst, legacy_req)) == NULL) {
9660 return (1);
9661 }
9662
9663 if ((mpctl = ip_snmp_get_mib2_ip6(q, mpctl, ipst,
9664 legacy_req)) == NULL) {
9665 return (1);
9666 }
9667
9668 if ((mpctl = ip_snmp_get_mib2_icmp(q, mpctl, ipst)) == NULL) {
9669 return (1);
9670 }
9671
9672 if ((mpctl = ip_snmp_get_mib2_icmp6(q, mpctl, ipst)) == NULL) {
9673 return (1);
9674 }
9675
9676 if ((mpctl = ip_snmp_get_mib2_igmp(q, mpctl, ipst)) == NULL) {
9677 return (1);
9678 }
9679
9680 if ((mpctl = ip_snmp_get_mib2_multi(q, mpctl, ipst)) == NULL) {
9681 return (1);
9682 }
9683
9684 if ((mpctl = ip_snmp_get_mib2_ip_addr(q, mpctl, ipst,
9685 legacy_req)) == NULL) {
9686 return (1);
9687 }
9688
9689 if ((mpctl = ip_snmp_get_mib2_ip6_addr(q, mpctl, ipst,
9690 legacy_req)) == NULL) {
9691 return (1);
9692 }
9693
9694 if ((mpctl = ip_snmp_get_mib2_ip_group_mem(q, mpctl, ipst)) == NULL) {
9695 return (1);
9696 }
9697
9698 if ((mpctl = ip_snmp_get_mib2_ip6_group_mem(q, mpctl, ipst)) == NULL) {
9699 return (1);
9700 }
9701
9702 if ((mpctl = ip_snmp_get_mib2_ip_group_src(q, mpctl, ipst)) == NULL) {
9703 return (1);
9704 }
9705
9706 if ((mpctl = ip_snmp_get_mib2_ip6_group_src(q, mpctl, ipst)) == NULL) {
9707 return (1);
9708 }
9709
9710 if ((mpctl = ip_snmp_get_mib2_virt_multi(q, mpctl, ipst)) == NULL) {
9711 return (1);
9712 }
9713
9714 if ((mpctl = ip_snmp_get_mib2_multi_rtable(q, mpctl, ipst)) == NULL) {
9715 return (1);
9716 }
9717
9718 mpctl = ip_snmp_get_mib2_ip_route_media(q, mpctl, level, ipst);
9719 if (mpctl == NULL)
9720 return (1);
9721
9722 mpctl = ip_snmp_get_mib2_ip6_route_media(q, mpctl, level, ipst);
9723 if (mpctl == NULL)
9724 return (1);
9725
9726 if ((mpctl = sctp_snmp_get_mib2(q, mpctl, sctps)) == NULL) {
9727 return (1);
9728 }
9729 if ((mpctl = ip_snmp_get_mib2_ip_dce(q, mpctl, ipst)) == NULL) {
9730 return (1);
9731 }
9732 done:
9733 freemsg(mpctl);
9734 return (1);
9735 }
9736
9737 /* Get global (legacy) IPv4 statistics */
9738 static mblk_t *
9739 ip_snmp_get_mib2_ip(queue_t *q, mblk_t *mpctl, mib2_ipIfStatsEntry_t *ipmib,
9740 ip_stack_t *ipst, boolean_t legacy_req)
9741 {
9742 mib2_ip_t old_ip_mib;
9743 struct opthdr *optp;
9744 mblk_t *mp2ctl;
9745 mib2_ipAddrEntry_t mae;
9746
9747 /*
9748 * make a copy of the original message
9749 */
9750 mp2ctl = copymsg(mpctl);
9751
9752 /* fixed length IP structure... */
9753 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9754 optp->level = MIB2_IP;
9755 optp->name = 0;
9756 SET_MIB(old_ip_mib.ipForwarding,
9757 (WE_ARE_FORWARDING(ipst) ? 1 : 2));
9758 SET_MIB(old_ip_mib.ipDefaultTTL,
9759 (uint32_t)ipst->ips_ip_def_ttl);
9760 SET_MIB(old_ip_mib.ipReasmTimeout,
9761 ipst->ips_ip_reassembly_timeout);
9762 SET_MIB(old_ip_mib.ipAddrEntrySize,
9763 (legacy_req) ? LEGACY_MIB_SIZE(&mae, mib2_ipAddrEntry_t) :
9764 sizeof (mib2_ipAddrEntry_t));
9765 SET_MIB(old_ip_mib.ipRouteEntrySize,
9766 sizeof (mib2_ipRouteEntry_t));
9767 SET_MIB(old_ip_mib.ipNetToMediaEntrySize,
9768 sizeof (mib2_ipNetToMediaEntry_t));
9769 SET_MIB(old_ip_mib.ipMemberEntrySize, sizeof (ip_member_t));
9770 SET_MIB(old_ip_mib.ipGroupSourceEntrySize, sizeof (ip_grpsrc_t));
9771 SET_MIB(old_ip_mib.ipRouteAttributeSize,
9772 sizeof (mib2_ipAttributeEntry_t));
9773 SET_MIB(old_ip_mib.transportMLPSize, sizeof (mib2_transportMLPEntry_t));
9774 SET_MIB(old_ip_mib.ipDestEntrySize, sizeof (dest_cache_entry_t));
9775
9776 /*
9777 * Grab the statistics from the new IP MIB
9778 */
9779 SET_MIB(old_ip_mib.ipInReceives,
9780 (uint32_t)ipmib->ipIfStatsHCInReceives);
9781 SET_MIB(old_ip_mib.ipInHdrErrors, ipmib->ipIfStatsInHdrErrors);
9782 SET_MIB(old_ip_mib.ipInAddrErrors, ipmib->ipIfStatsInAddrErrors);
9783 SET_MIB(old_ip_mib.ipForwDatagrams,
9784 (uint32_t)ipmib->ipIfStatsHCOutForwDatagrams);
9785 SET_MIB(old_ip_mib.ipInUnknownProtos,
9786 ipmib->ipIfStatsInUnknownProtos);
9787 SET_MIB(old_ip_mib.ipInDiscards, ipmib->ipIfStatsInDiscards);
9788 SET_MIB(old_ip_mib.ipInDelivers,
9789 (uint32_t)ipmib->ipIfStatsHCInDelivers);
9790 SET_MIB(old_ip_mib.ipOutRequests,
9791 (uint32_t)ipmib->ipIfStatsHCOutRequests);
9792 SET_MIB(old_ip_mib.ipOutDiscards, ipmib->ipIfStatsOutDiscards);
9793 SET_MIB(old_ip_mib.ipOutNoRoutes, ipmib->ipIfStatsOutNoRoutes);
9794 SET_MIB(old_ip_mib.ipReasmReqds, ipmib->ipIfStatsReasmReqds);
9795 SET_MIB(old_ip_mib.ipReasmOKs, ipmib->ipIfStatsReasmOKs);
9796 SET_MIB(old_ip_mib.ipReasmFails, ipmib->ipIfStatsReasmFails);
9797 SET_MIB(old_ip_mib.ipFragOKs, ipmib->ipIfStatsOutFragOKs);
9798 SET_MIB(old_ip_mib.ipFragFails, ipmib->ipIfStatsOutFragFails);
9799 SET_MIB(old_ip_mib.ipFragCreates, ipmib->ipIfStatsOutFragCreates);
9800
9801 /* ipRoutingDiscards is not being used */
9802 SET_MIB(old_ip_mib.ipRoutingDiscards, 0);
9803 SET_MIB(old_ip_mib.tcpInErrs, ipmib->tcpIfStatsInErrs);
9804 SET_MIB(old_ip_mib.udpNoPorts, ipmib->udpIfStatsNoPorts);
9805 SET_MIB(old_ip_mib.ipInCksumErrs, ipmib->ipIfStatsInCksumErrs);
9806 SET_MIB(old_ip_mib.ipReasmDuplicates,
9807 ipmib->ipIfStatsReasmDuplicates);
9808 SET_MIB(old_ip_mib.ipReasmPartDups, ipmib->ipIfStatsReasmPartDups);
9809 SET_MIB(old_ip_mib.ipForwProhibits, ipmib->ipIfStatsForwProhibits);
9810 SET_MIB(old_ip_mib.udpInCksumErrs, ipmib->udpIfStatsInCksumErrs);
9811 SET_MIB(old_ip_mib.udpInOverflows, ipmib->udpIfStatsInOverflows);
9812 SET_MIB(old_ip_mib.rawipInOverflows,
9813 ipmib->rawipIfStatsInOverflows);
9814
9815 SET_MIB(old_ip_mib.ipsecInSucceeded, ipmib->ipsecIfStatsInSucceeded);
9816 SET_MIB(old_ip_mib.ipsecInFailed, ipmib->ipsecIfStatsInFailed);
9817 SET_MIB(old_ip_mib.ipInIPv6, ipmib->ipIfStatsInWrongIPVersion);
9818 SET_MIB(old_ip_mib.ipOutIPv6, ipmib->ipIfStatsOutWrongIPVersion);
9819 SET_MIB(old_ip_mib.ipOutSwitchIPv6,
9820 ipmib->ipIfStatsOutSwitchIPVersion);
9821
9822 if (!snmp_append_data(mpctl->b_cont, (char *)&old_ip_mib,
9823 (int)sizeof (old_ip_mib))) {
9824 ip1dbg(("ip_snmp_get_mib2_ip: failed to allocate %u bytes\n",
9825 (uint_t)sizeof (old_ip_mib)));
9826 }
9827
9828 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9829 ip3dbg(("ip_snmp_get_mib2_ip: level %d, name %d, len %d\n",
9830 (int)optp->level, (int)optp->name, (int)optp->len));
9831 qreply(q, mpctl);
9832 return (mp2ctl);
9833 }
9834
9835 /* Per interface IPv4 statistics */
9836 static mblk_t *
9837 ip_snmp_get_mib2_ip_traffic_stats(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst,
9838 boolean_t legacy_req)
9839 {
9840 struct opthdr *optp;
9841 mblk_t *mp2ctl;
9842 ill_t *ill;
9843 ill_walk_context_t ctx;
9844 mblk_t *mp_tail = NULL;
9845 mib2_ipIfStatsEntry_t global_ip_mib;
9846 mib2_ipAddrEntry_t mae;
9847
9848 /*
9849 * Make a copy of the original message
9850 */
9851 mp2ctl = copymsg(mpctl);
9852
9853 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9854 optp->level = MIB2_IP;
9855 optp->name = MIB2_IP_TRAFFIC_STATS;
9856 /* Include "unknown interface" ip_mib */
9857 ipst->ips_ip_mib.ipIfStatsIPVersion = MIB2_INETADDRESSTYPE_ipv4;
9858 ipst->ips_ip_mib.ipIfStatsIfIndex =
9859 MIB2_UNKNOWN_INTERFACE; /* Flag to netstat */
9860 SET_MIB(ipst->ips_ip_mib.ipIfStatsForwarding,
9861 (ipst->ips_ip_forwarding ? 1 : 2));
9862 SET_MIB(ipst->ips_ip_mib.ipIfStatsDefaultTTL,
9863 (uint32_t)ipst->ips_ip_def_ttl);
9864 SET_MIB(ipst->ips_ip_mib.ipIfStatsEntrySize,
9865 sizeof (mib2_ipIfStatsEntry_t));
9866 SET_MIB(ipst->ips_ip_mib.ipIfStatsAddrEntrySize,
9867 sizeof (mib2_ipAddrEntry_t));
9868 SET_MIB(ipst->ips_ip_mib.ipIfStatsRouteEntrySize,
9869 sizeof (mib2_ipRouteEntry_t));
9870 SET_MIB(ipst->ips_ip_mib.ipIfStatsNetToMediaEntrySize,
9871 sizeof (mib2_ipNetToMediaEntry_t));
9872 SET_MIB(ipst->ips_ip_mib.ipIfStatsMemberEntrySize,
9873 sizeof (ip_member_t));
9874 SET_MIB(ipst->ips_ip_mib.ipIfStatsGroupSourceEntrySize,
9875 sizeof (ip_grpsrc_t));
9876
9877 bcopy(&ipst->ips_ip_mib, &global_ip_mib, sizeof (global_ip_mib));
9878
9879 if (legacy_req) {
9880 SET_MIB(global_ip_mib.ipIfStatsAddrEntrySize,
9881 LEGACY_MIB_SIZE(&mae, mib2_ipAddrEntry_t));
9882 }
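/*
 * global_ip_mib starts as a snapshot of the "unknown interface"
 * statistics; as we walk the ills below, each per-ill MIB is
 * accumulated into it (ip_mib2_add_ip_stats) so that it can be
 * handed to ip_snmp_get_mib2_ip() as the legacy aggregate.
 */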
9883
9884 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
9885 (char *)&global_ip_mib, (int)sizeof (global_ip_mib))) {
9886 ip1dbg(("ip_snmp_get_mib2_ip_traffic_stats: "
9887 "failed to allocate %u bytes\n",
9888 (uint_t)sizeof (global_ip_mib)));
9889 }
9890
9891 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
9892 ill = ILL_START_WALK_V4(&ctx, ipst);
9893 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
9894 ill->ill_ip_mib->ipIfStatsIfIndex =
9895 ill->ill_phyint->phyint_ifindex;
9896 SET_MIB(ill->ill_ip_mib->ipIfStatsForwarding,
9897 (ipst->ips_ip_forwarding ? 1 : 2));
9898 SET_MIB(ill->ill_ip_mib->ipIfStatsDefaultTTL,
9899 (uint32_t)ipst->ips_ip_def_ttl);
9900
9901 ip_mib2_add_ip_stats(&global_ip_mib, ill->ill_ip_mib);
9902 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
9903 (char *)ill->ill_ip_mib,
9904 (int)sizeof (*ill->ill_ip_mib))) {
9905 ip1dbg(("ip_snmp_get_mib2_ip_traffic_stats: "
9906 "failed to allocate %u bytes\n",
9907 (uint_t)sizeof (*ill->ill_ip_mib)));
9908 }
9909 }
9910 rw_exit(&ipst->ips_ill_g_lock);
9911
9912 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9913 ip3dbg(("ip_snmp_get_mib2_ip_traffic_stats: "
9914 "level %d, name %d, len %d\n",
9915 (int)optp->level, (int)optp->name, (int)optp->len));
9916 qreply(q, mpctl);
9917
9918 if (mp2ctl == NULL)
9919 return (NULL);
9920
9921 return (ip_snmp_get_mib2_ip(q, mp2ctl, &global_ip_mib, ipst,
9922 legacy_req));
9923 }
9924
9925 /* Global IPv4 ICMP statistics */
9926 static mblk_t *
9927 ip_snmp_get_mib2_icmp(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
9928 {
9929 struct opthdr *optp;
9930 mblk_t *mp2ctl;
9931
9932 /*
9933 * Make a copy of the original message
9934 */
9935 mp2ctl = copymsg(mpctl);
9936
9937 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9938 optp->level = MIB2_ICMP;
9939 optp->name = 0;
9940 if (!snmp_append_data(mpctl->b_cont, (char *)&ipst->ips_icmp_mib,
9941 (int)sizeof (ipst->ips_icmp_mib))) {
9942 ip1dbg(("ip_snmp_get_mib2_icmp: failed to allocate %u bytes\n",
9943 (uint_t)sizeof (ipst->ips_icmp_mib)));
9944 }
9945 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9946 ip3dbg(("ip_snmp_get_mib2_icmp: level %d, name %d, len %d\n",
9947 (int)optp->level, (int)optp->name, (int)optp->len));
9948 qreply(q, mpctl);
9949 return (mp2ctl);
9950 }
9951
9952 /* Global IPv4 IGMP statistics */
9953 static mblk_t *
9954 ip_snmp_get_mib2_igmp(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
9955 {
9956 struct opthdr *optp;
9957 mblk_t *mp2ctl;
9958
9959 /*
9960 * make a copy of the original message
9961 */
9962 mp2ctl = copymsg(mpctl);
9963
9964 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9965 optp->level = EXPER_IGMP;
9966 optp->name = 0;
9967 if (!snmp_append_data(mpctl->b_cont, (char *)&ipst->ips_igmpstat,
9968 (int)sizeof (ipst->ips_igmpstat))) {
9969 ip1dbg(("ip_snmp_get_mib2_igmp: failed to allocate %u bytes\n",
9970 (uint_t)sizeof (ipst->ips_igmpstat)));
9971 }
9972 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9973 ip3dbg(("ip_snmp_get_mib2_igmp: level %d, name %d, len %d\n",
9974 (int)optp->level, (int)optp->name, (int)optp->len));
9975 qreply(q, mpctl);
9976 return (mp2ctl);
9977 }
9978
9979 /* Global IPv4 Multicast Routing statistics */
9980 static mblk_t *
9981 ip_snmp_get_mib2_multi(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
9982 {
9983 struct opthdr *optp;
9984 mblk_t *mp2ctl;
9985
9986 /*
9987 * make a copy of the original message
9988 */
9989 mp2ctl = copymsg(mpctl);
9990
9991 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
9992 optp->level = EXPER_DVMRP;
9993 optp->name = 0;
9994 if (!ip_mroute_stats(mpctl->b_cont, ipst)) {
9995 ip0dbg(("ip_mroute_stats: failed\n"));
9996 }
9997 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
9998 ip3dbg(("ip_snmp_get_mib2_multi: level %d, name %d, len %d\n",
9999 (int)optp->level, (int)optp->name, (int)optp->len));
10000 qreply(q, mpctl);
10001 return (mp2ctl);
10002 }
10003
10004 /* IPv4 address information */
10005 static mblk_t *
10006 ip_snmp_get_mib2_ip_addr(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst,
10007 boolean_t legacy_req)
10008 {
10009 struct opthdr *optp;
10010 mblk_t *mp2ctl;
10011 mblk_t *mp_tail = NULL;
10012 ill_t *ill;
10013 ipif_t *ipif;
10014 uint_t bitval;
10015 mib2_ipAddrEntry_t mae;
10016 size_t mae_size;
10017 zoneid_t zoneid;
10018 ill_walk_context_t ctx;
10019
10020 /*
10021 * make a copy of the original message
10022 */
10023 mp2ctl = copymsg(mpctl);
10024
10025 mae_size = (legacy_req) ? LEGACY_MIB_SIZE(&mae, mib2_ipAddrEntry_t) :
10026 sizeof (mib2_ipAddrEntry_t);
10027
10028 /* ipAddrEntryTable */
10029
10030 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10031 optp->level = MIB2_IP;
10032 optp->name = MIB2_IP_ADDR;
10033 zoneid = Q_TO_CONN(q)->conn_zoneid;
10034
10035 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10036 ill = ILL_START_WALK_V4(&ctx, ipst);
10037 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10038 for (ipif = ill->ill_ipif; ipif != NULL;
10039 ipif = ipif->ipif_next) {
10040 if (ipif->ipif_zoneid != zoneid &&
10041 ipif->ipif_zoneid != ALL_ZONES)
10042 continue;
10043 /* Sum of count from dead IRE_LO* and our current */
10044 mae.ipAdEntInfo.ae_ibcnt = ipif->ipif_ib_pkt_count;
10045 if (ipif->ipif_ire_local != NULL) {
10046 mae.ipAdEntInfo.ae_ibcnt +=
10047 ipif->ipif_ire_local->ire_ib_pkt_count;
10048 }
10049 mae.ipAdEntInfo.ae_obcnt = 0;
10050 mae.ipAdEntInfo.ae_focnt = 0;
10051
10052 ipif_get_name(ipif, mae.ipAdEntIfIndex.o_bytes,
10053 OCTET_LENGTH);
10054 mae.ipAdEntIfIndex.o_length =
10055 mi_strlen(mae.ipAdEntIfIndex.o_bytes);
10056 mae.ipAdEntAddr = ipif->ipif_lcl_addr;
10057 mae.ipAdEntNetMask = ipif->ipif_net_mask;
10058 mae.ipAdEntInfo.ae_subnet = ipif->ipif_subnet;
10059 mae.ipAdEntInfo.ae_subnet_len =
10060 ip_mask_to_plen(ipif->ipif_net_mask);
10061 mae.ipAdEntInfo.ae_src_addr = ipif->ipif_lcl_addr;
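/*
 * ipAdEntBcastAddr is the value of the least-significant bit
 * of the broadcast address (RFC 1213); the loop below scans for
 * the lowest set bit, yielding 0 if no broadcast address is set.
 */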
10062 for (bitval = 1;
10063 bitval &&
10064 !(bitval & ipif->ipif_brd_addr);
10065 bitval <<= 1)
10066 noop;
10067 mae.ipAdEntBcastAddr = bitval;
10068 mae.ipAdEntReasmMaxSize = IP_MAXPACKET;
10069 mae.ipAdEntInfo.ae_mtu = ipif->ipif_ill->ill_mtu;
10070 mae.ipAdEntInfo.ae_metric = ipif->ipif_ill->ill_metric;
10071 mae.ipAdEntInfo.ae_broadcast_addr =
10072 ipif->ipif_brd_addr;
10073 mae.ipAdEntInfo.ae_pp_dst_addr =
10074 ipif->ipif_pp_dst_addr;
10075 mae.ipAdEntInfo.ae_flags = ipif->ipif_flags |
10076 ill->ill_flags | ill->ill_phyint->phyint_flags;
10077 mae.ipAdEntRetransmitTime =
10078 ill->ill_reachable_retrans_time;
10079
10080 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10081 (char *)&mae, (int)mae_size)) {
10082 ip1dbg(("ip_snmp_get_mib2_ip_addr: failed to "
10083 "allocate %u bytes\n", (uint_t)mae_size));
10084 }
10085 }
10086 }
10087 rw_exit(&ipst->ips_ill_g_lock);
10088
10089 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10090 ip3dbg(("ip_snmp_get_mib2_ip_addr: level %d, name %d, len %d\n",
10091 (int)optp->level, (int)optp->name, (int)optp->len));
10092 qreply(q, mpctl);
10093 return (mp2ctl);
10094 }
10095
10096 /* IPv6 address information */
10097 static mblk_t *
10098 ip_snmp_get_mib2_ip6_addr(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst,
10099 boolean_t legacy_req)
10100 {
10101 struct opthdr *optp;
10102 mblk_t *mp2ctl;
10103 mblk_t *mp_tail = NULL;
10104 ill_t *ill;
10105 ipif_t *ipif;
10106 mib2_ipv6AddrEntry_t mae6;
10107 size_t mae6_size;
10108 zoneid_t zoneid;
10109 ill_walk_context_t ctx;
10110
10111 /*
10112 * make a copy of the original message
10113 */
10114 mp2ctl = copymsg(mpctl);
10115
10116 mae6_size = (legacy_req) ?
10117 LEGACY_MIB_SIZE(&mae6, mib2_ipv6AddrEntry_t) :
10118 sizeof (mib2_ipv6AddrEntry_t);
10119
10120 /* ipv6AddrEntryTable */
10121
10122 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10123 optp->level = MIB2_IP6;
10124 optp->name = MIB2_IP6_ADDR;
10125 zoneid = Q_TO_CONN(q)->conn_zoneid;
10126
10127 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10128 ill = ILL_START_WALK_V6(&ctx, ipst);
10129 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10130 for (ipif = ill->ill_ipif; ipif != NULL;
10131 ipif = ipif->ipif_next) {
10132 if (ipif->ipif_zoneid != zoneid &&
10133 ipif->ipif_zoneid != ALL_ZONES)
10134 continue;
10135 /* Sum of count from dead IRE_LO* and our current */
10136 mae6.ipv6AddrInfo.ae_ibcnt = ipif->ipif_ib_pkt_count;
10137 if (ipif->ipif_ire_local != NULL) {
10138 mae6.ipv6AddrInfo.ae_ibcnt +=
10139 ipif->ipif_ire_local->ire_ib_pkt_count;
10140 }
10141 mae6.ipv6AddrInfo.ae_obcnt = 0;
10142 mae6.ipv6AddrInfo.ae_focnt = 0;
10143
10144 ipif_get_name(ipif, mae6.ipv6AddrIfIndex.o_bytes,
10145 OCTET_LENGTH);
10146 mae6.ipv6AddrIfIndex.o_length =
10147 mi_strlen(mae6.ipv6AddrIfIndex.o_bytes);
10148 mae6.ipv6AddrAddress = ipif->ipif_v6lcl_addr;
10149 mae6.ipv6AddrPfxLength =
10150 ip_mask_to_plen_v6(&ipif->ipif_v6net_mask);
10151 mae6.ipv6AddrInfo.ae_subnet = ipif->ipif_v6subnet;
10152 mae6.ipv6AddrInfo.ae_subnet_len =
10153 mae6.ipv6AddrPfxLength;
10154 mae6.ipv6AddrInfo.ae_src_addr = ipif->ipif_v6lcl_addr;
10155
10156 /* Type: stateless(1), stateful(2), unknown(3) */
10157 if (ipif->ipif_flags & IPIF_ADDRCONF)
10158 mae6.ipv6AddrType = 1;
10159 else
10160 mae6.ipv6AddrType = 2;
10161 /* Anycast: true(1), false(2) */
10162 if (ipif->ipif_flags & IPIF_ANYCAST)
10163 mae6.ipv6AddrAnycastFlag = 1;
10164 else
10165 mae6.ipv6AddrAnycastFlag = 2;
10166
10167 /*
10168 * Address status: preferred(1), deprecated(2),
10169 * invalid(3), inaccessible(4), unknown(5)
10170 */
10171 if (ipif->ipif_flags & IPIF_NOLOCAL)
10172 mae6.ipv6AddrStatus = 3;
10173 else if (ipif->ipif_flags & IPIF_DEPRECATED)
10174 mae6.ipv6AddrStatus = 2;
10175 else
10176 mae6.ipv6AddrStatus = 1;
10177 mae6.ipv6AddrInfo.ae_mtu = ipif->ipif_ill->ill_mtu;
10178 mae6.ipv6AddrInfo.ae_metric =
10179 ipif->ipif_ill->ill_metric;
10180 mae6.ipv6AddrInfo.ae_pp_dst_addr =
10181 ipif->ipif_v6pp_dst_addr;
10182 mae6.ipv6AddrInfo.ae_flags = ipif->ipif_flags |
10183 ill->ill_flags | ill->ill_phyint->phyint_flags;
10184 mae6.ipv6AddrReasmMaxSize = IP_MAXPACKET;
10185 mae6.ipv6AddrIdentifier = ill->ill_token;
10186 mae6.ipv6AddrIdentifierLen = ill->ill_token_length;
10187 mae6.ipv6AddrReachableTime = ill->ill_reachable_time;
10188 mae6.ipv6AddrRetransmitTime =
10189 ill->ill_reachable_retrans_time;
10190 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10191 (char *)&mae6, (int)mae6_size)) {
10192 ip1dbg(("ip_snmp_get_mib2_ip6_addr: failed to "
10193 "allocate %u bytes\n",
10194 (uint_t)mae6_size));
10195 }
10196 }
10197 }
10198 rw_exit(&ipst->ips_ill_g_lock);
10199
10200 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10201 ip3dbg(("ip_snmp_get_mib2_ip6_addr: level %d, name %d, len %d\n",
10202 (int)optp->level, (int)optp->name, (int)optp->len));
10203 qreply(q, mpctl);
10204 return (mp2ctl);
10205 }
10206
10207 /* IPv4 multicast group membership. */
10208 static mblk_t *
10209 ip_snmp_get_mib2_ip_group_mem(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10210 {
10211 struct opthdr *optp;
10212 mblk_t *mp2ctl;
10213 ill_t *ill;
10214 ipif_t *ipif;
10215 ilm_t *ilm;
10216 ip_member_t ipm;
10217 mblk_t *mp_tail = NULL;
10218 ill_walk_context_t ctx;
10219 zoneid_t zoneid;
10220
10221 /*
10222 * make a copy of the original message
10223 */
10224 mp2ctl = copymsg(mpctl);
10225 zoneid = Q_TO_CONN(q)->conn_zoneid;
10226
10227 /* ipGroupMember table */
10228 optp = (struct opthdr *)&mpctl->b_rptr[
10229 sizeof (struct T_optmgmt_ack)];
10230 optp->level = MIB2_IP;
10231 optp->name = EXPER_IP_GROUP_MEMBERSHIP;
10232
10233 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10234 ill = ILL_START_WALK_V4(&ctx, ipst);
10235 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10236 /* Make sure the ill isn't going away. */
10237 if (!ill_check_and_refhold(ill))
10238 continue;
10239 rw_exit(&ipst->ips_ill_g_lock);
10240 rw_enter(&ill->ill_mcast_lock, RW_READER);
10241 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) {
10242 if (ilm->ilm_zoneid != zoneid &&
10243 ilm->ilm_zoneid != ALL_ZONES)
10244 continue;
10245
10246 /* Is there an ipif for ilm_ifaddr? */
10247 for (ipif = ill->ill_ipif; ipif != NULL;
10248 ipif = ipif->ipif_next) {
10249 if (!IPIF_IS_CONDEMNED(ipif) &&
10250 ipif->ipif_lcl_addr == ilm->ilm_ifaddr &&
10251 ilm->ilm_ifaddr != INADDR_ANY)
10252 break;
10253 }
10254 if (ipif != NULL) {
10255 ipif_get_name(ipif,
10256 ipm.ipGroupMemberIfIndex.o_bytes,
10257 OCTET_LENGTH);
10258 } else {
10259 ill_get_name(ill,
10260 ipm.ipGroupMemberIfIndex.o_bytes,
10261 OCTET_LENGTH);
10262 }
10263 ipm.ipGroupMemberIfIndex.o_length =
10264 mi_strlen(ipm.ipGroupMemberIfIndex.o_bytes);
10265
10266 ipm.ipGroupMemberAddress = ilm->ilm_addr;
10267 ipm.ipGroupMemberRefCnt = ilm->ilm_refcnt;
10268 ipm.ipGroupMemberFilterMode = ilm->ilm_fmode;
10269 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10270 (char *)&ipm, (int)sizeof (ipm))) {
10271 ip1dbg(("ip_snmp_get_mib2_ip_group: "
10272 "failed to allocate %u bytes\n",
10273 (uint_t)sizeof (ipm)));
10274 }
10275 }
10276 rw_exit(&ill->ill_mcast_lock);
10277 ill_refrele(ill);
10278 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10279 }
10280 rw_exit(&ipst->ips_ill_g_lock);
10281 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10282 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n",
10283 (int)optp->level, (int)optp->name, (int)optp->len));
10284 qreply(q, mpctl);
10285 return (mp2ctl);
10286 }
10287
10288 /* IPv6 multicast group membership. */
10289 static mblk_t *
10290 ip_snmp_get_mib2_ip6_group_mem(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10291 {
10292 struct opthdr *optp;
10293 mblk_t *mp2ctl;
10294 ill_t *ill;
10295 ilm_t *ilm;
10296 ipv6_member_t ipm6;
10297 mblk_t *mp_tail = NULL;
10298 ill_walk_context_t ctx;
10299 zoneid_t zoneid;
10300
10301 /*
10302 * make a copy of the original message
10303 */
10304 mp2ctl = copymsg(mpctl);
10305 zoneid = Q_TO_CONN(q)->conn_zoneid;
10306
10307 /* ip6GroupMember table */
10308 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10309 optp->level = MIB2_IP6;
10310 optp->name = EXPER_IP6_GROUP_MEMBERSHIP;
10311
10312 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10313 ill = ILL_START_WALK_V6(&ctx, ipst);
10314 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10315 /* Make sure the ill isn't going away. */
10316 if (!ill_check_and_refhold(ill))
10317 continue;
10318 rw_exit(&ipst->ips_ill_g_lock);
10319 /*
10320 * Normally we don't have any members on under-IPMP interfaces.
10321 * We report them as a debugging aid.
10322 */
10323 rw_enter(&ill->ill_mcast_lock, RW_READER);
10324 ipm6.ipv6GroupMemberIfIndex = ill->ill_phyint->phyint_ifindex;
10325 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) {
10326 if (ilm->ilm_zoneid != zoneid &&
10327 ilm->ilm_zoneid != ALL_ZONES)
10328 continue; /* not this zone */
10329 ipm6.ipv6GroupMemberAddress = ilm->ilm_v6addr;
10330 ipm6.ipv6GroupMemberRefCnt = ilm->ilm_refcnt;
10331 ipm6.ipv6GroupMemberFilterMode = ilm->ilm_fmode;
10332 if (!snmp_append_data2(mpctl->b_cont,
10333 &mp_tail,
10334 (char *)&ipm6, (int)sizeof (ipm6))) {
10335 ip1dbg(("ip_snmp_get_mib2_ip6_group: "
10336 "failed to allocate %u bytes\n",
10337 (uint_t)sizeof (ipm6)));
10338 }
10339 }
10340 rw_exit(&ill->ill_mcast_lock);
10341 ill_refrele(ill);
10342 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10343 }
10344 rw_exit(&ipst->ips_ill_g_lock);
10345
10346 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10347 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n",
10348 (int)optp->level, (int)optp->name, (int)optp->len));
10349 qreply(q, mpctl);
10350 return (mp2ctl);
10351 }
10352
10353 /* IP multicast filtered sources */
10354 static mblk_t *
10355 ip_snmp_get_mib2_ip_group_src(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10356 {
10357 struct opthdr *optp;
10358 mblk_t *mp2ctl;
10359 ill_t *ill;
10360 ipif_t *ipif;
10361 ilm_t *ilm;
10362 ip_grpsrc_t ips;
10363 mblk_t *mp_tail = NULL;
10364 ill_walk_context_t ctx;
10365 zoneid_t zoneid;
10366 int i;
10367 slist_t *sl;
10368
10369 /*
10370 * make a copy of the original message
10371 */
10372 mp2ctl = copymsg(mpctl);
10373 zoneid = Q_TO_CONN(q)->conn_zoneid;
10374
10375 /* ipGroupSource table */
10376 optp = (struct opthdr *)&mpctl->b_rptr[
10377 sizeof (struct T_optmgmt_ack)];
10378 optp->level = MIB2_IP;
10379 optp->name = EXPER_IP_GROUP_SOURCES;
10380
10381 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10382 ill = ILL_START_WALK_V4(&ctx, ipst);
10383 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10384 /* Make sure the ill isn't going away. */
10385 if (!ill_check_and_refhold(ill))
10386 continue;
10387 rw_exit(&ipst->ips_ill_g_lock);
10388 rw_enter(&ill->ill_mcast_lock, RW_READER);
10389 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) {
10390 sl = ilm->ilm_filter;
10391 if (ilm->ilm_zoneid != zoneid &&
10392 ilm->ilm_zoneid != ALL_ZONES)
10393 continue;
10394 if (SLIST_IS_EMPTY(sl))
10395 continue;
10396
10397 /* Is there an ipif for ilm_ifaddr? */
10398 for (ipif = ill->ill_ipif; ipif != NULL;
10399 ipif = ipif->ipif_next) {
10400 if (!IPIF_IS_CONDEMNED(ipif) &&
10401 ipif->ipif_lcl_addr == ilm->ilm_ifaddr &&
10402 ilm->ilm_ifaddr != INADDR_ANY)
10403 break;
10404 }
10405 if (ipif != NULL) {
10406 ipif_get_name(ipif,
10407 ips.ipGroupSourceIfIndex.o_bytes,
10408 OCTET_LENGTH);
10409 } else {
10410 ill_get_name(ill,
10411 ips.ipGroupSourceIfIndex.o_bytes,
10412 OCTET_LENGTH);
10413 }
10414 ips.ipGroupSourceIfIndex.o_length =
10415 mi_strlen(ips.ipGroupSourceIfIndex.o_bytes);
10416
10417 ips.ipGroupSourceGroup = ilm->ilm_addr;
10418 for (i = 0; i < sl->sl_numsrc; i++) {
10419 if (!IN6_IS_ADDR_V4MAPPED(&sl->sl_addr[i]))
10420 continue;
10421 IN6_V4MAPPED_TO_IPADDR(&sl->sl_addr[i],
10422 ips.ipGroupSourceAddress);
10423 if (snmp_append_data2(mpctl->b_cont, &mp_tail,
10424 (char *)&ips, (int)sizeof (ips)) == 0) {
10425 ip1dbg(("ip_snmp_get_mib2_ip_group_src:"
10426 " failed to allocate %u bytes\n",
10427 (uint_t)sizeof (ips)));
10428 }
10429 }
10430 }
10431 rw_exit(&ill->ill_mcast_lock);
10432 ill_refrele(ill);
10433 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10434 }
10435 rw_exit(&ipst->ips_ill_g_lock);
10436 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10437 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n",
10438 (int)optp->level, (int)optp->name, (int)optp->len));
10439 qreply(q, mpctl);
10440 return (mp2ctl);
10441 }
10442
10443 /* IPv6 multicast filtered sources. */
10444 static mblk_t *
10445 ip_snmp_get_mib2_ip6_group_src(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10446 {
10447 struct opthdr *optp;
10448 mblk_t *mp2ctl;
10449 ill_t *ill;
10450 ilm_t *ilm;
10451 ipv6_grpsrc_t ips6;
10452 mblk_t *mp_tail = NULL;
10453 ill_walk_context_t ctx;
10454 zoneid_t zoneid;
10455 int i;
10456 slist_t *sl;
10457
10458 /*
10459 * make a copy of the original message
10460 */
10461 mp2ctl = copymsg(mpctl);
10462 zoneid = Q_TO_CONN(q)->conn_zoneid;
10463
10464 /* ip6GroupMember table */
10465 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10466 optp->level = MIB2_IP6;
10467 optp->name = EXPER_IP6_GROUP_SOURCES;
10468
10469 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10470 ill = ILL_START_WALK_V6(&ctx, ipst);
10471 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10472 /* Make sure the ill isn't going away. */
10473 if (!ill_check_and_refhold(ill))
10474 continue;
10475 rw_exit(&ipst->ips_ill_g_lock);
10476 /*
10477 * Normally we don't have any members on under IPMP interfaces.
10478 * We report them as a debugging aid.
10479 */
10480 rw_enter(&ill->ill_mcast_lock, RW_READER);
10481 ips6.ipv6GroupSourceIfIndex = ill->ill_phyint->phyint_ifindex;
10482 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) {
10483 sl = ilm->ilm_filter;
10484 if (ilm->ilm_zoneid != zoneid &&
10485 ilm->ilm_zoneid != ALL_ZONES)
10486 continue;
10487 if (SLIST_IS_EMPTY(sl))
10488 continue;
10489 ips6.ipv6GroupSourceGroup = ilm->ilm_v6addr;
10490 for (i = 0; i < sl->sl_numsrc; i++) {
10491 ips6.ipv6GroupSourceAddress = sl->sl_addr[i];
10492 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10493 (char *)&ips6, (int)sizeof (ips6))) {
10494 ip1dbg(("ip_snmp_get_mib2_ip6_"
10495 "group_src: failed to allocate "
10496 "%u bytes\n",
10497 (uint_t)sizeof (ips6)));
10498 }
10499 }
10500 }
10501 rw_exit(&ill->ill_mcast_lock);
10502 ill_refrele(ill);
10503 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10504 }
10505 rw_exit(&ipst->ips_ill_g_lock);
10506
10507 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10508 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n",
10509 (int)optp->level, (int)optp->name, (int)optp->len));
10510 qreply(q, mpctl);
10511 return (mp2ctl);
10512 }
10513
10514 /* Multicast routing virtual interface table. */
10515 static mblk_t *
10516 ip_snmp_get_mib2_virt_multi(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10517 {
10518 struct opthdr *optp;
10519 mblk_t *mp2ctl;
10520
10521 /*
10522 * make a copy of the original message
10523 */
10524 mp2ctl = copymsg(mpctl);
10525
10526 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10527 optp->level = EXPER_DVMRP;
10528 optp->name = EXPER_DVMRP_VIF;
10529 if (!ip_mroute_vif(mpctl->b_cont, ipst)) {
10530 ip0dbg(("ip_mroute_vif: failed\n"));
10531 }
10532 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10533 ip3dbg(("ip_snmp_get_mib2_virt_multi: level %d, name %d, len %d\n",
10534 (int)optp->level, (int)optp->name, (int)optp->len));
10535 qreply(q, mpctl);
10536 return (mp2ctl);
10537 }
10538
10539 /* Multicast routing table. */
10540 static mblk_t *
10541 ip_snmp_get_mib2_multi_rtable(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10542 {
10543 struct opthdr *optp;
10544 mblk_t *mp2ctl;
10545
10546 /*
10547 * make a copy of the original message
10548 */
10549 mp2ctl = copymsg(mpctl);
10550
10551 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10552 optp->level = EXPER_DVMRP;
10553 optp->name = EXPER_DVMRP_MRT;
10554 if (!ip_mroute_mrt(mpctl->b_cont, ipst)) {
10555 ip0dbg(("ip_mroute_mrt: failed\n"));
10556 }
10557 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10558 ip3dbg(("ip_snmp_get_mib2_multi_rtable: level %d, name %d, len %d\n",
10559 (int)optp->level, (int)optp->name, (int)optp->len));
10560 qreply(q, mpctl);
10561 return (mp2ctl);
10562 }
10563
10564 /*
10565 * Return ipRouteEntryTable, ipNetToMediaEntryTable, and ipRouteAttributeTable
10566 * in one IRE walk.
10567 */
10568 static mblk_t *
10569 ip_snmp_get_mib2_ip_route_media(queue_t *q, mblk_t *mpctl, int level,
10570 ip_stack_t *ipst)
10571 {
10572 struct opthdr *optp;
10573 mblk_t *mp2ctl; /* Returned */
10574 mblk_t *mp3ctl; /* nettomedia */
10575 mblk_t *mp4ctl; /* routeattrs */
10576 iproutedata_t ird;
10577 zoneid_t zoneid;
10578
10579 /*
10580 * make copies of the original message
10581 * - mp2ctl is returned unchanged to the caller for its use
10582 * - mpctl is sent upstream as ipRouteEntryTable
10583 * - mp3ctl is sent upstream as ipNetToMediaEntryTable
10584 * - mp4ctl is sent upstream as ipRouteAttributeTable
10585 */
10586 mp2ctl = copymsg(mpctl);
10587 mp3ctl = copymsg(mpctl);
10588 mp4ctl = copymsg(mpctl);
10589 if (mp3ctl == NULL || mp4ctl == NULL) {
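		/* freemsg() ignores NULL, so unconditional frees are safe. */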
10590 freemsg(mp4ctl);
10591 freemsg(mp3ctl);
10592 freemsg(mp2ctl);
10593 freemsg(mpctl);
10594 return (NULL);
10595 }
10596
10597 bzero(&ird, sizeof (ird));
10598
10599 ird.ird_route.lp_head = mpctl->b_cont;
10600 ird.ird_netmedia.lp_head = mp3ctl->b_cont;
10601 ird.ird_attrs.lp_head = mp4ctl->b_cont;
10602 /*
10603 * If the level has been set the special EXPER_IP_AND_ALL_IRES value,
10604 * then also include ire_testhidden IREs and IRE_IF_CLONE. This is
10605 * intended a temporary solution until a proper MIB API is provided
10606 * that provides complete filtering/caller-opt-in.
10607 */
10608 if (level == EXPER_IP_AND_ALL_IRES)
10609 ird.ird_flags |= IRD_REPORT_ALL;
10610
10611 zoneid = Q_TO_CONN(q)->conn_zoneid;
10612 ire_walk_v4(ip_snmp_get2_v4, &ird, zoneid, ipst);
10613
10614 /* ipRouteEntryTable in mpctl */
10615 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10616 optp->level = MIB2_IP;
10617 optp->name = MIB2_IP_ROUTE;
10618 optp->len = msgdsize(ird.ird_route.lp_head);
10619 ip3dbg(("ip_snmp_get_mib2_ip_route_media: level %d, name %d, len %d\n",
10620 (int)optp->level, (int)optp->name, (int)optp->len));
10621 qreply(q, mpctl);
10622
10623 /* ipNetToMediaEntryTable in mp3ctl */
10624 ncec_walk(NULL, ip_snmp_get2_v4_media, &ird, ipst);
10625
10626 optp = (struct opthdr *)&mp3ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10627 optp->level = MIB2_IP;
10628 optp->name = MIB2_IP_MEDIA;
10629 optp->len = msgdsize(ird.ird_netmedia.lp_head);
10630 ip3dbg(("ip_snmp_get_mib2_ip_route_media: level %d, name %d, len %d\n",
10631 (int)optp->level, (int)optp->name, (int)optp->len));
10632 qreply(q, mp3ctl);
10633
10634 /* ipRouteAttributeTable in mp4ctl */
10635 optp = (struct opthdr *)&mp4ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10636 optp->level = MIB2_IP;
10637 optp->name = EXPER_IP_RTATTR;
10638 optp->len = msgdsize(ird.ird_attrs.lp_head);
10639 ip3dbg(("ip_snmp_get_mib2_ip_route_media: level %d, name %d, len %d\n",
10640 (int)optp->level, (int)optp->name, (int)optp->len));
10641 if (optp->len == 0)
10642 freemsg(mp4ctl);
10643 else
10644 qreply(q, mp4ctl);
10645
10646 return (mp2ctl);
10647 }
10648
10649 /*
10650 * Return ipv6RouteEntryTable and ipv6RouteAttributeTable in one IRE walk, and
10651 * ipv6NetToMediaEntryTable in an NDP walk.
10652 */
10653 static mblk_t *
10654 ip_snmp_get_mib2_ip6_route_media(queue_t *q, mblk_t *mpctl, int level,
10655 ip_stack_t *ipst)
10656 {
10657 struct opthdr *optp;
10658 mblk_t *mp2ctl; /* Returned */
10659 mblk_t *mp3ctl; /* nettomedia */
10660 mblk_t *mp4ctl; /* routeattrs */
10661 iproutedata_t ird;
10662 zoneid_t zoneid;
10663
10664 /*
10665 * make copies of the original message
10666 * - mp2ctl is returned unchanged to the caller for its use
10667 * - mpctl is sent upstream as ipv6RouteEntryTable
10668 * - mp3ctl is sent upstream as ipv6NetToMediaEntryTable
10669 * - mp4ctl is sent upstream as ipv6RouteAttributeTable
10670 */
10671 mp2ctl = copymsg(mpctl);
10672 mp3ctl = copymsg(mpctl);
10673 mp4ctl = copymsg(mpctl);
10674 if (mp3ctl == NULL || mp4ctl == NULL) {
10675 freemsg(mp4ctl);
10676 freemsg(mp3ctl);
10677 freemsg(mp2ctl);
10678 freemsg(mpctl);
10679 return (NULL);
10680 }
10681
10682 bzero(&ird, sizeof (ird));
10683
10684 ird.ird_route.lp_head = mpctl->b_cont;
10685 ird.ird_netmedia.lp_head = mp3ctl->b_cont;
10686 ird.ird_attrs.lp_head = mp4ctl->b_cont;
10687 /*
10688 * If the level has been set the special EXPER_IP_AND_ALL_IRES value,
10689 * then also include ire_testhidden IREs and IRE_IF_CLONE. This is
10690 * intended a temporary solution until a proper MIB API is provided
10691 * that provides complete filtering/caller-opt-in.
10692 */
10693 if (level == EXPER_IP_AND_ALL_IRES)
10694 ird.ird_flags |= IRD_REPORT_ALL;
10695
10696 zoneid = Q_TO_CONN(q)->conn_zoneid;
10697 ire_walk_v6(ip_snmp_get2_v6_route, &ird, zoneid, ipst);
10698
10699 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10700 optp->level = MIB2_IP6;
10701 optp->name = MIB2_IP6_ROUTE;
10702 optp->len = msgdsize(ird.ird_route.lp_head);
10703 ip3dbg(("ip_snmp_get_mib2_ip6_route_media: level %d, name %d, len %d\n",
10704 (int)optp->level, (int)optp->name, (int)optp->len));
10705 qreply(q, mpctl);
10706
10707 /* ipv6NetToMediaEntryTable in mp3ctl */
10708 ncec_walk(NULL, ip_snmp_get2_v6_media, &ird, ipst);
10709
10710 optp = (struct opthdr *)&mp3ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10711 optp->level = MIB2_IP6;
10712 optp->name = MIB2_IP6_MEDIA;
10713 optp->len = msgdsize(ird.ird_netmedia.lp_head);
10714 ip3dbg(("ip_snmp_get_mib2_ip6_route_media: level %d, name %d, len %d\n",
10715 (int)optp->level, (int)optp->name, (int)optp->len));
10716 qreply(q, mp3ctl);
10717
10718 /* ipv6RouteAttributeTable in mp4ctl */
10719 optp = (struct opthdr *)&mp4ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10720 optp->level = MIB2_IP6;
10721 optp->name = EXPER_IP_RTATTR;
10722 optp->len = msgdsize(ird.ird_attrs.lp_head);
10723 ip3dbg(("ip_snmp_get_mib2_ip6_route_media: level %d, name %d, len %d\n",
10724 (int)optp->level, (int)optp->name, (int)optp->len));
10725 if (optp->len == 0)
10726 freemsg(mp4ctl);
10727 else
10728 qreply(q, mp4ctl);
10729
10730 return (mp2ctl);
10731 }
10732
10733 /*
10734 * IPv6 mib: One per ill
10735 */
10736 static mblk_t *
10737 ip_snmp_get_mib2_ip6(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst,
10738 boolean_t legacy_req)
10739 {
10740 struct opthdr *optp;
10741 mblk_t *mp2ctl;
10742 ill_t *ill;
10743 ill_walk_context_t ctx;
10744 mblk_t *mp_tail = NULL;
10745 mib2_ipv6AddrEntry_t mae6;
10746 mib2_ipIfStatsEntry_t *ise;
10747 size_t ise_size, iae_size;
10748
10749 /*
10750 * Make a copy of the original message
10751 */
10752 mp2ctl = copymsg(mpctl);
10753
10754 /* fixed length IPv6 structure ... */
10755
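	/*
	 * Legacy requests get entries truncated to the sizes older
	 * consumers expect; LEGACY_MIB_SIZE yields those sizes.
	 */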
10756 if (legacy_req) {
10757 ise_size = LEGACY_MIB_SIZE(&ipst->ips_ip6_mib,
10758 mib2_ipIfStatsEntry_t);
10759 iae_size = LEGACY_MIB_SIZE(&mae6, mib2_ipv6AddrEntry_t);
10760 } else {
10761 ise_size = sizeof (mib2_ipIfStatsEntry_t);
10762 iae_size = sizeof (mib2_ipv6AddrEntry_t);
10763 }
10764
10765 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10766 optp->level = MIB2_IP6;
10767 optp->name = 0;
10768 /* Include "unknown interface" ip6_mib */
10769 ipst->ips_ip6_mib.ipIfStatsIPVersion = MIB2_INETADDRESSTYPE_ipv6;
10770 ipst->ips_ip6_mib.ipIfStatsIfIndex =
10771 MIB2_UNKNOWN_INTERFACE; /* Flag to netstat */
10772 SET_MIB(ipst->ips_ip6_mib.ipIfStatsForwarding,
10773 ipst->ips_ipv6_forwarding ? 1 : 2);
10774 SET_MIB(ipst->ips_ip6_mib.ipIfStatsDefaultHopLimit,
10775 ipst->ips_ipv6_def_hops);
10776 SET_MIB(ipst->ips_ip6_mib.ipIfStatsEntrySize,
10777 sizeof (mib2_ipIfStatsEntry_t));
10778 SET_MIB(ipst->ips_ip6_mib.ipIfStatsAddrEntrySize,
10779 sizeof (mib2_ipv6AddrEntry_t));
10780 SET_MIB(ipst->ips_ip6_mib.ipIfStatsRouteEntrySize,
10781 sizeof (mib2_ipv6RouteEntry_t));
10782 SET_MIB(ipst->ips_ip6_mib.ipIfStatsNetToMediaEntrySize,
10783 sizeof (mib2_ipv6NetToMediaEntry_t));
10784 SET_MIB(ipst->ips_ip6_mib.ipIfStatsMemberEntrySize,
10785 sizeof (ipv6_member_t));
10786 SET_MIB(ipst->ips_ip6_mib.ipIfStatsGroupSourceEntrySize,
10787 sizeof (ipv6_grpsrc_t));
10788
10789 /*
10790 * Synchronize 64- and 32-bit counters
10791 */
10792 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsInReceives,
10793 ipIfStatsHCInReceives);
10794 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsInDelivers,
10795 ipIfStatsHCInDelivers);
10796 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsOutRequests,
10797 ipIfStatsHCOutRequests);
10798 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsOutForwDatagrams,
10799 ipIfStatsHCOutForwDatagrams);
10800 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsOutMcastPkts,
10801 ipIfStatsHCOutMcastPkts);
10802 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsInMcastPkts,
10803 ipIfStatsHCInMcastPkts);
10804
10805 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10806 (char *)&ipst->ips_ip6_mib, (int)ise_size)) {
10807 ip1dbg(("ip_snmp_get_mib2_ip6: failed to allocate %u bytes\n",
10808 (uint_t)ise_size));
10809 } else if (legacy_req) {
10810 /* Adjust the EntrySize fields for legacy requests. */
10811 ise =
10812 (mib2_ipIfStatsEntry_t *)(mp_tail->b_wptr - (int)ise_size);
10813 SET_MIB(ise->ipIfStatsEntrySize, ise_size);
10814 SET_MIB(ise->ipIfStatsAddrEntrySize, iae_size);
10815 }
10816
10817 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10818 ill = ILL_START_WALK_V6(&ctx, ipst);
10819 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10820 ill->ill_ip_mib->ipIfStatsIfIndex =
10821 ill->ill_phyint->phyint_ifindex;
10822 SET_MIB(ill->ill_ip_mib->ipIfStatsForwarding,
10823 ipst->ips_ipv6_forwarding ? 1 : 2);
10824 SET_MIB(ill->ill_ip_mib->ipIfStatsDefaultHopLimit,
10825 ill->ill_max_hops);
10826
10827 /*
10828 * Synchronize 64- and 32-bit counters
10829 */
10830 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsInReceives,
10831 ipIfStatsHCInReceives);
10832 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsInDelivers,
10833 ipIfStatsHCInDelivers);
10834 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsOutRequests,
10835 ipIfStatsHCOutRequests);
10836 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsOutForwDatagrams,
10837 ipIfStatsHCOutForwDatagrams);
10838 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsOutMcastPkts,
10839 ipIfStatsHCOutMcastPkts);
10840 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsInMcastPkts,
10841 ipIfStatsHCInMcastPkts);
10842
10843 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10844 (char *)ill->ill_ip_mib, (int)ise_size)) {
10845 ip1dbg(("ip_snmp_get_mib2_ip6: failed to allocate "
10846 "%u bytes\n", (uint_t)ise_size));
10847 } else if (legacy_req) {
10848 /* Adjust the EntrySize fields for legacy requests. */
10849 ise = (mib2_ipIfStatsEntry_t *)(mp_tail->b_wptr -
10850 (int)ise_size);
10851 SET_MIB(ise->ipIfStatsEntrySize, ise_size);
10852 SET_MIB(ise->ipIfStatsAddrEntrySize, iae_size);
10853 }
10854 }
10855 rw_exit(&ipst->ips_ill_g_lock);
10856
10857 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10858 ip3dbg(("ip_snmp_get_mib2_ip6: level %d, name %d, len %d\n",
10859 (int)optp->level, (int)optp->name, (int)optp->len));
10860 qreply(q, mpctl);
10861 return (mp2ctl);
10862 }
10863
10864 /*
10865 * ICMPv6 mib: One per ill
10866 */
10867 static mblk_t *
10868 ip_snmp_get_mib2_icmp6(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst)
10869 {
10870 struct opthdr *optp;
10871 mblk_t *mp2ctl;
10872 ill_t *ill;
10873 ill_walk_context_t ctx;
10874 mblk_t *mp_tail = NULL;
10875 /*
10876 * Make a copy of the original message
10877 */
10878 mp2ctl = copymsg(mpctl);
10879
10880 /* fixed length ICMPv6 structure ... */
10881
10882 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10883 optp->level = MIB2_ICMP6;
10884 optp->name = 0;
10885 /* Include "unknown interface" icmp6_mib */
10886 ipst->ips_icmp6_mib.ipv6IfIcmpIfIndex =
10887 MIB2_UNKNOWN_INTERFACE; /* netstat flag */
10888 ipst->ips_icmp6_mib.ipv6IfIcmpEntrySize =
10889 sizeof (mib2_ipv6IfIcmpEntry_t);
10890 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10891 (char *)&ipst->ips_icmp6_mib,
10892 (int)sizeof (ipst->ips_icmp6_mib))) {
10893 ip1dbg(("ip_snmp_get_mib2_icmp6: failed to allocate %u bytes\n",
10894 (uint_t)sizeof (ipst->ips_icmp6_mib)));
10895 }
10896
10897 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
10898 ill = ILL_START_WALK_V6(&ctx, ipst);
10899 for (; ill != NULL; ill = ill_next(&ctx, ill)) {
10900 ill->ill_icmp6_mib->ipv6IfIcmpIfIndex =
10901 ill->ill_phyint->phyint_ifindex;
10902 if (!snmp_append_data2(mpctl->b_cont, &mp_tail,
10903 (char *)ill->ill_icmp6_mib,
10904 (int)sizeof (*ill->ill_icmp6_mib))) {
10905 ip1dbg(("ip_snmp_get_mib2_icmp6: failed to allocate "
10906 "%u bytes\n",
10907 (uint_t)sizeof (*ill->ill_icmp6_mib)));
10908 }
10909 }
10910 rw_exit(&ipst->ips_ill_g_lock);
10911
10912 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont);
10913 ip3dbg(("ip_snmp_get_mib2_icmp6: level %d, name %d, len %d\n",
10914 (int)optp->level, (int)optp->name, (int)optp->len));
10915 qreply(q, mpctl);
10916 return (mp2ctl);
10917 }
10918
10919 /*
10920 * ire_walk routine to create both ipRouteEntryTable and
10921 * ipRouteAttributeTable in one IRE walk
10922 */
10923 static void
10924 ip_snmp_get2_v4(ire_t *ire, iproutedata_t *ird)
10925 {
10926 ill_t *ill;
10927 mib2_ipRouteEntry_t *re;
10928 mib2_ipAttributeEntry_t iaes;
10929 tsol_ire_gw_secattr_t *attrp;
10930 tsol_gc_t *gc = NULL;
10931 tsol_gcgrp_t *gcgrp = NULL;
10932 ip_stack_t *ipst = ire->ire_ipst;
10933
10934 ASSERT(ire->ire_ipversion == IPV4_VERSION);
10935
10936 if (!(ird->ird_flags & IRD_REPORT_ALL)) {
10937 if (ire->ire_testhidden)
10938 return;
10939 if (ire->ire_type & IRE_IF_CLONE)
10940 return;
10941 }
10942
10943 if ((re = kmem_zalloc(sizeof (*re), KM_NOSLEEP)) == NULL)
10944 return;
10945
10946 if ((attrp = ire->ire_gw_secattr) != NULL) {
10947 mutex_enter(&attrp->igsa_lock);
10948 if ((gc = attrp->igsa_gc) != NULL) {
10949 gcgrp = gc->gc_grp;
10950 ASSERT(gcgrp != NULL);
10951 rw_enter(&gcgrp->gcgrp_rwlock, RW_READER);
10952 }
10953 mutex_exit(&attrp->igsa_lock);
10954 }
10955 /*
10956 * Return all IRE types for route table... let caller pick and choose
10957 */
10958 re->ipRouteDest = ire->ire_addr;
10959 ill = ire->ire_ill;
10960 re->ipRouteIfIndex.o_length = 0;
10961 if (ill != NULL) {
10962 ill_get_name(ill, re->ipRouteIfIndex.o_bytes, OCTET_LENGTH);
10963 re->ipRouteIfIndex.o_length =
10964 mi_strlen(re->ipRouteIfIndex.o_bytes);
10965 }
10966 re->ipRouteMetric1 = -1;
10967 re->ipRouteMetric2 = -1;
10968 re->ipRouteMetric3 = -1;
10969 re->ipRouteMetric4 = -1;
10970
10971 re->ipRouteNextHop = ire->ire_gateway_addr;
10972 /* indirect(4), direct(3), or invalid(2) */
10973 if (ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE))
10974 re->ipRouteType = 2;
10975 else if (ire->ire_type & IRE_ONLINK)
10976 re->ipRouteType = 3;
10977 else
10978 re->ipRouteType = 4;
10979
10980 re->ipRouteProto = -1;
10981 re->ipRouteAge = gethrestime_sec() - ire->ire_create_time;
10982 re->ipRouteMask = ire->ire_mask;
10983 re->ipRouteMetric5 = -1;
10984 re->ipRouteInfo.re_max_frag = ire->ire_metrics.iulp_mtu;
10985 if (ire->ire_ill != NULL && re->ipRouteInfo.re_max_frag == 0)
10986 re->ipRouteInfo.re_max_frag = ire->ire_ill->ill_mtu;
10987
10988 re->ipRouteInfo.re_frag_flag = 0;
10989 re->ipRouteInfo.re_rtt = 0;
10990 re->ipRouteInfo.re_src_addr = 0;
10991 re->ipRouteInfo.re_ref = ire->ire_refcnt;
10992 re->ipRouteInfo.re_obpkt = ire->ire_ob_pkt_count;
10993 re->ipRouteInfo.re_ibpkt = ire->ire_ib_pkt_count;
10994 re->ipRouteInfo.re_flags = ire->ire_flags;
10995
10996 /* Add the IRE_IF_CLONE's counters to their parent IRE_INTERFACE */
10997 if (ire->ire_type & IRE_INTERFACE) {
10998 ire_t *child;
10999
11000 rw_enter(&ipst->ips_ire_dep_lock, RW_READER);
11001 child = ire->ire_dep_children;
11002 while (child != NULL) {
11003 re->ipRouteInfo.re_obpkt += child->ire_ob_pkt_count;
11004 re->ipRouteInfo.re_ibpkt += child->ire_ib_pkt_count;
11005 child = child->ire_dep_sib_next;
11006 }
11007 rw_exit(&ipst->ips_ire_dep_lock);
11008 }
11009
11010 if (ire->ire_flags & RTF_DYNAMIC) {
11011 re->ipRouteInfo.re_ire_type = IRE_HOST_REDIRECT;
11012 } else {
11013 re->ipRouteInfo.re_ire_type = ire->ire_type;
11014 }
11015
11016 if (!snmp_append_data2(ird->ird_route.lp_head, &ird->ird_route.lp_tail,
11017 (char *)re, (int)sizeof (*re))) {
11018 ip1dbg(("ip_snmp_get2_v4: failed to allocate %u bytes\n",
11019 (uint_t)sizeof (*re)));
11020 }
11021
11022 if (gc != NULL) {
11023 iaes.iae_routeidx = ird->ird_idx;
11024 iaes.iae_doi = gc->gc_db->gcdb_doi;
11025 iaes.iae_slrange = gc->gc_db->gcdb_slrange;
11026
11027 if (!snmp_append_data2(ird->ird_attrs.lp_head,
11028 &ird->ird_attrs.lp_tail, (char *)&iaes, sizeof (iaes))) {
11029 ip1dbg(("ip_snmp_get2_v4: failed to allocate %u "
11030 "bytes\n", (uint_t)sizeof (iaes)));
11031 }
11032 }
11033
11034 /* bump route index for next pass */
11035 ird->ird_idx++;
11036
11037 kmem_free(re, sizeof (*re));
11038 if (gcgrp != NULL)
11039 rw_exit(&gcgrp->gcgrp_rwlock);
11040 }
11041
11042 /*
11043 * ire_walk routine to create ipv6RouteEntryTable and ipRouteEntryTable.
11044 */
11045 static void
11046 ip_snmp_get2_v6_route(ire_t *ire, iproutedata_t *ird)
11047 {
11048 ill_t *ill;
11049 mib2_ipv6RouteEntry_t *re;
11050 mib2_ipAttributeEntry_t iaes;
11051 tsol_ire_gw_secattr_t *attrp;
11052 tsol_gc_t *gc = NULL;
11053 tsol_gcgrp_t *gcgrp = NULL;
11054 ip_stack_t *ipst = ire->ire_ipst;
11055
11056 ASSERT(ire->ire_ipversion == IPV6_VERSION);
11057
11058 if (!(ird->ird_flags & IRD_REPORT_ALL)) {
11059 if (ire->ire_testhidden)
11060 return;
11061 if (ire->ire_type & IRE_IF_CLONE)
11062 return;
11063 }
11064
11065 if ((re = kmem_zalloc(sizeof (*re), KM_NOSLEEP)) == NULL)
11066 return;
11067
11068 if ((attrp = ire->ire_gw_secattr) != NULL) {
11069 mutex_enter(&attrp->igsa_lock);
11070 if ((gc = attrp->igsa_gc) != NULL) {
11071 gcgrp = gc->gc_grp;
11072 ASSERT(gcgrp != NULL);
11073 rw_enter(&gcgrp->gcgrp_rwlock, RW_READER);
11074 }
11075 mutex_exit(&attrp->igsa_lock);
11076 }
11077 /*
11078 * Return all IRE types for route table... let caller pick and choose
11079 */
11080 re->ipv6RouteDest = ire->ire_addr_v6;
11081 re->ipv6RoutePfxLength = ip_mask_to_plen_v6(&ire->ire_mask_v6);
11082 re->ipv6RouteIndex = 0; /* Unique when multiple with same dest/plen */
11083 re->ipv6RouteIfIndex.o_length = 0;
11084 ill = ire->ire_ill;
11085 if (ill != NULL) {
11086 ill_get_name(ill, re->ipv6RouteIfIndex.o_bytes, OCTET_LENGTH);
11087 re->ipv6RouteIfIndex.o_length =
11088 mi_strlen(re->ipv6RouteIfIndex.o_bytes);
11089 }
11090
11091 ASSERT(!(ire->ire_type & IRE_BROADCAST));
11092
11093 mutex_enter(&ire->ire_lock);
11094 re->ipv6RouteNextHop = ire->ire_gateway_addr_v6;
11095 mutex_exit(&ire->ire_lock);
11096
11097 /* remote(4), local(3), or discard(2) */
11098 if (ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE))
11099 re->ipv6RouteType = 2;
11100 else if (ire->ire_type & IRE_ONLINK)
11101 re->ipv6RouteType = 3;
11102 else
11103 re->ipv6RouteType = 4;
11104
11105 re->ipv6RouteProtocol = -1;
11106 re->ipv6RoutePolicy = 0;
11107 re->ipv6RouteAge = gethrestime_sec() - ire->ire_create_time;
11108 re->ipv6RouteNextHopRDI = 0;
11109 re->ipv6RouteWeight = 0;
11110 re->ipv6RouteMetric = 0;
11111 re->ipv6RouteInfo.re_max_frag = ire->ire_metrics.iulp_mtu;
11112 if (ire->ire_ill != NULL && re->ipv6RouteInfo.re_max_frag == 0)
11113 re->ipv6RouteInfo.re_max_frag = ire->ire_ill->ill_mtu;
11114
11115 re->ipv6RouteInfo.re_frag_flag = 0;
11116 re->ipv6RouteInfo.re_rtt = 0;
11117 re->ipv6RouteInfo.re_src_addr = ipv6_all_zeros;
11118 re->ipv6RouteInfo.re_obpkt = ire->ire_ob_pkt_count;
11119 re->ipv6RouteInfo.re_ibpkt = ire->ire_ib_pkt_count;
11120 re->ipv6RouteInfo.re_ref = ire->ire_refcnt;
11121 re->ipv6RouteInfo.re_flags = ire->ire_flags;
11122
11123 /* Add the IRE_IF_CLONE's counters to their parent IRE_INTERFACE */
11124 if (ire->ire_type & IRE_INTERFACE) {
11125 ire_t *child;
11126
11127 rw_enter(&ipst->ips_ire_dep_lock, RW_READER);
11128 child = ire->ire_dep_children;
11129 while (child != NULL) {
11130 re->ipv6RouteInfo.re_obpkt += child->ire_ob_pkt_count;
11131 re->ipv6RouteInfo.re_ibpkt += child->ire_ib_pkt_count;
11132 child = child->ire_dep_sib_next;
11133 }
11134 rw_exit(&ipst->ips_ire_dep_lock);
11135 }
11136 if (ire->ire_flags & RTF_DYNAMIC) {
11137 re->ipv6RouteInfo.re_ire_type = IRE_HOST_REDIRECT;
11138 } else {
11139 re->ipv6RouteInfo.re_ire_type = ire->ire_type;
11140 }
11141
11142 if (!snmp_append_data2(ird->ird_route.lp_head, &ird->ird_route.lp_tail,
11143 (char *)re, (int)sizeof (*re))) {
11144 ip1dbg(("ip_snmp_get2_v6: failed to allocate %u bytes\n",
11145 (uint_t)sizeof (*re)));
11146 }
11147
11148 if (gc != NULL) {
11149 iaes.iae_routeidx = ird->ird_idx;
11150 iaes.iae_doi = gc->gc_db->gcdb_doi;
11151 iaes.iae_slrange = gc->gc_db->gcdb_slrange;
11152
11153 if (!snmp_append_data2(ird->ird_attrs.lp_head,
11154 &ird->ird_attrs.lp_tail, (char *)&iaes, sizeof (iaes))) {
11155 ip1dbg(("ip_snmp_get2_v6: failed to allocate %u "
11156 "bytes\n", (uint_t)sizeof (iaes)));
11157 }
11158 }
11159
11160 /* bump route index for next pass */
11161 ird->ird_idx++;
11162
11163 kmem_free(re, sizeof (*re));
11164 if (gcgrp != NULL)
11165 rw_exit(&gcgrp->gcgrp_rwlock);
11166 }
11167
11168 /*
11169 * ncec_walk routine to create ipv6NetToMediaEntryTable
11170 */
11171 static int
11172 ip_snmp_get2_v6_media(ncec_t *ncec, iproutedata_t *ird)
11173 {
11174 ill_t *ill;
11175 mib2_ipv6NetToMediaEntry_t ntme;
11176
11177 ill = ncec->ncec_ill;
11178 /* skip arpce entries, and loopback ncec entries */
11179 if (ill->ill_isv6 == B_FALSE || ill->ill_net_type == IRE_LOOPBACK)
11180 return (0);
11181 /*
11182 * Neighbor cache entry attached to IRE with on-link
11183 * destination.
11184 * We report all IPMP groups on ncec_ill which is normally the upper.
11185 */
11186 ntme.ipv6NetToMediaIfIndex = ill->ill_phyint->phyint_ifindex;
11187 ntme.ipv6NetToMediaNetAddress = ncec->ncec_addr;
11188 ntme.ipv6NetToMediaPhysAddress.o_length = ill->ill_phys_addr_length;
11189 if (ncec->ncec_lladdr != NULL) {
11190 bcopy(ncec->ncec_lladdr, ntme.ipv6NetToMediaPhysAddress.o_bytes,
11191 ntme.ipv6NetToMediaPhysAddress.o_length);
11192 }
11193 /*
11194 * Note: Returns ND_* states. Should be:
11195 * reachable(1), stale(2), delay(3), probe(4),
11196 * invalid(5), unknown(6)
11197 */
11198 ntme.ipv6NetToMediaState = ncec->ncec_state;
11199 ntme.ipv6NetToMediaLastUpdated = 0;
11200
11201 /* other(1), dynamic(2), static(3), local(4) */
11202 if (NCE_MYADDR(ncec)) {
11203 ntme.ipv6NetToMediaType = 4;
11204 } else if (ncec->ncec_flags & NCE_F_PUBLISH) {
11205 ntme.ipv6NetToMediaType = 1; /* proxy */
11206 } else if (ncec->ncec_flags & NCE_F_STATIC) {
11207 ntme.ipv6NetToMediaType = 3;
11208 } else if (ncec->ncec_flags & (NCE_F_MCAST|NCE_F_BCAST)) {
11209 ntme.ipv6NetToMediaType = 1;
11210 } else {
11211 ntme.ipv6NetToMediaType = 2;
11212 }
11213
11214 if (!snmp_append_data2(ird->ird_netmedia.lp_head,
11215 &ird->ird_netmedia.lp_tail, (char *)&ntme, sizeof (ntme))) {
11216 ip1dbg(("ip_snmp_get2_v6_media: failed to allocate %u bytes\n",
11217 (uint_t)sizeof (ntme)));
11218 }
11219 return (0);
11220 }
11221
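/*
 * Map ncec_t flags to the legacy ACE_F_* flags reported in
 * ipNetToMediaInfo.ntm_flags.
 */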
11222 int
11223 nce2ace(ncec_t *ncec)
11224 {
11225 int flags = 0;
11226
11227 if (NCE_ISREACHABLE(ncec))
11228 flags |= ACE_F_RESOLVED;
11229 if (ncec->ncec_flags & NCE_F_AUTHORITY)
11230 flags |= ACE_F_AUTHORITY;
11231 if (ncec->ncec_flags & NCE_F_PUBLISH)
11232 flags |= ACE_F_PUBLISH;
11233 if ((ncec->ncec_flags & NCE_F_NONUD) != 0)
11234 flags |= ACE_F_PERMANENT;
11235 if (NCE_MYADDR(ncec))
11236 flags |= (ACE_F_MYADDR | ACE_F_AUTHORITY);
11237 if (ncec->ncec_flags & NCE_F_UNVERIFIED)
11238 flags |= ACE_F_UNVERIFIED;
11241 if (ncec->ncec_flags & NCE_F_DELAYED)
11242 flags |= ACE_F_DELAYED;
11243 return (flags);
11244 }
11245
11246 /*
11247 * ncec_walk routine to create ipNetToMediaEntryTable
11248 */
11249 static int
11250 ip_snmp_get2_v4_media(ncec_t *ncec, iproutedata_t *ird)
11251 {
11252 ill_t *ill;
11253 mib2_ipNetToMediaEntry_t ntme;
11254 const char *name = "unknown";
11255 ipaddr_t ncec_addr;
11256
11257 ill = ncec->ncec_ill;
11258 if (ill->ill_isv6 || (ncec->ncec_flags & NCE_F_BCAST) ||
11259 ill->ill_net_type == IRE_LOOPBACK)
11260 return (0);
11261
	/* We report all IPMP groups on ncec_ill, normally the upper ill. */
11263 name = ill->ill_name;
11264 /* Based on RFC 4293: other(1), inval(2), dyn(3), stat(4) */
11265 if (NCE_MYADDR(ncec)) {
11266 ntme.ipNetToMediaType = 4;
11267 } else if (ncec->ncec_flags & (NCE_F_MCAST|NCE_F_BCAST|NCE_F_PUBLISH)) {
11268 ntme.ipNetToMediaType = 1;
11269 } else {
11270 ntme.ipNetToMediaType = 3;
11271 }
11272 ntme.ipNetToMediaIfIndex.o_length = MIN(OCTET_LENGTH, strlen(name));
11273 bcopy(name, ntme.ipNetToMediaIfIndex.o_bytes,
11274 ntme.ipNetToMediaIfIndex.o_length);
11275
11276 IN6_V4MAPPED_TO_IPADDR(&ncec->ncec_addr, ncec_addr);
11277 bcopy(&ncec_addr, &ntme.ipNetToMediaNetAddress, sizeof (ncec_addr));
11278
11279 ntme.ipNetToMediaInfo.ntm_mask.o_length = sizeof (ipaddr_t);
11280 ncec_addr = INADDR_BROADCAST;
11281 bcopy(&ncec_addr, ntme.ipNetToMediaInfo.ntm_mask.o_bytes,
11282 sizeof (ncec_addr));
11283 /*
11284 * map all the flags to the ACE counterpart.
11285 */
11286 ntme.ipNetToMediaInfo.ntm_flags = nce2ace(ncec);
11287
11288 ntme.ipNetToMediaPhysAddress.o_length =
11289 MIN(OCTET_LENGTH, ill->ill_phys_addr_length);
11290
11291 if (!NCE_ISREACHABLE(ncec))
11292 ntme.ipNetToMediaPhysAddress.o_length = 0;
11293 else {
11294 if (ncec->ncec_lladdr != NULL) {
11295 bcopy(ncec->ncec_lladdr,
11296 ntme.ipNetToMediaPhysAddress.o_bytes,
11297 ntme.ipNetToMediaPhysAddress.o_length);
11298 }
11299 }
11300
11301 if (!snmp_append_data2(ird->ird_netmedia.lp_head,
11302 &ird->ird_netmedia.lp_tail, (char *)&ntme, sizeof (ntme))) {
11303 ip1dbg(("ip_snmp_get2_v4_media: failed to allocate %u bytes\n",
11304 (uint_t)sizeof (ntme)));
11305 }
11306 return (0);
11307 }
11308
11309 /*
11310 * return (0) if invalid set request, 1 otherwise, including non-tcp requests
11311 */
11312 /* ARGSUSED */
11313 int
11314 ip_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len)
11315 {
11316 switch (level) {
11317 case MIB2_IP:
11318 case MIB2_ICMP:
11319 switch (name) {
11320 default:
11321 break;
11322 }
11323 return (1);
11324 default:
11325 return (1);
11326 }
11327 }
11328
11329 /*
11330 * When there exists both a 64- and 32-bit counter of a particular type
11331 * (i.e., InReceives), only the 64-bit counters are added.
11332 */
11333 void
11334 ip_mib2_add_ip_stats(mib2_ipIfStatsEntry_t *o1, mib2_ipIfStatsEntry_t *o2)
11335 {
11336 UPDATE_MIB(o1, ipIfStatsInHdrErrors, o2->ipIfStatsInHdrErrors);
11337 UPDATE_MIB(o1, ipIfStatsInTooBigErrors, o2->ipIfStatsInTooBigErrors);
11338 UPDATE_MIB(o1, ipIfStatsInNoRoutes, o2->ipIfStatsInNoRoutes);
11339 UPDATE_MIB(o1, ipIfStatsInAddrErrors, o2->ipIfStatsInAddrErrors);
11340 UPDATE_MIB(o1, ipIfStatsInUnknownProtos, o2->ipIfStatsInUnknownProtos);
11341 UPDATE_MIB(o1, ipIfStatsInTruncatedPkts, o2->ipIfStatsInTruncatedPkts);
11342 UPDATE_MIB(o1, ipIfStatsInDiscards, o2->ipIfStatsInDiscards);
11343 UPDATE_MIB(o1, ipIfStatsOutDiscards, o2->ipIfStatsOutDiscards);
11344 UPDATE_MIB(o1, ipIfStatsOutFragOKs, o2->ipIfStatsOutFragOKs);
11345 UPDATE_MIB(o1, ipIfStatsOutFragFails, o2->ipIfStatsOutFragFails);
11346 UPDATE_MIB(o1, ipIfStatsOutFragCreates, o2->ipIfStatsOutFragCreates);
11347 UPDATE_MIB(o1, ipIfStatsReasmReqds, o2->ipIfStatsReasmReqds);
11348 UPDATE_MIB(o1, ipIfStatsReasmOKs, o2->ipIfStatsReasmOKs);
11349 UPDATE_MIB(o1, ipIfStatsReasmFails, o2->ipIfStatsReasmFails);
11350 UPDATE_MIB(o1, ipIfStatsOutNoRoutes, o2->ipIfStatsOutNoRoutes);
11351 UPDATE_MIB(o1, ipIfStatsReasmDuplicates, o2->ipIfStatsReasmDuplicates);
11352 UPDATE_MIB(o1, ipIfStatsReasmPartDups, o2->ipIfStatsReasmPartDups);
11353 UPDATE_MIB(o1, ipIfStatsForwProhibits, o2->ipIfStatsForwProhibits);
11354 UPDATE_MIB(o1, udpInCksumErrs, o2->udpInCksumErrs);
11355 UPDATE_MIB(o1, udpInOverflows, o2->udpInOverflows);
11356 UPDATE_MIB(o1, rawipInOverflows, o2->rawipInOverflows);
11357 UPDATE_MIB(o1, ipIfStatsInWrongIPVersion,
11358 o2->ipIfStatsInWrongIPVersion);
	UPDATE_MIB(o1, ipIfStatsOutWrongIPVersion,
	    o2->ipIfStatsOutWrongIPVersion);
11361 UPDATE_MIB(o1, ipIfStatsOutSwitchIPVersion,
11362 o2->ipIfStatsOutSwitchIPVersion);
11363 UPDATE_MIB(o1, ipIfStatsHCInReceives, o2->ipIfStatsHCInReceives);
11364 UPDATE_MIB(o1, ipIfStatsHCInOctets, o2->ipIfStatsHCInOctets);
11365 UPDATE_MIB(o1, ipIfStatsHCInForwDatagrams,
11366 o2->ipIfStatsHCInForwDatagrams);
11367 UPDATE_MIB(o1, ipIfStatsHCInDelivers, o2->ipIfStatsHCInDelivers);
11368 UPDATE_MIB(o1, ipIfStatsHCOutRequests, o2->ipIfStatsHCOutRequests);
11369 UPDATE_MIB(o1, ipIfStatsHCOutForwDatagrams,
11370 o2->ipIfStatsHCOutForwDatagrams);
11371 UPDATE_MIB(o1, ipIfStatsOutFragReqds, o2->ipIfStatsOutFragReqds);
11372 UPDATE_MIB(o1, ipIfStatsHCOutTransmits, o2->ipIfStatsHCOutTransmits);
11373 UPDATE_MIB(o1, ipIfStatsHCOutOctets, o2->ipIfStatsHCOutOctets);
11374 UPDATE_MIB(o1, ipIfStatsHCInMcastPkts, o2->ipIfStatsHCInMcastPkts);
11375 UPDATE_MIB(o1, ipIfStatsHCInMcastOctets, o2->ipIfStatsHCInMcastOctets);
11376 UPDATE_MIB(o1, ipIfStatsHCOutMcastPkts, o2->ipIfStatsHCOutMcastPkts);
11377 UPDATE_MIB(o1, ipIfStatsHCOutMcastOctets,
11378 o2->ipIfStatsHCOutMcastOctets);
11379 UPDATE_MIB(o1, ipIfStatsHCInBcastPkts, o2->ipIfStatsHCInBcastPkts);
11380 UPDATE_MIB(o1, ipIfStatsHCOutBcastPkts, o2->ipIfStatsHCOutBcastPkts);
11381 UPDATE_MIB(o1, ipsecInSucceeded, o2->ipsecInSucceeded);
11382 UPDATE_MIB(o1, ipsecInFailed, o2->ipsecInFailed);
11383 UPDATE_MIB(o1, ipInCksumErrs, o2->ipInCksumErrs);
11384 UPDATE_MIB(o1, tcpInErrs, o2->tcpInErrs);
11385 UPDATE_MIB(o1, udpNoPorts, o2->udpNoPorts);
11386 }
11387
11388 void
11389 ip_mib2_add_icmp6_stats(mib2_ipv6IfIcmpEntry_t *o1, mib2_ipv6IfIcmpEntry_t *o2)
11390 {
11391 UPDATE_MIB(o1, ipv6IfIcmpInMsgs, o2->ipv6IfIcmpInMsgs);
11392 UPDATE_MIB(o1, ipv6IfIcmpInErrors, o2->ipv6IfIcmpInErrors);
11393 UPDATE_MIB(o1, ipv6IfIcmpInDestUnreachs, o2->ipv6IfIcmpInDestUnreachs);
11394 UPDATE_MIB(o1, ipv6IfIcmpInAdminProhibs, o2->ipv6IfIcmpInAdminProhibs);
11395 UPDATE_MIB(o1, ipv6IfIcmpInTimeExcds, o2->ipv6IfIcmpInTimeExcds);
11396 UPDATE_MIB(o1, ipv6IfIcmpInParmProblems, o2->ipv6IfIcmpInParmProblems);
11397 UPDATE_MIB(o1, ipv6IfIcmpInPktTooBigs, o2->ipv6IfIcmpInPktTooBigs);
11398 UPDATE_MIB(o1, ipv6IfIcmpInEchos, o2->ipv6IfIcmpInEchos);
11399 UPDATE_MIB(o1, ipv6IfIcmpInEchoReplies, o2->ipv6IfIcmpInEchoReplies);
11400 UPDATE_MIB(o1, ipv6IfIcmpInRouterSolicits,
11401 o2->ipv6IfIcmpInRouterSolicits);
11402 UPDATE_MIB(o1, ipv6IfIcmpInRouterAdvertisements,
11403 o2->ipv6IfIcmpInRouterAdvertisements);
11404 UPDATE_MIB(o1, ipv6IfIcmpInNeighborSolicits,
11405 o2->ipv6IfIcmpInNeighborSolicits);
11406 UPDATE_MIB(o1, ipv6IfIcmpInNeighborAdvertisements,
11407 o2->ipv6IfIcmpInNeighborAdvertisements);
11408 UPDATE_MIB(o1, ipv6IfIcmpInRedirects, o2->ipv6IfIcmpInRedirects);
11409 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembQueries,
11410 o2->ipv6IfIcmpInGroupMembQueries);
11411 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembResponses,
11412 o2->ipv6IfIcmpInGroupMembResponses);
11413 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembReductions,
11414 o2->ipv6IfIcmpInGroupMembReductions);
11415 UPDATE_MIB(o1, ipv6IfIcmpOutMsgs, o2->ipv6IfIcmpOutMsgs);
11416 UPDATE_MIB(o1, ipv6IfIcmpOutErrors, o2->ipv6IfIcmpOutErrors);
11417 UPDATE_MIB(o1, ipv6IfIcmpOutDestUnreachs,
11418 o2->ipv6IfIcmpOutDestUnreachs);
11419 UPDATE_MIB(o1, ipv6IfIcmpOutAdminProhibs,
11420 o2->ipv6IfIcmpOutAdminProhibs);
11421 UPDATE_MIB(o1, ipv6IfIcmpOutTimeExcds, o2->ipv6IfIcmpOutTimeExcds);
11422 UPDATE_MIB(o1, ipv6IfIcmpOutParmProblems,
11423 o2->ipv6IfIcmpOutParmProblems);
11424 UPDATE_MIB(o1, ipv6IfIcmpOutPktTooBigs, o2->ipv6IfIcmpOutPktTooBigs);
11425 UPDATE_MIB(o1, ipv6IfIcmpOutEchos, o2->ipv6IfIcmpOutEchos);
11426 UPDATE_MIB(o1, ipv6IfIcmpOutEchoReplies, o2->ipv6IfIcmpOutEchoReplies);
11427 UPDATE_MIB(o1, ipv6IfIcmpOutRouterSolicits,
11428 o2->ipv6IfIcmpOutRouterSolicits);
11429 UPDATE_MIB(o1, ipv6IfIcmpOutRouterAdvertisements,
11430 o2->ipv6IfIcmpOutRouterAdvertisements);
11431 UPDATE_MIB(o1, ipv6IfIcmpOutNeighborSolicits,
11432 o2->ipv6IfIcmpOutNeighborSolicits);
11433 UPDATE_MIB(o1, ipv6IfIcmpOutNeighborAdvertisements,
11434 o2->ipv6IfIcmpOutNeighborAdvertisements);
11435 UPDATE_MIB(o1, ipv6IfIcmpOutRedirects, o2->ipv6IfIcmpOutRedirects);
11436 UPDATE_MIB(o1, ipv6IfIcmpOutGroupMembQueries,
11437 o2->ipv6IfIcmpOutGroupMembQueries);
11438 UPDATE_MIB(o1, ipv6IfIcmpOutGroupMembResponses,
11439 o2->ipv6IfIcmpOutGroupMembResponses);
11440 UPDATE_MIB(o1, ipv6IfIcmpOutGroupMembReductions,
11441 o2->ipv6IfIcmpOutGroupMembReductions);
11442 UPDATE_MIB(o1, ipv6IfIcmpInOverflows, o2->ipv6IfIcmpInOverflows);
11443 UPDATE_MIB(o1, ipv6IfIcmpBadHoplimit, o2->ipv6IfIcmpBadHoplimit);
11444 UPDATE_MIB(o1, ipv6IfIcmpInBadNeighborAdvertisements,
11445 o2->ipv6IfIcmpInBadNeighborAdvertisements);
11446 UPDATE_MIB(o1, ipv6IfIcmpInBadNeighborSolicitations,
11447 o2->ipv6IfIcmpInBadNeighborSolicitations);
11448 UPDATE_MIB(o1, ipv6IfIcmpInBadRedirects, o2->ipv6IfIcmpInBadRedirects);
11449 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembTotal,
11450 o2->ipv6IfIcmpInGroupMembTotal);
11451 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembBadQueries,
11452 o2->ipv6IfIcmpInGroupMembBadQueries);
11453 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembBadReports,
11454 o2->ipv6IfIcmpInGroupMembBadReports);
11455 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembOurReports,
11456 o2->ipv6IfIcmpInGroupMembOurReports);
11457 }
11458
11459 /*
11460 * Called before the options are updated to check if this packet will
11461 * be source routed from here.
11462 * This routine assumes that the options are well formed i.e. that they
11463 * have already been checked.
11464 */
11465 boolean_t
11466 ip_source_routed(ipha_t *ipha, ip_stack_t *ipst)
11467 {
11468 ipoptp_t opts;
11469 uchar_t *opt;
11470 uint8_t optval;
11471 uint8_t optlen;
11472 ipaddr_t dst;
11473
11474 if (IS_SIMPLE_IPH(ipha)) {
11475 ip2dbg(("not source routed\n"));
11476 return (B_FALSE);
11477 }
11478 dst = ipha->ipha_dst;
11479 for (optval = ipoptp_first(&opts, ipha);
11480 optval != IPOPT_EOL;
11481 optval = ipoptp_next(&opts)) {
11482 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
11483 opt = opts.ipoptp_cur;
11484 optlen = opts.ipoptp_len;
11485 ip2dbg(("ip_source_routed: opt %d, len %d\n",
11486 optval, optlen));
11487 switch (optval) {
11488 uint32_t off;
11489 case IPOPT_SSRR:
11490 case IPOPT_LSRR:
11491 /*
11492 * If dst is one of our addresses and there are some
11493 * entries left in the source route return (true).
11494 */
11495 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
11496 ip2dbg(("ip_source_routed: not next"
11497 " source route 0x%x\n",
11498 ntohl(dst)));
11499 return (B_FALSE);
11500 }
11501 off = opt[IPOPT_OFFSET];
11502 off--;
11503 if (optlen < IP_ADDR_LEN ||
11504 off > optlen - IP_ADDR_LEN) {
11505 /* End of source route */
11506 ip1dbg(("ip_source_routed: end of SR\n"));
11507 return (B_FALSE);
11508 }
11509 return (B_TRUE);
11510 }
11511 }
11512 ip2dbg(("not source routed\n"));
11513 return (B_FALSE);
11514 }
11515
11516 /*
11517 * ip_unbind is called by the transports to remove a conn from
11518 * the fanout table.
11519 */
11520 void
11521 ip_unbind(conn_t *connp)
11522 {
11523
11524 ASSERT(!MUTEX_HELD(&connp->conn_lock));
11525
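	/* Release any Trusted Extensions anonymous MLP binding. */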
11526 if (is_system_labeled() && connp->conn_anon_port) {
11527 (void) tsol_mlp_anon(crgetzone(connp->conn_cred),
11528 connp->conn_mlp_type, connp->conn_proto,
11529 ntohs(connp->conn_lport), B_FALSE);
11530 connp->conn_anon_port = 0;
11531 }
11532 connp->conn_mlp_type = mlptSingle;
11533
11534 ipcl_hash_remove(connp);
11535 }
11536
11537 /*
11538 * Used for deciding the MSS size for the upper layer. Thus
11539 * we need to check the outbound policy values in the conn.
11540 */
11541 int
11542 conn_ipsec_length(conn_t *connp)
11543 {
11544 ipsec_latch_t *ipl;
11545
11546 ipl = connp->conn_latch;
11547 if (ipl == NULL)
11548 return (0);
11549
11550 if (connp->conn_ixa->ixa_ipsec_policy == NULL)
11551 return (0);
11552
11553 return (connp->conn_ixa->ixa_ipsec_policy->ipsp_act->ipa_ovhd);
11554 }
11555
11556 /*
11557 * Returns an estimate of the IPsec headers size. This is used if
11558 * we don't want to call into IPsec to get the exact size.
11559 */
11560 int
11561 ipsec_out_extra_length(ip_xmit_attr_t *ixa)
11562 {
11563 ipsec_action_t *a;
11564
11565 if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE))
11566 return (0);
11567
11568 a = ixa->ixa_ipsec_action;
11569 if (a == NULL) {
11570 ASSERT(ixa->ixa_ipsec_policy != NULL);
11571 a = ixa->ixa_ipsec_policy->ipsp_act;
11572 }
11573 ASSERT(a != NULL);
11574
11575 return (a->ipa_ovhd);
11576 }
11577
11578 /*
11579 * If there are any source route options, return the true final
11580 * destination. Otherwise, return the destination.
11581 */
11582 ipaddr_t
11583 ip_get_dst(ipha_t *ipha)
11584 {
11585 ipoptp_t opts;
11586 uchar_t *opt;
11587 uint8_t optval;
11588 uint8_t optlen;
11589 ipaddr_t dst;
11590 uint32_t off;
11591
11592 dst = ipha->ipha_dst;
11593
11594 if (IS_SIMPLE_IPH(ipha))
11595 return (dst);
11596
11597 for (optval = ipoptp_first(&opts, ipha);
11598 optval != IPOPT_EOL;
11599 optval = ipoptp_next(&opts)) {
11600 opt = opts.ipoptp_cur;
11601 optlen = opts.ipoptp_len;
11602 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
11603 switch (optval) {
11604 case IPOPT_SSRR:
11605 case IPOPT_LSRR:
11606 off = opt[IPOPT_OFFSET];
11607 /*
11608 * If one of the conditions is true, it means
11609 * end of options and dst already has the right
11610 * value.
11611 */
11612 if (!(optlen < IP_ADDR_LEN || off > optlen - 3)) {
11613 off = optlen - IP_ADDR_LEN;
11614 bcopy(&opt[off], &dst, IP_ADDR_LEN);
11615 }
11616 return (dst);
11617 default:
11618 break;
11619 }
11620 }
11621
11622 return (dst);
11623 }
11624
11625 /*
11626 * Outbound IP fragmentation routine.
11627 * Assumes the caller has checked whether or not fragmentation should
11628 * be allowed. Here we copy the DF bit from the header to all the generated
11629 * fragments.
11630 */
11631 int
11632 ip_fragment_v4(mblk_t *mp_orig, nce_t *nce, iaflags_t ixaflags,
11633 uint_t pkt_len, uint32_t max_frag, uint32_t xmit_hint, zoneid_t szone,
11634 zoneid_t nolzid, pfirepostfrag_t postfragfn, uintptr_t *ixa_cookie)
11635 {
11636 int i1;
11637 int hdr_len;
11638 mblk_t *hdr_mp;
11639 ipha_t *ipha;
11640 int ip_data_end;
11641 int len;
11642 mblk_t *mp = mp_orig;
11643 int offset;
11644 ill_t *ill = nce->nce_ill;
11645 ip_stack_t *ipst = ill->ill_ipst;
11646 mblk_t *carve_mp;
11647 uint32_t frag_flag;
11648 uint_t priority = mp->b_band;
11649 int error = 0;
11650
11651 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragReqds);
11652
11653 if (pkt_len != msgdsize(mp)) {
11654 ip0dbg(("Packet length mismatch: %d, %ld\n",
11655 pkt_len, msgdsize(mp)));
11656 freemsg(mp);
11657 return (EINVAL);
11658 }
11659
11660 if (max_frag == 0) {
11661 ip1dbg(("ip_fragment_v4: max_frag is zero. Dropping packet\n"));
11662 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11663 ip_drop_output("FragFails: zero max_frag", mp, ill);
11664 freemsg(mp);
11665 return (EINVAL);
11666 }
11667
11668 ASSERT(MBLKL(mp) >= sizeof (ipha_t));
11669 ipha = (ipha_t *)mp->b_rptr;
11670 ASSERT(ntohs(ipha->ipha_length) == pkt_len);
11671 frag_flag = ntohs(ipha->ipha_fragment_offset_and_flags) & IPH_DF;
11672
11673 /*
11674 * Establish the starting offset. May not be zero if we are fragging
11675 * a fragment that is being forwarded.
11676 */
11677 offset = ntohs(ipha->ipha_fragment_offset_and_flags) & IPH_OFFSET;
11678
11679 /* TODO why is this test needed? */
11680 if (((max_frag - ntohs(ipha->ipha_length)) & ~7) < 8) {
11681 /* TODO: notify ulp somehow */
11682 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11683 ip_drop_output("FragFails: bad starting offset", mp, ill);
11684 freemsg(mp);
11685 return (EINVAL);
11686 }
11687
11688 hdr_len = IPH_HDR_LENGTH(ipha);
11689 ipha->ipha_hdr_checksum = 0;
11690
11691 /*
11692 * Establish the number of bytes maximum per frag, after putting
11693 * in the header.
11694 */
11695 len = (max_frag - hdr_len) & ~7;
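	/*
	 * The payload of every non-final fragment must be a multiple of
	 * 8 bytes, hence the & ~7 above; e.g., a 1500-byte max_frag with
	 * a 20-byte header yields 1480 bytes of payload per fragment.
	 */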
11696
11697 /* Get a copy of the header for the trailing frags */
11698 hdr_mp = ip_fragment_copyhdr((uchar_t *)ipha, hdr_len, offset, ipst,
11699 mp);
11700 if (hdr_mp == NULL) {
11701 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11702 ip_drop_output("FragFails: no hdr_mp", mp, ill);
11703 freemsg(mp);
11704 return (ENOBUFS);
11705 }
11706
11707 /* Store the starting offset, with the MoreFrags flag. */
11708 i1 = offset | IPH_MF | frag_flag;
11709 ipha->ipha_fragment_offset_and_flags = htons((uint16_t)i1);
11710
	/*
	 * Establish the ending byte offset, based on the starting offset.
	 * (The header stores the offset in 8-octet units; shift to bytes.)
	 */
11712 offset <<= 3;
11713 ip_data_end = offset + ntohs(ipha->ipha_length) - hdr_len;
11714
11715 /* Store the length of the first fragment in the IP header. */
11716 i1 = len + hdr_len;
11717 ASSERT(i1 <= IP_MAXPACKET);
11718 ipha->ipha_length = htons((uint16_t)i1);
11719
11720 /*
11721 * Compute the IP header checksum for the first frag. We have to
11722 * watch out that we stop at the end of the header.
11723 */
11724 ipha->ipha_hdr_checksum = ip_csum_hdr(ipha);
11725
11726 /*
11727 * Now carve off the first frag. Note that this will include the
11728 * original IP header.
11729 */
11730 if (!(mp = ip_carve_mp(&mp_orig, i1))) {
11731 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11732 ip_drop_output("FragFails: could not carve mp", mp_orig, ill);
11733 freeb(hdr_mp);
11734 freemsg(mp_orig);
11735 return (ENOBUFS);
11736 }
11737
11738 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragCreates);
11739
11740 error = postfragfn(mp, nce, ixaflags, i1, xmit_hint, szone, nolzid,
11741 ixa_cookie);
11742 if (error != 0 && error != EWOULDBLOCK) {
11743 /* No point in sending the other fragments */
11744 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11745 ip_drop_output("FragFails: postfragfn failed", mp_orig, ill);
11746 freeb(hdr_mp);
11747 freemsg(mp_orig);
11748 return (error);
11749 }
11750
11751 /* No need to redo state machine in loop */
11752 ixaflags &= ~IXAF_REACH_CONF;
11753
11754 /* Advance the offset to the second frag starting point. */
11755 offset += len;
11756 /*
11757 * Update hdr_len from the copied header - there might be less options
11758 * in the later fragments.
11759 */
11760 hdr_len = IPH_HDR_LENGTH(hdr_mp->b_rptr);
11761 /* Loop until done. */
11762 for (;;) {
11763 uint16_t offset_and_flags;
11764 uint16_t ip_len;
11765
11766 if (ip_data_end - offset > len) {
11767 /*
11768 * Carve off the appropriate amount from the original
11769 * datagram.
11770 */
11771 if (!(carve_mp = ip_carve_mp(&mp_orig, len))) {
11772 mp = NULL;
11773 break;
11774 }
11775 /*
11776 * More frags after this one. Get another copy
11777 * of the header.
11778 */
11779 if (carve_mp->b_datap->db_ref == 1 &&
11780 hdr_mp->b_wptr - hdr_mp->b_rptr <
11781 carve_mp->b_rptr - carve_mp->b_datap->db_base) {
11782 /* Inline IP header */
11783 carve_mp->b_rptr -= hdr_mp->b_wptr -
11784 hdr_mp->b_rptr;
11785 bcopy(hdr_mp->b_rptr, carve_mp->b_rptr,
11786 hdr_mp->b_wptr - hdr_mp->b_rptr);
11787 mp = carve_mp;
11788 } else {
11789 if (!(mp = copyb(hdr_mp))) {
11790 freemsg(carve_mp);
11791 break;
11792 }
11793 /* Get priority marking, if any. */
11794 mp->b_band = priority;
11795 mp->b_cont = carve_mp;
11796 }
11797 ipha = (ipha_t *)mp->b_rptr;
11798 offset_and_flags = IPH_MF;
11799 } else {
11800 /*
11801 * Last frag. Consume the header. Set len to
11802 * the length of this last piece.
11803 */
11804 len = ip_data_end - offset;
11805
11806 /*
11807 * Carve off the appropriate amount from the original
11808 * datagram.
11809 */
11810 if (!(carve_mp = ip_carve_mp(&mp_orig, len))) {
11811 mp = NULL;
11812 break;
11813 }
11814 if (carve_mp->b_datap->db_ref == 1 &&
11815 hdr_mp->b_wptr - hdr_mp->b_rptr <
11816 carve_mp->b_rptr - carve_mp->b_datap->db_base) {
11817 /* Inline IP header */
11818 carve_mp->b_rptr -= hdr_mp->b_wptr -
11819 hdr_mp->b_rptr;
11820 bcopy(hdr_mp->b_rptr, carve_mp->b_rptr,
11821 hdr_mp->b_wptr - hdr_mp->b_rptr);
11822 mp = carve_mp;
11823 freeb(hdr_mp);
11824 hdr_mp = mp;
11825 } else {
11826 mp = hdr_mp;
11827 /* Get priority marking, if any. */
11828 mp->b_band = priority;
11829 mp->b_cont = carve_mp;
11830 }
11831 ipha = (ipha_t *)mp->b_rptr;
11832 /* A frag of a frag might have IPH_MF non-zero */
11833 offset_and_flags =
11834 ntohs(ipha->ipha_fragment_offset_and_flags) &
11835 IPH_MF;
11836 }
11837 offset_and_flags |= (uint16_t)(offset >> 3);
11838 offset_and_flags |= (uint16_t)frag_flag;
11839 /* Store the offset and flags in the IP header. */
11840 ipha->ipha_fragment_offset_and_flags = htons(offset_and_flags);
11841
11842 /* Store the length in the IP header. */
11843 ip_len = (uint16_t)(len + hdr_len);
11844 ipha->ipha_length = htons(ip_len);
11845
11846 /*
11847 * Set the IP header checksum. Note that mp is just
11848 * the header, so this is easy to pass to ip_csum.
11849 */
11850 ipha->ipha_hdr_checksum = ip_csum_hdr(ipha);
11851
11852 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragCreates);
11853
11854 error = postfragfn(mp, nce, ixaflags, ip_len, xmit_hint, szone,
11855 nolzid, ixa_cookie);
11856 /* All done if we just consumed the hdr_mp. */
11857 if (mp == hdr_mp) {
11858 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragOKs);
11859 return (error);
11860 }
11861 if (error != 0 && error != EWOULDBLOCK) {
11862 DTRACE_PROBE2(ip__xmit__frag__fail, ill_t *, ill,
11863 mblk_t *, hdr_mp);
11864 /* No point in sending the other fragments */
11865 break;
11866 }
11867
11868 /* Otherwise, advance and loop. */
11869 offset += len;
11870 }
	/* Clean up following an allocation or transmit failure. */
11872 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails);
11873 ip_drop_output("FragFails: loop ended", NULL, ill);
11874 if (mp != hdr_mp)
11875 freeb(hdr_mp);
11876 if (mp != mp_orig)
11877 freemsg(mp_orig);
11878 return (error);
11879 }
11880
11881 /*
11882 * Copy the header plus those options which have the copy bit set
11883 */
11884 static mblk_t *
11885 ip_fragment_copyhdr(uchar_t *rptr, int hdr_len, int offset, ip_stack_t *ipst,
11886 mblk_t *src)
11887 {
11888 mblk_t *mp;
11889 uchar_t *up;
11890
11891 /*
11892 * Quick check if we need to look for options without the copy bit
11893 * set
11894 */
11895 mp = allocb_tmpl(ipst->ips_ip_wroff_extra + hdr_len, src);
11896 if (!mp)
11897 return (mp);
11898 mp->b_rptr += ipst->ips_ip_wroff_extra;
11899 if (hdr_len == IP_SIMPLE_HDR_LENGTH || offset != 0) {
11900 bcopy(rptr, mp->b_rptr, hdr_len);
11901 mp->b_wptr += hdr_len + ipst->ips_ip_wroff_extra;
11902 return (mp);
11903 }
11904 up = mp->b_rptr;
11905 bcopy(rptr, up, IP_SIMPLE_HDR_LENGTH);
11906 up += IP_SIMPLE_HDR_LENGTH;
11907 rptr += IP_SIMPLE_HDR_LENGTH;
11908 hdr_len -= IP_SIMPLE_HDR_LENGTH;
11909 while (hdr_len > 0) {
11910 uint32_t optval;
11911 uint32_t optlen;
11912
11913 optval = *rptr;
11914 if (optval == IPOPT_EOL)
11915 break;
11916 if (optval == IPOPT_NOP)
11917 optlen = 1;
11918 else
11919 optlen = rptr[1];
11920 if (optval & IPOPT_COPY) {
11921 bcopy(rptr, up, optlen);
11922 up += optlen;
11923 }
11924 rptr += optlen;
11925 hdr_len -= optlen;
11926 }
11927 /*
11928 * Make sure that we drop an even number of words by filling
11929 * with EOL to the next word boundary.
11930 */
11931 for (hdr_len = up - (mp->b_rptr + IP_SIMPLE_HDR_LENGTH);
11932 hdr_len & 0x3; hdr_len++)
11933 *up++ = IPOPT_EOL;
11934 mp->b_wptr = up;
	/* Update the version/IHL byte; IHL counts 32-bit words. */
11936 mp->b_rptr[0] = (uint8_t)((IP_VERSION << 4) | ((up - mp->b_rptr) >> 2));
11937 return (mp);
11938 }
11939
11940 /*
11941 * Update any source route, record route, or timestamp options when
11942 * sending a packet back to ourselves.
11943 * Check that we are at end of strict source route.
11944 * The options have been sanity checked by ip_output_options().
11945 */
11946 void
11947 ip_output_local_options(ipha_t *ipha, ip_stack_t *ipst)
11948 {
11949 ipoptp_t opts;
11950 uchar_t *opt;
11951 uint8_t optval;
11952 uint8_t optlen;
11953 ipaddr_t dst;
11954 uint32_t ts;
11955 timestruc_t now;
11956
11957 for (optval = ipoptp_first(&opts, ipha);
11958 optval != IPOPT_EOL;
11959 optval = ipoptp_next(&opts)) {
11960 opt = opts.ipoptp_cur;
11961 optlen = opts.ipoptp_len;
11962 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
11963 switch (optval) {
11964 uint32_t off;
11965 case IPOPT_SSRR:
11966 case IPOPT_LSRR:
11967 off = opt[IPOPT_OFFSET];
11968 off--;
11969 if (optlen < IP_ADDR_LEN ||
11970 off > optlen - IP_ADDR_LEN) {
11971 /* End of source route */
11972 break;
11973 }
11974 /*
11975 * This will only happen if two consecutive entries
11976 * in the source route contains our address or if
11977 * it is a packet with a loose source route which
11978 * reaches us before consuming the whole source route
11979 */
11980
11981 if (optval == IPOPT_SSRR) {
11982 return;
11983 }
11984 /*
11985 * Hack: instead of dropping the packet truncate the
11986 * source route to what has been used by filling the
11987 * rest with IPOPT_NOP.
11988 */
11989 opt[IPOPT_OLEN] = (uint8_t)off;
11990 while (off < optlen) {
11991 opt[off++] = IPOPT_NOP;
11992 }
11993 break;
11994 case IPOPT_RR:
11995 off = opt[IPOPT_OFFSET];
11996 off--;
11997 if (optlen < IP_ADDR_LEN ||
11998 off > optlen - IP_ADDR_LEN) {
11999 /* No more room - ignore */
12000 ip1dbg((
12001 "ip_output_local_options: end of RR\n"));
12002 break;
12003 }
12004 dst = htonl(INADDR_LOOPBACK);
12005 bcopy(&dst, (char *)opt + off, IP_ADDR_LEN);
12006 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
12007 break;
12008 case IPOPT_TS:
			/* Insert timestamp if there is room */
12010 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
12011 case IPOPT_TS_TSONLY:
12012 off = IPOPT_TS_TIMELEN;
12013 break;
12014 case IPOPT_TS_PRESPEC:
12015 case IPOPT_TS_PRESPEC_RFC791:
12016 /* Verify that the address matched */
12017 off = opt[IPOPT_OFFSET] - 1;
12018 bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
12019 if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
12020 /* Not for us */
12021 break;
12022 }
12023 /* FALLTHRU */
12024 case IPOPT_TS_TSANDADDR:
12025 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
12026 break;
12027 default:
12028 /*
12029 * ip_*put_options should have already
12030 * dropped this packet.
12031 */
12032 cmn_err(CE_PANIC, "ip_output_local_options: "
12033 "unknown IT - bug in ip_output_options?\n");
12034 return; /* Keep "lint" happy */
12035 }
12036 if (opt[IPOPT_OFFSET] - 1 + off > optlen) {
12037 /* Increase overflow counter */
12038 off = (opt[IPOPT_POS_OV_FLG] >> 4) + 1;
12039 opt[IPOPT_POS_OV_FLG] = (uint8_t)
12040 (opt[IPOPT_POS_OV_FLG] & 0x0F) |
12041 (off << 4);
12042 break;
12043 }
12044 off = opt[IPOPT_OFFSET] - 1;
12045 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
12046 case IPOPT_TS_PRESPEC:
12047 case IPOPT_TS_PRESPEC_RFC791:
12048 case IPOPT_TS_TSANDADDR:
12049 dst = htonl(INADDR_LOOPBACK);
12050 bcopy(&dst, (char *)opt + off, IP_ADDR_LEN);
12051 opt[IPOPT_OFFSET] += IP_ADDR_LEN;
12052 /* FALLTHRU */
12053 case IPOPT_TS_TSONLY:
12054 off = opt[IPOPT_OFFSET] - 1;
12055 /* Compute # of milliseconds since midnight */
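				/*
				 * (e.g., 01:00:00.500 UT encodes as
				 * 3600 * 1000 + 500 = 3600500)
				 */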
12056 gethrestime(&now);
12057 ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
12058 NSEC2MSEC(now.tv_nsec);
12059 bcopy(&ts, (char *)opt + off, IPOPT_TS_TIMELEN);
12060 opt[IPOPT_OFFSET] += IPOPT_TS_TIMELEN;
12061 break;
12062 }
12063 break;
12064 }
12065 }
12066 }
12067
12068 /*
12069 * Prepend an M_DATA fastpath header, and if none present prepend a
12070 * DL_UNITDATA_REQ. Frees the mblk on failure.
12071 *
 * nce_dlur_mp and nce_fp_mp cannot disappear once they have been set.
12073 * If there is a change to them, the nce will be deleted (condemned) and
12074 * a new nce_t will be created when packets are sent. Thus we need no locks
12075 * to access those fields.
12076 *
12077 * We preserve b_band to support IPQoS. If a DL_UNITDATA_REQ is prepended
12078 * we place b_band in dl_priority.dl_max.
12079 */
12080 static mblk_t *
12081 ip_xmit_attach_llhdr(mblk_t *mp, nce_t *nce)
12082 {
12083 uint_t hlen;
12084 mblk_t *mp1;
12085 uint_t priority;
12086 uchar_t *rptr;
12087
12088 rptr = mp->b_rptr;
12089
12090 ASSERT(DB_TYPE(mp) == M_DATA);
12091 priority = mp->b_band;
12092
12093 ASSERT(nce != NULL);
12094 if ((mp1 = nce->nce_fp_mp) != NULL) {
12095 hlen = MBLKL(mp1);
12096 /*
12097 * Check if we have enough room to prepend fastpath
12098 * header
12099 */
12100 if (hlen != 0 && (rptr - mp->b_datap->db_base) >= hlen) {
12101 rptr -= hlen;
12102 bcopy(mp1->b_rptr, rptr, hlen);
12103 /*
12104 * Set the b_rptr to the start of the link layer
12105 * header
12106 */
12107 mp->b_rptr = rptr;
12108 return (mp);
12109 }
12110 mp1 = copyb(mp1);
12111 if (mp1 == NULL) {
12112 ill_t *ill = nce->nce_ill;
12113
12114 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
12115 ip_drop_output("ipIfStatsOutDiscards", mp, ill);
12116 freemsg(mp);
12117 return (NULL);
12118 }
12119 mp1->b_band = priority;
12120 mp1->b_cont = mp;
12121 DB_CKSUMSTART(mp1) = DB_CKSUMSTART(mp);
12122 DB_CKSUMSTUFF(mp1) = DB_CKSUMSTUFF(mp);
12123 DB_CKSUMEND(mp1) = DB_CKSUMEND(mp);
12124 DB_CKSUMFLAGS(mp1) = DB_CKSUMFLAGS(mp);
12125 DB_LSOMSS(mp1) = DB_LSOMSS(mp);
12126 DTRACE_PROBE1(ip__xmit__copyb, (mblk_t *), mp1);
12127 /*
12128 * XXX disable ICK_VALID and compute checksum
12129 * here; can happen if nce_fp_mp changes and
12130 * it can't be copied now due to insufficient
12131 * space. (unlikely, fp mp can change, but it
12132 * does not increase in length)
12133 */
12134 return (mp1);
12135 }
12136 mp1 = copyb(nce->nce_dlur_mp);
12137
12138 if (mp1 == NULL) {
12139 ill_t *ill = nce->nce_ill;
12140
12141 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
12142 ip_drop_output("ipIfStatsOutDiscards", mp, ill);
12143 freemsg(mp);
12144 return (NULL);
12145 }
12146 mp1->b_cont = mp;
12147 if (priority != 0) {
12148 mp1->b_band = priority;
12149 ((dl_unitdata_req_t *)(mp1->b_rptr))->dl_priority.dl_max =
12150 priority;
12151 }
12152 return (mp1);
12153 }
12154
12155 /*
12156 * Finish the outbound IPsec processing. This function is called from
12157 * ipsec_out_process() if the IPsec packet was processed
12158 * synchronously, or from {ah,esp}_kcf_callback_outbound() if it was processed
12159 * asynchronously.
12160 *
12161 * This is common to IPv4 and IPv6.
12162 */
12163 int
12164 ip_output_post_ipsec(mblk_t *mp, ip_xmit_attr_t *ixa)
12165 {
12166 iaflags_t ixaflags = ixa->ixa_flags;
	uint_t		pktlen;

12170 /* AH/ESP don't update ixa_pktlen when they modify the packet */
12171 if (ixaflags & IXAF_IS_IPV4) {
12172 ipha_t *ipha = (ipha_t *)mp->b_rptr;
12173
12174 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION);
12175 pktlen = ntohs(ipha->ipha_length);
12176 } else {
12177 ip6_t *ip6h = (ip6_t *)mp->b_rptr;
12178
12179 ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION);
12180 pktlen = ntohs(ip6h->ip6_plen) + IPV6_HDR_LEN;
12181 }
12182
12183 /*
12184 * We release any hard reference on the SAs here to make
12185 * sure the SAs can be garbage collected. ipsr_sa has a soft reference
12186 * on the SAs.
12187 * If in the future we want the hard latching of the SAs in the
12188 * ip_xmit_attr_t then we should remove this.
12189 */
12190 if (ixa->ixa_ipsec_esp_sa != NULL) {
12191 IPSA_REFRELE(ixa->ixa_ipsec_esp_sa);
12192 ixa->ixa_ipsec_esp_sa = NULL;
12193 }
12194 if (ixa->ixa_ipsec_ah_sa != NULL) {
12195 IPSA_REFRELE(ixa->ixa_ipsec_ah_sa);
12196 ixa->ixa_ipsec_ah_sa = NULL;
12197 }
12198
12199 /* Do we need to fragment? */
12200 if ((ixa->ixa_flags & IXAF_IPV6_ADD_FRAGHDR) ||
12201 pktlen > ixa->ixa_fragsize) {
12202 if (ixaflags & IXAF_IS_IPV4) {
12203 ASSERT(!(ixa->ixa_flags & IXAF_IPV6_ADD_FRAGHDR));
12204 /*
12205 * We check for the DF case in ipsec_out_process
12206 * hence this only handles the non-DF case.
12207 */
12208 return (ip_fragment_v4(mp, ixa->ixa_nce, ixa->ixa_flags,
12209 pktlen, ixa->ixa_fragsize,
12210 ixa->ixa_xmit_hint, ixa->ixa_zoneid,
12211 ixa->ixa_no_loop_zoneid, ixa->ixa_postfragfn,
12212 &ixa->ixa_cookie));
12213 } else {
12214 mp = ip_fraghdr_add_v6(mp, ixa->ixa_ident, ixa);
12215 if (mp == NULL) {
12216 /* MIB and ip_drop_output already done */
12217 return (ENOMEM);
12218 }
12219 pktlen += sizeof (ip6_frag_t);
12220 if (pktlen > ixa->ixa_fragsize) {
12221 return (ip_fragment_v6(mp, ixa->ixa_nce,
12222 ixa->ixa_flags, pktlen,
12223 ixa->ixa_fragsize, ixa->ixa_xmit_hint,
12224 ixa->ixa_zoneid, ixa->ixa_no_loop_zoneid,
12225 ixa->ixa_postfragfn, &ixa->ixa_cookie));
12226 }
12227 }
12228 }
12229 return ((ixa->ixa_postfragfn)(mp, ixa->ixa_nce, ixa->ixa_flags,
12230 pktlen, ixa->ixa_xmit_hint, ixa->ixa_zoneid,
12231 ixa->ixa_no_loop_zoneid, NULL));
12232 }
12233
12234 /*
 * Finish the inbound IPsec processing. This function is called from the
 * AH and ESP inbound code if the IPsec packet was processed synchronously,
 * or from {ah,esp}_kcf_callback_inbound() if it was processed
 * asynchronously.
12239 *
12240 * This is common to IPv4 and IPv6.
12241 */
12242 void
12243 ip_input_post_ipsec(mblk_t *mp, ip_recv_attr_t *ira)
12244 {
12245 iaflags_t iraflags = ira->ira_flags;
12246
12247 /* Length might have changed */
12248 if (iraflags & IRAF_IS_IPV4) {
12249 ipha_t *ipha = (ipha_t *)mp->b_rptr;
12250
12251 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION);
12252 ira->ira_pktlen = ntohs(ipha->ipha_length);
12253 ira->ira_ip_hdr_length = IPH_HDR_LENGTH(ipha);
12254 ira->ira_protocol = ipha->ipha_protocol;
12255
12256 ip_fanout_v4(mp, ipha, ira);
12257 } else {
12258 ip6_t *ip6h = (ip6_t *)mp->b_rptr;
12259 uint8_t *nexthdrp;
12260
12261 ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION);
12262 ira->ira_pktlen = ntohs(ip6h->ip6_plen) + IPV6_HDR_LEN;
12263 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &ira->ira_ip_hdr_length,
12264 &nexthdrp)) {
12265 /* Malformed packet */
12266 BUMP_MIB(ira->ira_ill->ill_ip_mib, ipIfStatsInDiscards);
12267 ip_drop_input("ipIfStatsInDiscards", mp, ira->ira_ill);
12268 freemsg(mp);
12269 return;
12270 }
12271 ira->ira_protocol = *nexthdrp;
12272 ip_fanout_v6(mp, ip6h, ira);
12273 }
12274 }
12275
12276 /*
12277 * Select which AH & ESP SA's to use (if any) for the outbound packet.
12278 *
12279 * If this function returns B_TRUE, the requested SA's have been filled
12280 * into the ixa_ipsec_*_sa pointers.
12281 *
12282 * If the function returns B_FALSE, the packet has been "consumed", most
12283 * likely by an ACQUIRE sent up via PF_KEY to a key management daemon.
12284 *
12285 * The SA references created by the protocol-specific "select"
12286 * function will be released in ip_output_post_ipsec.
12287 */
12288 static boolean_t
12289 ipsec_out_select_sa(mblk_t *mp, ip_xmit_attr_t *ixa)
12290 {
12291 boolean_t need_ah_acquire = B_FALSE, need_esp_acquire = B_FALSE;
12292 ipsec_policy_t *pp;
12293 ipsec_action_t *ap;
12294
12295 ASSERT(ixa->ixa_flags & IXAF_IPSEC_SECURE);
12296 ASSERT((ixa->ixa_ipsec_policy != NULL) ||
12297 (ixa->ixa_ipsec_action != NULL));
12298
12299 ap = ixa->ixa_ipsec_action;
12300 if (ap == NULL) {
12301 pp = ixa->ixa_ipsec_policy;
12302 ASSERT(pp != NULL);
12303 ap = pp->ipsp_act;
12304 ASSERT(ap != NULL);
12305 }
12306
12307 /*
	 * We have an action. Now, let's select SAs.
12309 * A side effect of setting ixa_ipsec_*_sa is that it will
12310 * be cached in the conn_t.
12311 */
12312 if (ap->ipa_want_esp) {
12313 if (ixa->ixa_ipsec_esp_sa == NULL) {
12314 need_esp_acquire = !ipsec_outbound_sa(mp, ixa,
12315 IPPROTO_ESP);
12316 }
12317 ASSERT(need_esp_acquire || ixa->ixa_ipsec_esp_sa != NULL);
12318 }
12319
12320 if (ap->ipa_want_ah) {
12321 if (ixa->ixa_ipsec_ah_sa == NULL) {
12322 need_ah_acquire = !ipsec_outbound_sa(mp, ixa,
12323 IPPROTO_AH);
12324 }
12325 ASSERT(need_ah_acquire || ixa->ixa_ipsec_ah_sa != NULL);
12326 /*
12327 * The ESP and AH processing order needs to be preserved
12328 * when both protocols are required (ESP should be applied
12329 * before AH for an outbound packet). Force an ESP ACQUIRE
12330 * when both ESP and AH are required, and an AH ACQUIRE
12331 * is needed.
12332 */
12333 if (ap->ipa_want_esp && need_ah_acquire)
12334 need_esp_acquire = B_TRUE;
12335 }
12336
12337 /*
12338 * Send an ACQUIRE (extended, regular, or both) if we need one.
12339 * Release SAs that got referenced, but will not be used until we
12340 * acquire _all_ of the SAs we need.
12341 */
12342 if (need_ah_acquire || need_esp_acquire) {
12343 if (ixa->ixa_ipsec_ah_sa != NULL) {
12344 IPSA_REFRELE(ixa->ixa_ipsec_ah_sa);
12345 ixa->ixa_ipsec_ah_sa = NULL;
12346 }
12347 if (ixa->ixa_ipsec_esp_sa != NULL) {
12348 IPSA_REFRELE(ixa->ixa_ipsec_esp_sa);
12349 ixa->ixa_ipsec_esp_sa = NULL;
12350 }
12351
12352 sadb_acquire(mp, ixa, need_ah_acquire, need_esp_acquire);
12353 return (B_FALSE);
12354 }
12355
12356 return (B_TRUE);
12357 }
12358
12359 /*
12360 * Handle IPsec output processing.
12361 * This function is only entered once for a given packet.
 * We try to do things synchronously, but if key management must set up
 * SAs at user level, or if ESP or AH uses asynchronous kEF, then the
 * operation is completed later:
 * - when the SAs are added in esp_add_sa_finish/ah_add_sa_finish
 * - when asynchronous ESP is done, it continues with AH
12367 *
12368 * In all cases we come back in ip_output_post_ipsec() to fragment and
12369 * send out the packet.
12370 */
12371 int
12372 ipsec_out_process(mblk_t *mp, ip_xmit_attr_t *ixa)
12373 {
12374 ill_t *ill = ixa->ixa_nce->nce_ill;
12375 ip_stack_t *ipst = ixa->ixa_ipst;
12376 ipsec_stack_t *ipss;
12377 ipsec_policy_t *pp;
12378 ipsec_action_t *ap;
12379
12380 ASSERT(ixa->ixa_flags & IXAF_IPSEC_SECURE);
12381
12382 ASSERT((ixa->ixa_ipsec_policy != NULL) ||
12383 (ixa->ixa_ipsec_action != NULL));
12384
12385 ipss = ipst->ips_netstack->netstack_ipsec;
12386 if (!ipsec_loaded(ipss)) {
12387 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
12388 ip_drop_packet(mp, B_TRUE, ill,
12389 DROPPER(ipss, ipds_ip_ipsec_not_loaded),
12390 &ipss->ipsec_dropper);
12391 return (ENOTSUP);
12392 }
12393
12394 ap = ixa->ixa_ipsec_action;
12395 if (ap == NULL) {
12396 pp = ixa->ixa_ipsec_policy;
12397 ASSERT(pp != NULL);
12398 ap = pp->ipsp_act;
12399 ASSERT(ap != NULL);
12400 }
12401
12402 /* Handle explicit drop action and bypass. */
12403 switch (ap->ipa_act.ipa_type) {
12404 case IPSEC_ACT_DISCARD:
12405 case IPSEC_ACT_REJECT:
12406 ip_drop_packet(mp, B_FALSE, ill,
12407 DROPPER(ipss, ipds_spd_explicit), &ipss->ipsec_spd_dropper);
12408 return (EHOSTUNREACH); /* IPsec policy failure */
12409 case IPSEC_ACT_BYPASS:
12410 return (ip_output_post_ipsec(mp, ixa));
12411 }
12412
12413 /*
	 * The order of processing: first insert an outer IP header if needed
	 * (self-encapsulation), then the ESP header, and then the AH header.
12416 */
12417 if ((ixa->ixa_flags & IXAF_IS_IPV4) && ap->ipa_want_se) {
12418 /*
12419 * First get the outer IP header before sending
12420 * it to ESP.
12421 */
12422 ipha_t *oipha, *iipha;
12423 mblk_t *outer_mp, *inner_mp;
12424
12425 if ((outer_mp = allocb(sizeof (ipha_t), BPRI_HI)) == NULL) {
12426 (void) mi_strlog(ill->ill_rq, 0,
12427 SL_ERROR|SL_TRACE|SL_CONSOLE,
12428 "ipsec_out_process: "
12429 "Self-Encapsulation failed: Out of memory\n");
12430 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
12431 ip_drop_output("ipIfStatsOutDiscards", mp, ill);
12432 freemsg(mp);
12433 return (ENOBUFS);
12434 }
12435 inner_mp = mp;
12436 ASSERT(inner_mp->b_datap->db_type == M_DATA);
12437 oipha = (ipha_t *)outer_mp->b_rptr;
12438 iipha = (ipha_t *)inner_mp->b_rptr;
12439 *oipha = *iipha;
12440 outer_mp->b_wptr += sizeof (ipha_t);
12441 oipha->ipha_length = htons(ntohs(iipha->ipha_length) +
12442 sizeof (ipha_t));
12443 oipha->ipha_protocol = IPPROTO_ENCAP;
12444 oipha->ipha_version_and_hdr_length =
12445 IP_SIMPLE_HDR_VERSION;
12446 oipha->ipha_hdr_checksum = 0;
12447 oipha->ipha_hdr_checksum = ip_csum_hdr(oipha);
12448 outer_mp->b_cont = inner_mp;
12449 mp = outer_mp;
12450
12451 ixa->ixa_flags |= IXAF_IPSEC_TUNNEL;
12452 }
12453
	/* If we need to wait for an SA then we can't return any errno */
12455 if (((ap->ipa_want_ah && (ixa->ixa_ipsec_ah_sa == NULL)) ||
12456 (ap->ipa_want_esp && (ixa->ixa_ipsec_esp_sa == NULL))) &&
12457 !ipsec_out_select_sa(mp, ixa))
12458 return (0);
12459
12460 /*
12461 * By now, we know what SA's to use. Toss over to ESP & AH
12462 * to do the heavy lifting.
12463 */
12464 if (ap->ipa_want_esp) {
12465 ASSERT(ixa->ixa_ipsec_esp_sa != NULL);
12466
12467 mp = ixa->ixa_ipsec_esp_sa->ipsa_output_func(mp, ixa);
12468 if (mp == NULL) {
12469 /*
			 * Either it failed or is pending. In the former case
			 * the discard was already counted in the MIB.
12472 */
12473 return (0);
12474 }
12475 }
12476
12477 if (ap->ipa_want_ah) {
12478 ASSERT(ixa->ixa_ipsec_ah_sa != NULL);
12479
12480 mp = ixa->ixa_ipsec_ah_sa->ipsa_output_func(mp, ixa);
12481 if (mp == NULL) {
12482 /*
			 * Either it failed or is pending. In the former case
			 * the discard was already counted in the MIB.
12485 */
12486 return (0);
12487 }
12488 }
12489 /*
12490 * We are done with IPsec processing. Send it over
12491 * the wire.
12492 */
12493 return (ip_output_post_ipsec(mp, ixa));
12494 }
12495
12496 /*
12497 * ioctls that go through a down/up sequence may need to wait for the down
12498 * to complete. This involves waiting for the ire and ipif refcnts to go down
12499 * to zero. Subsequently the ioctl is restarted from ipif_ill_refrele_tail.
12500 */
12501 /* ARGSUSED */
12502 void
12503 ip_reprocess_ioctl(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy_arg)
12504 {
12505 struct iocblk *iocp;
12506 mblk_t *mp1;
12507 ip_ioctl_cmd_t *ipip;
12508 int err;
12509 sin_t *sin;
12510 struct lifreq *lifr;
12511 struct ifreq *ifr;
12512
12513 iocp = (struct iocblk *)mp->b_rptr;
12514 ASSERT(ipsq != NULL);
12515 /* Existence of mp1 verified in ip_wput_nondata */
12516 mp1 = mp->b_cont->b_cont;
12517 ipip = ip_sioctl_lookup(iocp->ioc_cmd);
12518 if (ipip->ipi_cmd == SIOCSLIFNAME || ipip->ipi_cmd == IF_UNITSEL) {
12519 /*
12520 * Special case where ipx_current_ipif is not set:
12521 * ill_phyint_reinit merged the v4 and v6 into a single ipsq.
		 * We are here because we were not able to complete the
		 * operation in ipif_set_values, as we could not become
		 * exclusive on the new ipsq.
12525 */
12526 ill_t *ill = q->q_ptr;
12527 ipsq_current_start(ipsq, ill->ill_ipif, ipip->ipi_cmd);
12528 }
12529 ASSERT(ipsq->ipsq_xop->ipx_current_ipif != NULL);
12530
12531 if (ipip->ipi_cmd_type == IF_CMD) {
		/* This is an old-style SIOC[GS]IF* command */
12533 ifr = (struct ifreq *)mp1->b_rptr;
12534 sin = (sin_t *)&ifr->ifr_addr;
12535 } else if (ipip->ipi_cmd_type == LIF_CMD) {
		/* This is a new-style SIOC[GS]LIF* command */
12537 lifr = (struct lifreq *)mp1->b_rptr;
12538 sin = (sin_t *)&lifr->lifr_addr;
12539 } else {
12540 sin = NULL;
12541 }
12542
12543 err = (*ipip->ipi_func_restart)(ipsq->ipsq_xop->ipx_current_ipif, sin,
12544 q, mp, ipip, mp1->b_rptr);
12545
12546 DTRACE_PROBE4(ipif__ioctl, char *, "ip_reprocess_ioctl finish",
12547 int, ipip->ipi_cmd,
12548 ill_t *, ipsq->ipsq_xop->ipx_current_ipif->ipif_ill,
12549 ipif_t *, ipsq->ipsq_xop->ipx_current_ipif);
12550
12551 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), ipsq);
12552 }
12553
12554 /*
12555 * ioctl processing
12556 *
12557 * ioctl processing starts with ip_sioctl_copyin_setup(), which looks up
12558 * the ioctl command in the ioctl tables, determines the copyin data size
12559 * from the ipi_copyin_size field, and does an mi_copyin() of that size.
12560 *
12561 * ioctl processing then continues when the M_IOCDATA makes its way down to
12562 * ip_wput_nondata(). The ioctl is looked up again in the ioctl table, its
12563 * associated 'conn' is refheld till the end of the ioctl and the general
12564 * ioctl processing function ip_process_ioctl() is called to extract the
12565 * arguments and process the ioctl. To simplify extraction, ioctl commands
12566 * are "typed" based on the arguments they take (e.g., LIF_CMD which takes a
12567 * `struct lifreq'), and a common extract function (e.g., ip_extract_lifreq())
12568 * is used to extract the ioctl's arguments.
12569 *
12570 * ip_process_ioctl determines if the ioctl needs to be serialized, and if
12571 * so goes thru the serialization primitive ipsq_try_enter. Then the
12572 * appropriate function to handle the ioctl is called based on the entry in
12573 * the ioctl table. ioctl completion is encapsulated in ip_ioctl_finish
12574 * which also refreleases the 'conn' that was refheld at the start of the
12575 * ioctl. Finally ipsq_exit is called if needed to exit the ipsq.
12576 *
 * Many exclusive ioctls go thru an internal down/up sequence as part of
12578 * the operation. For example an attempt to change the IP address of an
12579 * ipif entails ipif_down, set address, ipif_up. Bringing down the interface
12580 * does all the cleanup such as deleting all ires that use this address.
12581 * Then we need to wait till all references to the interface go away.
12582 */
12583 void
12584 ip_process_ioctl(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *arg)
12585 {
12586 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
12587 ip_ioctl_cmd_t *ipip = arg;
12588 ip_extract_func_t *extract_funcp;
12589 ill_t *ill;
12590 cmd_info_t ci;
12591 int err;
12592 boolean_t entered_ipsq = B_FALSE;
12593
12594 ip3dbg(("ip_process_ioctl: ioctl %X\n", iocp->ioc_cmd));
12595
12596 if (ipip == NULL)
12597 ipip = ip_sioctl_lookup(iocp->ioc_cmd);
12598
12599 /*
12600 * SIOCLIFADDIF needs to go thru a special path since the
12601 * ill may not exist yet. This happens in the case of lo0
12602 * which is created using this ioctl.
12603 */
12604 if (ipip->ipi_cmd == SIOCLIFADDIF) {
12605 err = ip_sioctl_addif(NULL, NULL, q, mp, NULL, NULL);
12606 DTRACE_PROBE4(ipif__ioctl, char *, "ip_process_ioctl finish",
12607 int, ipip->ipi_cmd, ill_t *, NULL, ipif_t *, NULL);
12608 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), NULL);
12609 return;
12610 }
12611
12612 ci.ci_ipif = NULL;
12613 switch (ipip->ipi_cmd_type) {
12614 case MISC_CMD:
12615 case MSFILT_CMD:
12616 /*
12617 * All MISC_CMD ioctls come in here -- e.g. SIOCGLIFCONF.
12618 */
12619 if (ipip->ipi_cmd == IF_UNITSEL) {
12620 /* ioctl comes down the ill */
12621 ci.ci_ipif = ((ill_t *)q->q_ptr)->ill_ipif;
12622 ipif_refhold(ci.ci_ipif);
12623 }
12624 err = 0;
12625 ci.ci_sin = NULL;
12626 ci.ci_sin6 = NULL;
12627 ci.ci_lifr = NULL;
12628 extract_funcp = NULL;
12629 break;
12630
12631 case IF_CMD:
12632 case LIF_CMD:
12633 extract_funcp = ip_extract_lifreq;
12634 break;
12635
12636 case ARP_CMD:
12637 case XARP_CMD:
12638 extract_funcp = ip_extract_arpreq;
12639 break;
12640
12641 default:
12642 ASSERT(0);
12643 }
12644
12645 if (extract_funcp != NULL) {
12646 err = (*extract_funcp)(q, mp, ipip, &ci);
12647 if (err != 0) {
12648 DTRACE_PROBE4(ipif__ioctl,
12649 char *, "ip_process_ioctl finish err",
12650 int, ipip->ipi_cmd, ill_t *, NULL, ipif_t *, NULL);
12651 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), NULL);
12652 return;
12653 }
12654
12655 /*
12656 * All of the extraction functions return a refheld ipif.
12657 */
12658 ASSERT(ci.ci_ipif != NULL);
12659 }
12660
12661 if (!(ipip->ipi_flags & IPI_WR)) {
12662 /*
12663 * A return value of EINPROGRESS means the ioctl is
12664 * either queued and waiting for some reason or has
12665 * already completed.
12666 */
12667 err = (*ipip->ipi_func)(ci.ci_ipif, ci.ci_sin, q, mp, ipip,
12668 ci.ci_lifr);
12669 if (ci.ci_ipif != NULL) {
12670 DTRACE_PROBE4(ipif__ioctl,
12671 char *, "ip_process_ioctl finish RD",
12672 int, ipip->ipi_cmd, ill_t *, ci.ci_ipif->ipif_ill,
12673 ipif_t *, ci.ci_ipif);
12674 ipif_refrele(ci.ci_ipif);
12675 } else {
12676 DTRACE_PROBE4(ipif__ioctl,
12677 char *, "ip_process_ioctl finish RD",
12678 int, ipip->ipi_cmd, ill_t *, NULL, ipif_t *, NULL);
12679 }
12680 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), NULL);
12681 return;
12682 }
12683
12684 ASSERT(ci.ci_ipif != NULL);
12685
12686 /*
12687 * If ipsq is non-NULL, we are already being called exclusively
12688 */
12689 ASSERT(ipsq == NULL || IAM_WRITER_IPSQ(ipsq));
12690 if (ipsq == NULL) {
12691 ipsq = ipsq_try_enter(ci.ci_ipif, NULL, q, mp, ip_process_ioctl,
12692 NEW_OP, B_TRUE);
12693 if (ipsq == NULL) {
12694 ipif_refrele(ci.ci_ipif);
12695 return;
12696 }
12697 entered_ipsq = B_TRUE;
12698 }
12699 /*
12700 * Release the ipif so that ipif_down and friends that wait for
12701 * references to go away are not misled about the current ipif_refcnt
	 * values. We are writer, so we can access the ipif even after
	 * releasing the reference.
12704 */
12705 ipif_refrele(ci.ci_ipif);
12706
12707 ipsq_current_start(ipsq, ci.ci_ipif, ipip->ipi_cmd);
12708
12709 /*
12710 * We need to cache the ill_t that we're going to use as the argument
12711 * to the ipif-ioctl DTrace probe (below) because the ci_ipif can be
12712 * blown away by calling ipi_func.
12713 */
12714 ill = ci.ci_ipif == NULL ? NULL : ci.ci_ipif->ipif_ill;
12715
12716 /*
12717 * A return value of EINPROGRESS means the ioctl is
12718 * either queued and waiting for some reason or has
12719 * already completed.
12720 */
12721 err = (*ipip->ipi_func)(ci.ci_ipif, ci.ci_sin, q, mp, ipip, ci.ci_lifr);
12722
12723 DTRACE_PROBE4(ipif__ioctl, char *, "ip_process_ioctl finish WR",
12724 int, ipip->ipi_cmd, ill_t *, ill, ipif_t *, ci.ci_ipif);
12725 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), ipsq);
12726
12727 if (entered_ipsq)
12728 ipsq_exit(ipsq);
12729 }
12730
12731 /*
12732 * Complete the ioctl. Typically ioctls use the mi package and need to
12733 * do mi_copyout/mi_copy_done.
12734 */
12735 void
12736 ip_ioctl_finish(queue_t *q, mblk_t *mp, int err, int mode, ipsq_t *ipsq)
12737 {
12738 conn_t *connp = NULL;
12739
12740 if (err == EINPROGRESS)
12741 return;
12742
12743 if (CONN_Q(q)) {
12744 connp = Q_TO_CONN(q);
12745 ASSERT(connp->conn_ref >= 2);
12746 }
12747
12748 switch (mode) {
12749 case COPYOUT:
12750 if (err == 0)
12751 mi_copyout(q, mp);
12752 else
12753 mi_copy_done(q, mp, err);
12754 break;
12755
12756 case NO_COPYOUT:
12757 mi_copy_done(q, mp, err);
12758 break;
12759
12760 default:
12761 ASSERT(mode == CONN_CLOSE); /* aborted through CONN_CLOSE */
12762 break;
12763 }
12764
12765 /*
12766 * The conn refhold and ioctlref placed on the conn at the start of the
12767 * ioctl are released here.
12768 */
12769 if (connp != NULL) {
12770 CONN_DEC_IOCTLREF(connp);
12771 CONN_OPER_PENDING_DONE(connp);
12772 }
12773
12774 if (ipsq != NULL)
12775 ipsq_current_finish(ipsq);
12776 }
12777
12778 /* Handles all non data messages */
12779 void
12780 ip_wput_nondata(queue_t *q, mblk_t *mp)
12781 {
12782 mblk_t *mp1;
12783 struct iocblk *iocp;
12784 ip_ioctl_cmd_t *ipip;
12785 conn_t *connp;
12786 cred_t *cr;
12787 char *proto_str;
12788
12789 if (CONN_Q(q))
12790 connp = Q_TO_CONN(q);
12791 else
12792 connp = NULL;
12793
12794 switch (DB_TYPE(mp)) {
12795 case M_IOCTL:
12796 /*
12797 * IOCTL processing begins in ip_sioctl_copyin_setup which
12798 * will arrange to copy in associated control structures.
12799 */
12800 ip_sioctl_copyin_setup(q, mp);
12801 return;
12802 case M_IOCDATA:
12803 /*
		 * Ensure that this is associated with one of our
		 * transparent ioctls. If it's not ours, discard it if we're
12806 * running as a driver, or pass it on if we're a module.
12807 */
12808 iocp = (struct iocblk *)mp->b_rptr;
12809 ipip = ip_sioctl_lookup(iocp->ioc_cmd);
12810 if (ipip == NULL) {
12811 if (q->q_next == NULL) {
12812 goto nak;
12813 } else {
12814 putnext(q, mp);
12815 }
12816 return;
12817 }
12818 if ((q->q_next != NULL) && !(ipip->ipi_flags & IPI_MODOK)) {
12819 /*
			 * The ioctl is one we recognise, but it is not
			 * consumed by IP as a module and we are a module,
			 * so we drop it.
			 */
12823 goto nak;
12824 }
12825
12826 /* IOCTL continuation following copyin or copyout. */
12827 if (mi_copy_state(q, mp, NULL) == -1) {
12828 /*
12829 * The copy operation failed. mi_copy_state already
12830 * cleaned up, so we're out of here.
12831 */
12832 return;
12833 }
12834 /*
12835 * If we just completed a copy in, we become writer and
12836 * continue processing in ip_sioctl_copyin_done. If it
12837 * was a copy out, we call mi_copyout again. If there is
12838 * nothing more to copy out, it will complete the IOCTL.
12839 */
12840 if (MI_COPY_DIRECTION(mp) == MI_COPY_IN) {
12841 if (!(mp1 = mp->b_cont) || !(mp1 = mp1->b_cont)) {
12842 mi_copy_done(q, mp, EPROTO);
12843 return;
12844 }
12845 /*
12846 * Check for cases that need more copying. A return
12847 * value of 0 means a second copyin has been started,
12848 * so we return; a return value of 1 means no more
12849 * copying is needed, so we continue.
12850 */
12851 if (ipip->ipi_cmd_type == MSFILT_CMD &&
12852 MI_COPY_COUNT(mp) == 1) {
12853 if (ip_copyin_msfilter(q, mp) == 0)
12854 return;
12855 }
12856 /*
12857 * Refhold the conn, till the ioctl completes. This is
12858 * needed in case the ioctl ends up in the pending mp
12859 * list. Every mp in the ipx_pending_mp list must have
12860 * a refhold on the conn to resume processing. The
12861 * refhold is released when the ioctl completes
12862 * (whether normally or abnormally). An ioctlref is also
12863 * placed on the conn to prevent TCP from removing the
12864 * queue needed to send the ioctl reply back.
12865 * In all cases ip_ioctl_finish is called to finish
12866 * the ioctl and release the refholds.
12867 */
12868 if (connp != NULL) {
12869 /* This is not a reentry */
12870 CONN_INC_REF(connp);
12871 CONN_INC_IOCTLREF(connp);
12872 } else {
12873 if (!(ipip->ipi_flags & IPI_MODOK)) {
12874 mi_copy_done(q, mp, EINVAL);
12875 return;
12876 }
12877 }
12878
12879 ip_process_ioctl(NULL, q, mp, ipip);
12880
12881 } else {
12882 mi_copyout(q, mp);
12883 }
12884 return;
12885
12886 case M_IOCNAK:
12887 /*
12888 * The only way we could get here is if a resolver didn't like
12889 * an IOCTL we sent it. This shouldn't happen.
12890 */
12891 (void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
12892 "ip_wput_nondata: unexpected M_IOCNAK, ioc_cmd 0x%x",
12893 ((struct iocblk *)mp->b_rptr)->ioc_cmd);
12894 freemsg(mp);
12895 return;
12896 case M_IOCACK:
12897 /* /dev/ip shouldn't see this */
12898 goto nak;
12899 case M_FLUSH:
12900 if (*mp->b_rptr & FLUSHW)
12901 flushq(q, FLUSHALL);
12902 if (q->q_next) {
12903 putnext(q, mp);
12904 return;
12905 }
12906 if (*mp->b_rptr & FLUSHR) {
12907 *mp->b_rptr &= ~FLUSHW;
12908 qreply(q, mp);
12909 return;
12910 }
12911 freemsg(mp);
12912 return;
12913 case M_CTL:
12914 break;
12915 case M_PROTO:
12916 case M_PCPROTO:
12917 /*
12918 * The only PROTO messages we expect are SNMP-related.
12919 */
12920 switch (((union T_primitives *)mp->b_rptr)->type) {
12921 case T_SVR4_OPTMGMT_REQ:
12922 ip2dbg(("ip_wput_nondata: T_SVR4_OPTMGMT_REQ "
12923 "flags %x\n",
12924 ((struct T_optmgmt_req *)mp->b_rptr)->MGMT_flags));
12925
12926 if (connp == NULL) {
12927 proto_str = "T_SVR4_OPTMGMT_REQ";
12928 goto protonak;
12929 }
12930
12931 /*
12932 * All Solaris components should pass a db_credp
12933 * for this TPI message, hence we ASSERT.
12934 * But in case there is some other M_PROTO that looks
12935 * like a TPI message sent by some other kernel
12936 * component, we check and return an error.
12937 */
12938 cr = msg_getcred(mp, NULL);
12939 ASSERT(cr != NULL);
12940 if (cr == NULL) {
12941 mp = mi_tpi_err_ack_alloc(mp, TSYSERR, EINVAL);
12942 if (mp != NULL)
12943 qreply(q, mp);
12944 return;
12945 }
12946
12947 if (!snmpcom_req(q, mp, ip_snmp_set, ip_snmp_get, cr)) {
12948 proto_str = "Bad SNMPCOM request?";
12949 goto protonak;
12950 }
12951 return;
12952 default:
12953 ip1dbg(("ip_wput_nondata: dropping M_PROTO prim %u\n",
12954 (int)*(uint_t *)mp->b_rptr));
12955 freemsg(mp);
12956 return;
12957 }
12958 default:
12959 break;
12960 }
12961 if (q->q_next) {
12962 putnext(q, mp);
12963 } else
12964 freemsg(mp);
12965 return;
12966
12967 nak:
12968 iocp->ioc_error = EINVAL;
12969 mp->b_datap->db_type = M_IOCNAK;
12970 iocp->ioc_count = 0;
12971 qreply(q, mp);
12972 return;
12973
12974 protonak:
12975 cmn_err(CE_NOTE, "IP doesn't process %s as a module", proto_str);
12976 if ((mp = mi_tpi_err_ack_alloc(mp, TPROTO, EINVAL)) != NULL)
12977 qreply(q, mp);
12978 }
12979
12980 /*
12981 * Process IP options in an outbound packet. Verify that the nexthop in a
12982 * strict source route is onlink.
12983 * Returns non-zero if something fails in which case an ICMP error has been
12984 * sent and mp freed.
12985 *
12986 * Assumes the ULP has called ip_massage_options to move nexthop into ipha_dst.
12987 */
12988 int
12989 ip_output_options(mblk_t *mp, ipha_t *ipha, ip_xmit_attr_t *ixa, ill_t *ill)
12990 {
12991 ipoptp_t opts;
12992 uchar_t *opt;
12993 uint8_t optval;
12994 uint8_t optlen;
12995 ipaddr_t dst;
12996 intptr_t code = 0;
12997 ire_t *ire;
12998 ip_stack_t *ipst = ixa->ixa_ipst;
12999 ip_recv_attr_t iras;
13000
13001 ip2dbg(("ip_output_options\n"));
13002
13003 dst = ipha->ipha_dst;
13004 for (optval = ipoptp_first(&opts, ipha);
13005 optval != IPOPT_EOL;
13006 optval = ipoptp_next(&opts)) {
13007 opt = opts.ipoptp_cur;
13008 optlen = opts.ipoptp_len;
13009 ip2dbg(("ip_output_options: opt %d, len %d\n",
13010 optval, optlen));
13011 switch (optval) {
13012 uint32_t off;
13013 case IPOPT_SSRR:
13014 case IPOPT_LSRR:
13015 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
13016 ip1dbg((
13017 "ip_output_options: bad option offset\n"));
13018 code = (char *)&opt[IPOPT_OLEN] -
13019 (char *)ipha;
13020 goto param_prob;
13021 }
13022 off = opt[IPOPT_OFFSET];
13023 ip1dbg(("ip_output_options: next hop 0x%x\n",
13024 ntohl(dst)));
13025 /*
13026 * For strict: verify that dst is directly
13027 * reachable.
13028 */
13029 if (optval == IPOPT_SSRR) {
13030 ire = ire_ftable_lookup_v4(dst, 0, 0,
13031 IRE_INTERFACE, NULL, ALL_ZONES,
13032 ixa->ixa_tsl,
13033 MATCH_IRE_TYPE | MATCH_IRE_SECATTR, 0, ipst,
13034 NULL);
13035 if (ire == NULL) {
13036 ip1dbg(("ip_output_options: SSRR not"
13037 " directly reachable: 0x%x\n",
13038 ntohl(dst)));
13039 goto bad_src_route;
13040 }
13041 ire_refrele(ire);
13042 }
13043 break;
13044 case IPOPT_RR:
13045 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
13046 ip1dbg((
13047 "ip_output_options: bad option offset\n"));
13048 code = (char *)&opt[IPOPT_OLEN] -
13049 (char *)ipha;
13050 goto param_prob;
13051 }
13052 break;
13053 case IPOPT_TS:
13054 /*
13055 * Verify that length >=5 and that there is either
13056 * room for another timestamp or that the overflow
13057 * counter is not maxed out.
13058 */
13059 code = (char *)&opt[IPOPT_OLEN] - (char *)ipha;
13060 if (optlen < IPOPT_MINLEN_IT) {
13061 goto param_prob;
13062 }
13063 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
13064 ip1dbg((
13065 "ip_output_options: bad option offset\n"));
13066 code = (char *)&opt[IPOPT_OFFSET] -
13067 (char *)ipha;
13068 goto param_prob;
13069 }
13070 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
13071 case IPOPT_TS_TSONLY:
13072 off = IPOPT_TS_TIMELEN;
13073 break;
13074 case IPOPT_TS_TSANDADDR:
13075 case IPOPT_TS_PRESPEC:
13076 case IPOPT_TS_PRESPEC_RFC791:
13077 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
13078 break;
13079 default:
13080 code = (char *)&opt[IPOPT_POS_OV_FLG] -
13081 (char *)ipha;
13082 goto param_prob;
13083 }
13084 if (opt[IPOPT_OFFSET] - 1 + off > optlen &&
13085 (opt[IPOPT_POS_OV_FLG] & 0xF0) == 0xF0) {
13086 /*
13087 * No room and the overflow counter is 15
13088 * already.
13089 */
13090 goto param_prob;
13091 }
13092 break;
13093 }
13094 }
13095
13096 if ((opts.ipoptp_flags & IPOPTP_ERROR) == 0)
13097 return (0);
13098
13099 ip1dbg(("ip_output_options: error processing IP options."));
13100 code = (char *)&opt[IPOPT_OFFSET] - (char *)ipha;
13101
13102 param_prob:
13103 bzero(&iras, sizeof (iras));
13104 iras.ira_ill = iras.ira_rill = ill;
13105 iras.ira_ruifindex = ill->ill_phyint->phyint_ifindex;
13106 iras.ira_rifindex = iras.ira_ruifindex;
13107 iras.ira_flags = IRAF_IS_IPV4;
13108
13109 ip_drop_output("ip_output_options", mp, ill);
13110 icmp_param_problem(mp, (uint8_t)code, &iras);
13111 ASSERT(!(iras.ira_flags & IRAF_IPSEC_SECURE));
13112 return (-1);
13113
13114 bad_src_route:
13115 bzero(&iras, sizeof (iras));
13116 iras.ira_ill = iras.ira_rill = ill;
13117 iras.ira_ruifindex = ill->ill_phyint->phyint_ifindex;
13118 iras.ira_rifindex = iras.ira_ruifindex;
13119 iras.ira_flags = IRAF_IS_IPV4;
13120
13121 ip_drop_input("ICMP_SOURCE_ROUTE_FAILED", mp, ill);
13122 icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED, &iras);
13123 ASSERT(!(iras.ira_flags & IRAF_IPSEC_SECURE));
13124 return (-1);
13125 }
13126
13127 /*
13128 * The maximum value of conn_drain_list_cnt is CONN_MAXDRAINCNT.
13129 * conn_drain_list_cnt can be changed by setting conn_drain_nthreads
13130 * thru /etc/system.
13131 */
13132 #define CONN_MAXDRAINCNT 64
13133
13134 static void
13135 conn_drain_init(ip_stack_t *ipst)
13136 {
13137 int i, j;
13138 idl_tx_list_t *itl_tx;
13139
13140 ipst->ips_conn_drain_list_cnt = conn_drain_nthreads;
13141
13142 if ((ipst->ips_conn_drain_list_cnt == 0) ||
13143 (ipst->ips_conn_drain_list_cnt > CONN_MAXDRAINCNT)) {
13144 /*
		 * The default number of drainers is the number of
		 * CPUs, subject to a maximum of 8 drainers.
13147 */
13148 if (boot_max_ncpus != -1)
13149 ipst->ips_conn_drain_list_cnt = MIN(boot_max_ncpus, 8);
13150 else
13151 ipst->ips_conn_drain_list_cnt = MIN(max_ncpus, 8);
13152 }
13153
13154 ipst->ips_idl_tx_list =
13155 kmem_zalloc(TX_FANOUT_SIZE * sizeof (idl_tx_list_t), KM_SLEEP);
13156 for (i = 0; i < TX_FANOUT_SIZE; i++) {
13157 itl_tx = &ipst->ips_idl_tx_list[i];
13158 itl_tx->txl_drain_list =
13159 kmem_zalloc(ipst->ips_conn_drain_list_cnt *
13160 sizeof (idl_t), KM_SLEEP);
13161 mutex_init(&itl_tx->txl_lock, NULL, MUTEX_DEFAULT, NULL);
13162 for (j = 0; j < ipst->ips_conn_drain_list_cnt; j++) {
13163 mutex_init(&itl_tx->txl_drain_list[j].idl_lock, NULL,
13164 MUTEX_DEFAULT, NULL);
13165 itl_tx->txl_drain_list[j].idl_itl = itl_tx;
13166 }
13167 }
13168 }
13169
13170 static void
13171 conn_drain_fini(ip_stack_t *ipst)
13172 {
13173 int i;
13174 idl_tx_list_t *itl_tx;
13175
13176 for (i = 0; i < TX_FANOUT_SIZE; i++) {
13177 itl_tx = &ipst->ips_idl_tx_list[i];
13178 kmem_free(itl_tx->txl_drain_list,
13179 ipst->ips_conn_drain_list_cnt * sizeof (idl_t));
13180 }
13181 kmem_free(ipst->ips_idl_tx_list,
13182 TX_FANOUT_SIZE * sizeof (idl_tx_list_t));
13183 ipst->ips_idl_tx_list = NULL;
13184 }
13185
13186 /*
13187 * Flow control has blocked us from proceeding. Insert the given conn in one
13188 * of the conn drain lists. When flow control is unblocked, either ip_wsrv()
13189 * (STREAMS) or ill_flow_enable() (direct) will be called back, which in turn
13190 * will call conn_walk_drain(). See the flow control notes at the top of this
13191 * file for more details.
13192 */
13193 void
13194 conn_drain_insert(conn_t *connp, idl_tx_list_t *tx_list)
13195 {
13196 idl_t *idl = tx_list->txl_drain_list;
13197 uint_t index;
13198 ip_stack_t *ipst = connp->conn_netstack->netstack_ip;
13199
13200 mutex_enter(&connp->conn_lock);
13201 if (connp->conn_state_flags & CONN_CLOSING) {
13202 /*
		 * The conn is closing; CONN_CLOSING has been set, so
		 * just return.
13205 */
13206 mutex_exit(&connp->conn_lock);
13207 return;
13208 } else if (connp->conn_idl == NULL) {
13209 /*
		 * Assign the next drain list round robin. We don't use
13211 * a lock, and thus it may not be strictly round robin.
13212 * Atomicity of load/stores is enough to make sure that
13213 * conn_drain_list_index is always within bounds.
13214 */
13215 index = tx_list->txl_drain_index;
13216 ASSERT(index < ipst->ips_conn_drain_list_cnt);
13217 connp->conn_idl = &tx_list->txl_drain_list[index];
13218 index++;
13219 if (index == ipst->ips_conn_drain_list_cnt)
13220 index = 0;
13221 tx_list->txl_drain_index = index;
13222 } else {
13223 ASSERT(connp->conn_idl->idl_itl == tx_list);
13224 }
13225 mutex_exit(&connp->conn_lock);
13226
13227 idl = connp->conn_idl;
13228 mutex_enter(&idl->idl_lock);
13229 if ((connp->conn_drain_prev != NULL) ||
13230 (connp->conn_state_flags & CONN_CLOSING)) {
13231 /*
13232 * The conn is either already in the drain list or closing.
13233 * (We needed to check for CONN_CLOSING again since close can
13234 * sneak in between dropping conn_lock and acquiring idl_lock.)
13235 */
13236 mutex_exit(&idl->idl_lock);
13237 return;
13238 }
13239
13240 /*
13241 * The conn is not in the drain list. Insert it at the
13242 * tail of the drain list. The drain list is circular
13243 * and doubly linked. idl_conn points to the 1st element
13244 * in the list.
13245 */
13246 if (idl->idl_conn == NULL) {
13247 idl->idl_conn = connp;
13248 connp->conn_drain_next = connp;
13249 connp->conn_drain_prev = connp;
13250 } else {
13251 conn_t *head = idl->idl_conn;
13252
13253 connp->conn_drain_next = head;
13254 connp->conn_drain_prev = head->conn_drain_prev;
13255 head->conn_drain_prev->conn_drain_next = connp;
13256 head->conn_drain_prev = connp;
13257 }
13258 /*
	 * For non-STREAMS-based sockets, assert flow control.
13260 */
13261 conn_setqfull(connp, NULL);
13262 mutex_exit(&idl->idl_lock);
13263 }
13264
13265 static void
13266 conn_drain_remove(conn_t *connp)
13267 {
13268 idl_t *idl = connp->conn_idl;
13269
13270 if (idl != NULL) {
13271 /*
13272 * Remove ourself from the drain list.
13273 */
13274 if (connp->conn_drain_next == connp) {
13275 /* Singleton in the list */
13276 ASSERT(connp->conn_drain_prev == connp);
13277 idl->idl_conn = NULL;
13278 } else {
13279 connp->conn_drain_prev->conn_drain_next =
13280 connp->conn_drain_next;
13281 connp->conn_drain_next->conn_drain_prev =
13282 connp->conn_drain_prev;
13283 if (idl->idl_conn == connp)
13284 idl->idl_conn = connp->conn_drain_next;
13285 }
13286
13287 /*
13288 * NOTE: because conn_idl is associated with a specific drain
13289 * list which in turn is tied to the index the TX ring
13290 * (txl_cookie) hashes to, and because the TX ring can change
13291 * over the lifetime of the conn_t, we must clear conn_idl so
13292 * a subsequent conn_drain_insert() will set conn_idl again
13293 * based on the latest txl_cookie.
13294 */
13295 connp->conn_idl = NULL;
13296 }
13297 connp->conn_drain_next = NULL;
13298 connp->conn_drain_prev = NULL;
13299
13300 conn_clrqfull(connp, NULL);
13301 /*
	 * For STREAMS-based sockets, open up flow control.
13303 */
13304 if (!IPCL_IS_NONSTR(connp))
13305 enableok(connp->conn_wq);
13306 }
13307
13308 /*
13309 * This conn is closing, and we are called from ip_close. OR
13310 * this conn is draining because flow-control on the ill has been relieved.
13311 *
 * In either case we must remove the conns on this idl from the list and
 * inform sockfs, via its upcalls, about the change in flow-control.
13314 */
13315 static void
13316 conn_drain(conn_t *connp, boolean_t closing)
13317 {
13318 idl_t *idl;
13319 conn_t *next_connp;
13320
13321 /*
13322 * connp->conn_idl is stable at this point, and no lock is needed
13323 * to check it. If we are called from ip_close, close has already
13324 * set CONN_CLOSING, thus freezing the value of conn_idl, and
13325 * called us only because conn_idl is non-null. If we are called thru
13326 * service, conn_idl could be null, but it cannot change because
13327 * service is single-threaded per queue, and there cannot be another
13328 * instance of service trying to call conn_drain_insert on this conn
13329 * now.
13330 */
13331 ASSERT(!closing || connp == NULL || connp->conn_idl != NULL);
13332
13333 /*
13334 * If the conn doesn't exist or is not on a drain list, bail.
13335 */
13336 if (connp == NULL || connp->conn_idl == NULL ||
13337 connp->conn_drain_prev == NULL) {
13338 return;
13339 }
13340
13341 idl = connp->conn_idl;
13342 ASSERT(MUTEX_HELD(&idl->idl_lock));
13343
13344 if (!closing) {
13345 next_connp = connp->conn_drain_next;
13346 while (next_connp != connp) {
13347 conn_t *delconnp = next_connp;
13348
13349 next_connp = next_connp->conn_drain_next;
13350 conn_drain_remove(delconnp);
13351 }
13352 ASSERT(connp->conn_drain_next == idl->idl_conn);
13353 }
13354 conn_drain_remove(connp);
13355 }
13356
13357 /*
13358 * Write service routine. Shared perimeter entry point.
 * The device queue's message count has fallen below the low-water mark,
 * and STREAMS has backenabled the ill_wq. Send a sockfs notification
 * about flow-control to each waiting conn.
13362 */
13363 void
13364 ip_wsrv(queue_t *q)
13365 {
13366 ill_t *ill;
13367
13368 ill = (ill_t *)q->q_ptr;
13369 if (ill->ill_state_flags == 0) {
13370 ip_stack_t *ipst = ill->ill_ipst;
13371
13372 /*
13373 * The device flow control has opened up.
13374 * Walk through conn drain lists and qenable the
13375 * first conn in each list. This makes sense only
		 * if the stream is fully plumbed and set up.
13377 * Hence the ill_state_flags check above.
13378 */
13379 ip1dbg(("ip_wsrv: walking\n"));
13380 conn_walk_drain(ipst, &ipst->ips_idl_tx_list[0]);
13381 enableok(ill->ill_wq);
13382 }
13383 }
13384
13385 /*
13386 * Callback to disable flow control in IP.
13387 *
13388 * This is a mac client callback added when the DLD_CAPAB_DIRECT capability
13389 * is enabled.
13390 *
 * When MAC_TX() is not able to send any more packets, dld sets its queue
 * to QFULL and enables STREAMS flow control. Later, when the underlying
 * driver is able to send packets again, it calls the mac_tx_(ring_)update()
 * function and wakes up the corresponding mac worker threads, which in
 * turn call this callback function and disable flow control.
13396 */
13397 void
13398 ill_flow_enable(void *arg, ip_mac_tx_cookie_t cookie)
13399 {
13400 ill_t *ill = (ill_t *)arg;
13401 ip_stack_t *ipst = ill->ill_ipst;
13402 idl_tx_list_t *idl_txl;
13403
13404 idl_txl = &ipst->ips_idl_tx_list[IDLHASHINDEX(cookie)];
13405 mutex_enter(&idl_txl->txl_lock);
	/* add code to set a flag to indicate idl_txl is enabled */
13407 conn_walk_drain(ipst, idl_txl);
13408 mutex_exit(&idl_txl->txl_lock);
13409 }
13410
13411 /*
13412 * Flow control has been relieved and STREAMS has backenabled us; drain
13413 * all the conn lists on `tx_list'.
13414 */
13415 static void
13416 conn_walk_drain(ip_stack_t *ipst, idl_tx_list_t *tx_list)
13417 {
13418 int i;
13419 idl_t *idl;
13420
13421 IP_STAT(ipst, ip_conn_walk_drain);
13422
13423 for (i = 0; i < ipst->ips_conn_drain_list_cnt; i++) {
13424 idl = &tx_list->txl_drain_list[i];
13425 mutex_enter(&idl->idl_lock);
13426 conn_drain(idl->idl_conn, B_FALSE);
13427 mutex_exit(&idl->idl_lock);
13428 }
13429 }
13430
13431 /*
 * Determine if the ill and the multicast aspects of the packet
 * "match" the conn.
13434 */
13435 boolean_t
13436 conn_wantpacket(conn_t *connp, ip_recv_attr_t *ira, ipha_t *ipha)
13437 {
13438 ill_t *ill = ira->ira_rill;
13439 zoneid_t zoneid = ira->ira_zoneid;
13440 uint_t in_ifindex;
13441 ipaddr_t dst, src;
13442
13443 dst = ipha->ipha_dst;
13444 src = ipha->ipha_src;
13445
13446 /*
13447 * conn_incoming_ifindex is set by IP_BOUND_IF which limits
13448 * unicast, broadcast and multicast reception to
13449 * conn_incoming_ifindex.
13450 * conn_wantpacket is called for unicast, broadcast and
13451 * multicast packets.
13452 */
13453 in_ifindex = connp->conn_incoming_ifindex;
13454
13455 /* mpathd can bind to the under IPMP interface, which we allow */
13456 if (in_ifindex != 0 && in_ifindex != ill->ill_phyint->phyint_ifindex) {
13457 if (!IS_UNDER_IPMP(ill))
13458 return (B_FALSE);
13459
13460 if (in_ifindex != ipmp_ill_get_ipmp_ifindex(ill))
13461 return (B_FALSE);
13462 }
13463
13464 if (!IPCL_ZONE_MATCH(connp, zoneid))
13465 return (B_FALSE);
13466
13467 if (!(ira->ira_flags & IRAF_MULTICAST))
13468 return (B_TRUE);
13469
13470 if (connp->conn_multi_router) {
13471 /* multicast packet and multicast router socket: send up */
13472 return (B_TRUE);
13473 }
13474
13475 if (ipha->ipha_protocol == IPPROTO_PIM ||
13476 ipha->ipha_protocol == IPPROTO_RSVP)
13477 return (B_TRUE);
13478
13479 return (conn_hasmembers_ill_withsrc_v4(connp, dst, src, ira->ira_ill));
13480 }
13481
13482 void
13483 conn_setqfull(conn_t *connp, boolean_t *flow_stopped)
13484 {
13485 if (IPCL_IS_NONSTR(connp)) {
13486 (*connp->conn_upcalls->su_txq_full)
13487 (connp->conn_upper_handle, B_TRUE);
13488 if (flow_stopped != NULL)
13489 *flow_stopped = B_TRUE;
13490 } else {
13491 queue_t *q = connp->conn_wq;
13492
13493 ASSERT(q != NULL);
13494 if (!(q->q_flag & QFULL)) {
13495 mutex_enter(QLOCK(q));
13496 if (!(q->q_flag & QFULL)) {
13497 /* still need to set QFULL */
13498 q->q_flag |= QFULL;
13499 /* set flow_stopped to true under QLOCK */
13500 if (flow_stopped != NULL)
13501 *flow_stopped = B_TRUE;
13502 mutex_exit(QLOCK(q));
13503 } else {
13504 /* flow_stopped is left unchanged */
13505 mutex_exit(QLOCK(q));
13506 }
13507 }
13508 }
13509 }
13510
13511 void
13512 conn_clrqfull(conn_t *connp, boolean_t *flow_stopped)
13513 {
13514 if (IPCL_IS_NONSTR(connp)) {
13515 (*connp->conn_upcalls->su_txq_full)
13516 (connp->conn_upper_handle, B_FALSE);
13517 if (flow_stopped != NULL)
13518 *flow_stopped = B_FALSE;
13519 } else {
13520 queue_t *q = connp->conn_wq;
13521
13522 ASSERT(q != NULL);
13523 if (q->q_flag & QFULL) {
13524 mutex_enter(QLOCK(q));
13525 if (q->q_flag & QFULL) {
13526 q->q_flag &= ~QFULL;
13527 /* set flow_stopped to false under QLOCK */
13528 if (flow_stopped != NULL)
13529 *flow_stopped = B_FALSE;
13530 mutex_exit(QLOCK(q));
13531 if (q->q_flag & QWANTW)
13532 qbackenable(q, 0);
13533 } else {
13534 /* flow_stopped is left unchanged */
13535 mutex_exit(QLOCK(q));
13536 }
13537 }
13538 }
13539
13540 mutex_enter(&connp->conn_lock);
13541 connp->conn_blocked = B_FALSE;
13542 mutex_exit(&connp->conn_lock);
13543 }
13544
13545 /*
13546 * Return the length in bytes of the IPv4 headers (base header, label, and
13547 * other IP options) that will be needed based on the
13548 * ip_pkt_t structure passed by the caller.
13549 *
13550 * The returned length does not include the length of the upper level
13551 * protocol (ULP) header.
13552 * The caller needs to check that the length doesn't exceed the max for IPv4.
13553 */
13554 int
13555 ip_total_hdrs_len_v4(const ip_pkt_t *ipp)
13556 {
13557 int len;
13558
13559 len = IP_SIMPLE_HDR_LENGTH;
13560 if (ipp->ipp_fields & IPPF_LABEL_V4) {
13561 ASSERT(ipp->ipp_label_len_v4 != 0);
13562 /* We need to round up here */
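		/* (e.g., a label length of 9 rounds up to 12) */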
13563 len += (ipp->ipp_label_len_v4 + 3) & ~3;
13564 }
13565
13566 if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) {
13567 ASSERT(ipp->ipp_ipv4_options_len != 0);
13568 ASSERT((ipp->ipp_ipv4_options_len & 3) == 0);
13569 len += ipp->ipp_ipv4_options_len;
13570 }
13571 return (len);
13572 }
13573
13574 /*
13575 * All-purpose routine to build an IPv4 header with options based
13576 * on the abstract ip_pkt_t.
13577 *
13578 * The caller has to set the source and destination address as well as
13579 * ipha_length. The caller has to massage any source route and compensate
13580 * for the ULP pseudo-header checksum due to the source route.
13581 */
13582 void
13583 ip_build_hdrs_v4(uchar_t *buf, uint_t buf_len, const ip_pkt_t *ipp,
13584 uint8_t protocol)
13585 {
13586 ipha_t *ipha = (ipha_t *)buf;
13587 uint8_t *cp;
13588
13589 /* Initialize IPv4 header */
13590 ipha->ipha_type_of_service = ipp->ipp_type_of_service;
13591 ipha->ipha_length = 0; /* Caller will set later */
13592 ipha->ipha_ident = 0;
13593 ipha->ipha_fragment_offset_and_flags = 0;
13594 ipha->ipha_ttl = ipp->ipp_unicast_hops;
13595 ipha->ipha_protocol = protocol;
13596 ipha->ipha_hdr_checksum = 0;
13597
13598 if ((ipp->ipp_fields & IPPF_ADDR) &&
13599 IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr))
13600 ipha->ipha_src = ipp->ipp_addr_v4;
13601
13602 cp = (uint8_t *)&ipha[1];
13603 if (ipp->ipp_fields & IPPF_LABEL_V4) {
13604 ASSERT(ipp->ipp_label_len_v4 != 0);
13605 bcopy(ipp->ipp_label_v4, cp, ipp->ipp_label_len_v4);
13606 cp += ipp->ipp_label_len_v4;
13607 /* We need to round up here */
13608 while ((uintptr_t)cp & 0x3) {
13609 *cp++ = IPOPT_NOP;
13610 }
13611 }
13612
13613 if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) {
13614 ASSERT(ipp->ipp_ipv4_options_len != 0);
13615 ASSERT((ipp->ipp_ipv4_options_len & 3) == 0);
13616 bcopy(ipp->ipp_ipv4_options, cp, ipp->ipp_ipv4_options_len);
13617 cp += ipp->ipp_ipv4_options_len;
13618 }
13619 ipha->ipha_version_and_hdr_length =
13620 (uint8_t)((IP_VERSION << 4) + buf_len / 4);
13621
13622 ASSERT((int)(cp - buf) == buf_len);
13623 }
13624
13625 /* Allocate the private structure */
13626 static int
13627 ip_priv_alloc(void **bufp)
13628 {
13629 void *buf;
13630
13631 if ((buf = kmem_alloc(sizeof (ip_priv_t), KM_NOSLEEP)) == NULL)
13632 return (ENOMEM);
13633
13634 *bufp = buf;
13635 return (0);
13636 }
13637
13638 /* Function to delete the private structure */
13639 void
13640 ip_priv_free(void *buf)
13641 {
13642 ASSERT(buf != NULL);
13643 kmem_free(buf, sizeof (ip_priv_t));
13644 }
13645
13646 /*
13647 * The entry point for IPPF processing.
13648 * If the classifier (IPGPC_CLASSIFY) is not loaded and configured, the
13649 * routine just returns.
13650 *
13651 * When called, ip_process generates an ipp_packet_t structure
 * which holds the state information for this packet and invokes
 * the classifier (via ipp_packet_process). The classification, depending on
13654 * configured filters, results in a list of actions for this packet. Invoking
13655 * an action may cause the packet to be dropped, in which case we return NULL.
13656 * proc indicates the callout position for
13657 * this packet and ill is the interface this packet arrived on or will leave
13658 * on (inbound and outbound resp.).
13659 *
 * We do the processing on the rill (mapped to the upper ill if IPMP), but
 * count MIB statistics on the ill corresponding to the destination IP
 * address.
13662 */
13663 mblk_t *
13664 ip_process(ip_proc_t proc, mblk_t *mp, ill_t *rill, ill_t *ill)
13665 {
13666 ip_priv_t *priv;
13667 ipp_action_id_t aid;
13668 int rc = 0;
13669 ipp_packet_t *pp;
13670
13671 /* If the classifier is not loaded, return */
13672 if ((aid = ipp_action_lookup(IPGPC_CLASSIFY)) == IPP_ACTION_INVAL) {
13673 return (mp);
13674 }
13675
13676 ASSERT(mp != NULL);
13677
13678 /* Allocate the packet structure */
13679 rc = ipp_packet_alloc(&pp, "ip", aid);
13680 if (rc != 0)
13681 goto drop;
13682
13683 /* Allocate the private structure */
13684 rc = ip_priv_alloc((void **)&priv);
13685 if (rc != 0) {
13686 ipp_packet_free(pp);
13687 goto drop;
13688 }
13689 priv->proc = proc;
13690 priv->ill_index = ill_get_upper_ifindex(rill);
13691
13692 ipp_packet_set_private(pp, priv, ip_priv_free);
13693 ipp_packet_set_data(pp, mp);
13694
13695 /* Invoke the classifier */
13696 rc = ipp_packet_process(&pp);
13697 if (pp != NULL) {
13698 mp = ipp_packet_get_data(pp);
13699 ipp_packet_free(pp);
13700 if (rc != 0)
13701 goto drop;
13702 return (mp);
13703 } else {
13704 /* No mp to trace in ip_drop_input/ip_drop_output */
13705 mp = NULL;
13706 }
13707 drop:
13708 if (proc == IPP_LOCAL_IN || proc == IPP_FWD_IN) {
13709 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
13710 ip_drop_input("ip_process", mp, ill);
13711 } else {
13712 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
13713 ip_drop_output("ip_process", mp, ill);
13714 }
13715 freemsg(mp);
13716 return (NULL);
13717 }
13718
13719 /*
13720 * Propagate a multicast group membership operation (add/drop) on
13721 * all the interfaces crossed by the related multirt routes.
13722 * The call is considered successful if the operation succeeds
13723 * on at least one interface.
13724 *
13725 * This assumes that a set of IRE_HOST/RTF_MULTIRT has been created for the
13726 * multicast addresses with the ire argument being the first one.
 * We walk the bucket to find all of those.
13728 *
13729 * Common to IPv4 and IPv6.
13730 */
13731 static int
13732 ip_multirt_apply_membership(int (*fn)(conn_t *, boolean_t,
13733 const in6_addr_t *, ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *),
13734 ire_t *ire, conn_t *connp, boolean_t checkonly, const in6_addr_t *v6group,
13735 mcast_record_t fmode, const in6_addr_t *v6src)
13736 {
13737 ire_t *ire_gw;
13738 irb_t *irb;
13739 int ifindex;
13740 int error = 0;
13741 int result;
13742 ip_stack_t *ipst = ire->ire_ipst;
13743 ipaddr_t group;
13744 boolean_t isv6;
13745 int match_flags;
13746
13747 if (IN6_IS_ADDR_V4MAPPED(v6group)) {
13748 IN6_V4MAPPED_TO_IPADDR(v6group, group);
13749 isv6 = B_FALSE;
13750 } else {
13751 isv6 = B_TRUE;
13752 }
13753
13754 irb = ire->ire_bucket;
13755 ASSERT(irb != NULL);
13756
13757 result = 0;
13758 irb_refhold(irb);
13759 for (; ire != NULL; ire = ire->ire_next) {
13760 if ((ire->ire_flags & RTF_MULTIRT) == 0)
13761 continue;
13762
13763 /* We handle -ifp routes by matching on the ill if set */
13764 match_flags = MATCH_IRE_TYPE;
13765 if (ire->ire_ill != NULL)
13766 match_flags |= MATCH_IRE_ILL;
13767
13768 if (isv6) {
13769 if (!IN6_ARE_ADDR_EQUAL(&ire->ire_addr_v6, v6group))
13770 continue;
13771
13772 ire_gw = ire_ftable_lookup_v6(&ire->ire_gateway_addr_v6,
13773 0, 0, IRE_INTERFACE, ire->ire_ill, ALL_ZONES, NULL,
13774 match_flags, 0, ipst, NULL);
13775 } else {
13776 if (ire->ire_addr != group)
13777 continue;
13778
13779 ire_gw = ire_ftable_lookup_v4(ire->ire_gateway_addr,
13780 0, 0, IRE_INTERFACE, ire->ire_ill, ALL_ZONES, NULL,
13781 match_flags, 0, ipst, NULL);
13782 }
13783 /* No interface route exists for the gateway; skip this ire. */
13784 if (ire_gw == NULL)
13785 continue;
13786 if (ire_gw->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
13787 ire_refrele(ire_gw);
13788 continue;
13789 }
13790 ASSERT(ire_gw->ire_ill != NULL); /* IRE_INTERFACE */
13791 ifindex = ire_gw->ire_ill->ill_phyint->phyint_ifindex;
13792
13793 /*
13794 * The operation is considered a success if
13795 * it succeeds at least once on any one interface.
13796 */
13797 error = fn(connp, checkonly, v6group, INADDR_ANY, ifindex,
13798 fmode, v6src);
13799 if (error == 0)
13800 result = CGTP_MCAST_SUCCESS;
13801
13802 ire_refrele(ire_gw);
13803 }
13804 irb_refrele(irb);
13805 /*
13806 * Consider the call as successful if we succeeded on at least
13807 * one interface. Otherwise, return the last encountered error.
13808 */
13809 return (result == CGTP_MCAST_SUCCESS ? 0 : error);
13810 }
13811
13812 /*
13813 * Return the expected CGTP hooks version number.
13814 */
13815 int
13816 ip_cgtp_filter_supported(void)
13817 {
13818 return (ip_cgtp_filter_rev);
13819 }
13820
13821 /*
13822 * CGTP hooks can be registered by invoking this function.
13823 * Checks that the version number matches.
13824 */
13825 int
13826 ip_cgtp_filter_register(netstackid_t stackid, cgtp_filter_ops_t *ops)
13827 {
13828 netstack_t *ns;
13829 ip_stack_t *ipst;
13830
13831 if (ops->cfo_filter_rev != CGTP_FILTER_REV)
13832 return (ENOTSUP);
13833
13834 ns = netstack_find_by_stackid(stackid);
13835 if (ns == NULL)
13836 return (EINVAL);
13837 ipst = ns->netstack_ip;
13838 ASSERT(ipst != NULL);
13839
13840 if (ipst->ips_ip_cgtp_filter_ops != NULL) {
13841 netstack_rele(ns);
13842 return (EALREADY);
13843 }
13844
13845 ipst->ips_ip_cgtp_filter_ops = ops;
13846
13847 ill_set_inputfn_all(ipst);
13848
13849 netstack_rele(ns);
13850 return (0);
13851 }
13852
13853 /*
13854 * CGTP hooks can be unregistered by invoking this function.
13855 * Returns ENXIO if there was no registration.
13856 * Returns EBUSY if the ndd variable has not been turned off.
13857 */
13858 int
13859 ip_cgtp_filter_unregister(netstackid_t stackid)
13860 {
13861 netstack_t *ns;
13862 ip_stack_t *ipst;
13863
13864 ns = netstack_find_by_stackid(stackid);
13865 if (ns == NULL)
13866 return (EINVAL);
13867 ipst = ns->netstack_ip;
13868 ASSERT(ipst != NULL);
13869
13870 if (ipst->ips_ip_cgtp_filter) {
13871 netstack_rele(ns);
13872 return (EBUSY);
13873 }
13874
13875 if (ipst->ips_ip_cgtp_filter_ops == NULL) {
13876 netstack_rele(ns);
13877 return (ENXIO);
13878 }
13879 ipst->ips_ip_cgtp_filter_ops = NULL;
13880
13881 ill_set_inputfn_all(ipst);
13882
13883 netstack_rele(ns);
13884 return (0);
13885 }
13886
13887 /*
13888 * Check whether there is a CGTP filter registration.
13889 * Returns non-zero if there is a registration, otherwise returns zero.
13890 * Note: returns zero if bad stackid.
13891 */
13892 int
13893 ip_cgtp_filter_is_registered(netstackid_t stackid)
13894 {
13895 netstack_t *ns;
13896 ip_stack_t *ipst;
13897 int ret;
13898
13899 ns = netstack_find_by_stackid(stackid);
13900 if (ns == NULL)
13901 return (0);
13902 ipst = ns->netstack_ip;
13903 ASSERT(ipst != NULL);
13904
13905 if (ipst->ips_ip_cgtp_filter_ops != NULL)
13906 ret = 1;
13907 else
13908 ret = 0;
13909
13910 netstack_rele(ns);
13911 return (ret);
13912 }
13913
13914 static int
13915 ip_squeue_switch(int val)
13916 {
13917 int rval;
13918
13919 switch (val) {
13920 case IP_SQUEUE_ENTER_NODRAIN:
13921 rval = SQ_NODRAIN;
13922 break;
13923 case IP_SQUEUE_ENTER:
13924 rval = SQ_PROCESS;
13925 break;
13926 case IP_SQUEUE_FILL:
13927 default:
13928 rval = SQ_FILL;
13929 break;
13930 }
13931 return (rval);
13932 }
13933
13934 static void *
13935 ip_kstat2_init(netstackid_t stackid, ip_stat_t *ip_statisticsp)
13936 {
13937 kstat_t *ksp;
13938
13939 ip_stat_t template = {
13940 { "ip_udp_fannorm", KSTAT_DATA_UINT64 },
13941 { "ip_udp_fanmb", KSTAT_DATA_UINT64 },
13942 { "ip_recv_pullup", KSTAT_DATA_UINT64 },
13943 { "ip_db_ref", KSTAT_DATA_UINT64 },
13944 { "ip_notaligned", KSTAT_DATA_UINT64 },
13945 { "ip_multimblk", KSTAT_DATA_UINT64 },
13946 { "ip_opt", KSTAT_DATA_UINT64 },
13947 { "ipsec_proto_ahesp", KSTAT_DATA_UINT64 },
13948 { "ip_conn_flputbq", KSTAT_DATA_UINT64 },
13949 { "ip_conn_walk_drain", KSTAT_DATA_UINT64 },
13950 { "ip_out_sw_cksum", KSTAT_DATA_UINT64 },
13951 { "ip_out_sw_cksum_bytes", KSTAT_DATA_UINT64 },
13952 { "ip_in_sw_cksum", KSTAT_DATA_UINT64 },
13953 { "ip_ire_reclaim_calls", KSTAT_DATA_UINT64 },
13954 { "ip_ire_reclaim_deleted", KSTAT_DATA_UINT64 },
13955 { "ip_nce_reclaim_calls", KSTAT_DATA_UINT64 },
13956 { "ip_nce_reclaim_deleted", KSTAT_DATA_UINT64 },
13957 { "ip_dce_reclaim_calls", KSTAT_DATA_UINT64 },
13958 { "ip_dce_reclaim_deleted", KSTAT_DATA_UINT64 },
13959 { "ip_tcp_in_full_hw_cksum_err", KSTAT_DATA_UINT64 },
13960 { "ip_tcp_in_part_hw_cksum_err", KSTAT_DATA_UINT64 },
13961 { "ip_tcp_in_sw_cksum_err", KSTAT_DATA_UINT64 },
13962 { "ip_udp_in_full_hw_cksum_err", KSTAT_DATA_UINT64 },
13963 { "ip_udp_in_part_hw_cksum_err", KSTAT_DATA_UINT64 },
13964 { "ip_udp_in_sw_cksum_err", KSTAT_DATA_UINT64 },
13965 { "conn_in_recvdstaddr", KSTAT_DATA_UINT64 },
13966 { "conn_in_recvopts", KSTAT_DATA_UINT64 },
13967 { "conn_in_recvif", KSTAT_DATA_UINT64 },
13968 { "conn_in_recvslla", KSTAT_DATA_UINT64 },
13969 { "conn_in_recvucred", KSTAT_DATA_UINT64 },
13970 { "conn_in_recvttl", KSTAT_DATA_UINT64 },
13971 { "conn_in_recvhopopts", KSTAT_DATA_UINT64 },
13972 { "conn_in_recvhoplimit", KSTAT_DATA_UINT64 },
13973 { "conn_in_recvdstopts", KSTAT_DATA_UINT64 },
13974 { "conn_in_recvrthdrdstopts", KSTAT_DATA_UINT64 },
13975 { "conn_in_recvrthdr", KSTAT_DATA_UINT64 },
13976 { "conn_in_recvpktinfo", KSTAT_DATA_UINT64 },
13977 { "conn_in_recvtclass", KSTAT_DATA_UINT64 },
13978 { "conn_in_timestamp", KSTAT_DATA_UINT64 },
13979 };
13980
13981 ksp = kstat_create_netstack("ip", 0, "ipstat", "net",
13982 KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
13983 KSTAT_FLAG_VIRTUAL, stackid);
13984
13985 if (ksp == NULL)
13986 return (NULL);
13987
13988 bcopy(&template, ip_statisticsp, sizeof (template));
13989 ksp->ks_data = (void *)ip_statisticsp;
13990 ksp->ks_private = (void *)(uintptr_t)stackid;
13991
13992 kstat_install(ksp);
13993 return (ksp);
13994 }
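
/*
* The template/bcopy pattern above is the usual one for per-netstack
* kstats: the kstat is created KSTAT_FLAG_VIRTUAL, so ks_data simply
* points at the caller-supplied ip_stat_t rather than at storage
* allocated by the kstat framework. From userland these counters can
* be inspected with, for example, "kstat -m ip -n ipstat" (kstat(1M)).
*/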
13995
13996 static void
13997 ip_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
13998 {
13999 if (ksp != NULL) {
14000 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
14001 kstat_delete_netstack(ksp, stackid);
14002 }
14003 }
14004
14005 static void *
14006 ip_kstat_init(netstackid_t stackid, ip_stack_t *ipst)
14007 {
14008 kstat_t *ksp;
14009
14010 ip_named_kstat_t template = {
14011 { "forwarding", KSTAT_DATA_UINT32, 0 },
14012 { "defaultTTL", KSTAT_DATA_UINT32, 0 },
14013 { "inReceives", KSTAT_DATA_UINT64, 0 },
14014 { "inHdrErrors", KSTAT_DATA_UINT32, 0 },
14015 { "inAddrErrors", KSTAT_DATA_UINT32, 0 },
14016 { "forwDatagrams", KSTAT_DATA_UINT64, 0 },
14017 { "inUnknownProtos", KSTAT_DATA_UINT32, 0 },
14018 { "inDiscards", KSTAT_DATA_UINT32, 0 },
14019 { "inDelivers", KSTAT_DATA_UINT64, 0 },
14020 { "outRequests", KSTAT_DATA_UINT64, 0 },
14021 { "outDiscards", KSTAT_DATA_UINT32, 0 },
14022 { "outNoRoutes", KSTAT_DATA_UINT32, 0 },
14023 { "reasmTimeout", KSTAT_DATA_UINT32, 0 },
14024 { "reasmReqds", KSTAT_DATA_UINT32, 0 },
14025 { "reasmOKs", KSTAT_DATA_UINT32, 0 },
14026 { "reasmFails", KSTAT_DATA_UINT32, 0 },
14027 { "fragOKs", KSTAT_DATA_UINT32, 0 },
14028 { "fragFails", KSTAT_DATA_UINT32, 0 },
14029 { "fragCreates", KSTAT_DATA_UINT32, 0 },
14030 { "addrEntrySize", KSTAT_DATA_INT32, 0 },
14031 { "routeEntrySize", KSTAT_DATA_INT32, 0 },
14032 { "netToMediaEntrySize", KSTAT_DATA_INT32, 0 },
14033 { "routingDiscards", KSTAT_DATA_UINT32, 0 },
14034 { "inErrs", KSTAT_DATA_UINT32, 0 },
14035 { "noPorts", KSTAT_DATA_UINT32, 0 },
14036 { "inCksumErrs", KSTAT_DATA_UINT32, 0 },
14037 { "reasmDuplicates", KSTAT_DATA_UINT32, 0 },
14038 { "reasmPartDups", KSTAT_DATA_UINT32, 0 },
14039 { "forwProhibits", KSTAT_DATA_UINT32, 0 },
14040 { "udpInCksumErrs", KSTAT_DATA_UINT32, 0 },
14041 { "udpInOverflows", KSTAT_DATA_UINT32, 0 },
14042 { "rawipInOverflows", KSTAT_DATA_UINT32, 0 },
14043 { "ipsecInSucceeded", KSTAT_DATA_UINT32, 0 },
14044 { "ipsecInFailed", KSTAT_DATA_INT32, 0 },
14045 { "memberEntrySize", KSTAT_DATA_INT32, 0 },
14046 { "inIPv6", KSTAT_DATA_UINT32, 0 },
14047 { "outIPv6", KSTAT_DATA_UINT32, 0 },
14048 { "outSwitchIPv6", KSTAT_DATA_UINT32, 0 },
14049 };
14050
14051 ksp = kstat_create_netstack("ip", 0, "ip", "mib2", KSTAT_TYPE_NAMED,
14052 NUM_OF_FIELDS(ip_named_kstat_t), 0, stackid);
14053 if (ksp == NULL || ksp->ks_data == NULL)
14054 return (NULL);
14055
14056 template.forwarding.value.ui32 = WE_ARE_FORWARDING(ipst) ? 1:2;
14057 template.defaultTTL.value.ui32 = (uint32_t)ipst->ips_ip_def_ttl;
14058 template.reasmTimeout.value.ui32 = ipst->ips_ip_reassembly_timeout;
14059 template.addrEntrySize.value.i32 = sizeof (mib2_ipAddrEntry_t);
14060 template.routeEntrySize.value.i32 = sizeof (mib2_ipRouteEntry_t);
14061
14062 template.netToMediaEntrySize.value.i32 =
14063 sizeof (mib2_ipNetToMediaEntry_t);
14064
14065 template.memberEntrySize.value.i32 = sizeof (ipv6_member_t);
14066
14067 bcopy(&template, ksp->ks_data, sizeof (template));
14068 ksp->ks_update = ip_kstat_update;
14069 ksp->ks_private = (void *)(uintptr_t)stackid;
14070
14071 kstat_install(ksp);
14072 return (ksp);
14073 }
14074
14075 static void
14076 ip_kstat_fini(netstackid_t stackid, kstat_t *ksp)
14077 {
14078 if (ksp != NULL) {
14079 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
14080 kstat_delete_netstack(ksp, stackid);
14081 }
14082 }
14083
14084 static int
14085 ip_kstat_update(kstat_t *kp, int rw)
14086 {
14087 ip_named_kstat_t *ipkp;
14088 mib2_ipIfStatsEntry_t ipmib;
14089 ill_walk_context_t ctx;
14090 ill_t *ill;
14091 netstackid_t stackid = (zoneid_t)(uintptr_t)kp->ks_private;
14092 netstack_t *ns;
14093 ip_stack_t *ipst;
14094
14095 if (kp == NULL || kp->ks_data == NULL)
14096 return (EIO);
14097
14098 if (rw == KSTAT_WRITE)
14099 return (EACCES);
14100
14101 ns = netstack_find_by_stackid(stackid);
14102 if (ns == NULL)
14103 return (-1);
14104 ipst = ns->netstack_ip;
14105 if (ipst == NULL) {
14106 netstack_rele(ns);
14107 return (-1);
14108 }
14109 ipkp = (ip_named_kstat_t *)kp->ks_data;
14110
14111 bcopy(&ipst->ips_ip_mib, &ipmib, sizeof (ipmib));
14112 rw_enter(&ipst->ips_ill_g_lock, RW_READER);
14113 ill = ILL_START_WALK_V4(&ctx, ipst);
14114 for (; ill != NULL; ill = ill_next(&ctx, ill))
14115 ip_mib2_add_ip_stats(&ipmib, ill->ill_ip_mib);
14116 rw_exit(&ipst->ips_ill_g_lock);
14117
14118 ipkp->forwarding.value.ui32 = ipmib.ipIfStatsForwarding;
14119 ipkp->defaultTTL.value.ui32 = ipmib.ipIfStatsDefaultTTL;
14120 ipkp->inReceives.value.ui64 = ipmib.ipIfStatsHCInReceives;
14121 ipkp->inHdrErrors.value.ui32 = ipmib.ipIfStatsInHdrErrors;
14122 ipkp->inAddrErrors.value.ui32 = ipmib.ipIfStatsInAddrErrors;
14123 ipkp->forwDatagrams.value.ui64 = ipmib.ipIfStatsHCOutForwDatagrams;
14124 ipkp->inUnknownProtos.value.ui32 = ipmib.ipIfStatsInUnknownProtos;
14125 ipkp->inDiscards.value.ui32 = ipmib.ipIfStatsInDiscards;
14126 ipkp->inDelivers.value.ui64 = ipmib.ipIfStatsHCInDelivers;
14127 ipkp->outRequests.value.ui64 = ipmib.ipIfStatsHCOutRequests;
14128 ipkp->outDiscards.value.ui32 = ipmib.ipIfStatsOutDiscards;
14129 ipkp->outNoRoutes.value.ui32 = ipmib.ipIfStatsOutNoRoutes;
14130 ipkp->reasmTimeout.value.ui32 = ipst->ips_ip_reassembly_timeout;
14131 ipkp->reasmReqds.value.ui32 = ipmib.ipIfStatsReasmReqds;
14132 ipkp->reasmOKs.value.ui32 = ipmib.ipIfStatsReasmOKs;
14133 ipkp->reasmFails.value.ui32 = ipmib.ipIfStatsReasmFails;
14134 ipkp->fragOKs.value.ui32 = ipmib.ipIfStatsOutFragOKs;
14135 ipkp->fragFails.value.ui32 = ipmib.ipIfStatsOutFragFails;
14136 ipkp->fragCreates.value.ui32 = ipmib.ipIfStatsOutFragCreates;
14137
14138 ipkp->routingDiscards.value.ui32 = 0;
14139 ipkp->inErrs.value.ui32 = ipmib.tcpIfStatsInErrs;
14140 ipkp->noPorts.value.ui32 = ipmib.udpIfStatsNoPorts;
14141 ipkp->inCksumErrs.value.ui32 = ipmib.ipIfStatsInCksumErrs;
14142 ipkp->reasmDuplicates.value.ui32 = ipmib.ipIfStatsReasmDuplicates;
14143 ipkp->reasmPartDups.value.ui32 = ipmib.ipIfStatsReasmPartDups;
14144 ipkp->forwProhibits.value.ui32 = ipmib.ipIfStatsForwProhibits;
14145 ipkp->udpInCksumErrs.value.ui32 = ipmib.udpIfStatsInCksumErrs;
14146 ipkp->udpInOverflows.value.ui32 = ipmib.udpIfStatsInOverflows;
14147 ipkp->rawipInOverflows.value.ui32 = ipmib.rawipIfStatsInOverflows;
14148 ipkp->ipsecInSucceeded.value.ui32 = ipmib.ipsecIfStatsInSucceeded;
14149 ipkp->ipsecInFailed.value.i32 = ipmib.ipsecIfStatsInFailed;
14150
14151 ipkp->inIPv6.value.ui32 = ipmib.ipIfStatsInWrongIPVersion;
14152 ipkp->outIPv6.value.ui32 = ipmib.ipIfStatsOutWrongIPVersion;
14153 ipkp->outSwitchIPv6.value.ui32 = ipmib.ipIfStatsOutSwitchIPVersion;
14154
14155 netstack_rele(ns);
14156
14157 return (0);
14158 }
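
/*
* Userland consumers (e.g. an SNMP agent) see the result of the update
* above through libkstat; a minimal, hypothetical sketch of a reader:
*
*	kstat_ctl_t *kc = kstat_open();
*	kstat_t *ksp = kstat_lookup(kc, "ip", 0, "ip");
*	(void) kstat_read(kc, ksp, NULL);	(drives ip_kstat_update)
*	kstat_named_t *kn = kstat_data_lookup(ksp, "inReceives");
*	(void) kstat_close(kc);
*/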
14159
14160 static void *
14161 icmp_kstat_init(netstackid_t stackid)
14162 {
14163 kstat_t *ksp;
14164
14165 icmp_named_kstat_t template = {
14166 { "inMsgs", KSTAT_DATA_UINT32 },
14167 { "inErrors", KSTAT_DATA_UINT32 },
14168 { "inDestUnreachs", KSTAT_DATA_UINT32 },
14169 { "inTimeExcds", KSTAT_DATA_UINT32 },
14170 { "inParmProbs", KSTAT_DATA_UINT32 },
14171 { "inSrcQuenchs", KSTAT_DATA_UINT32 },
14172 { "inRedirects", KSTAT_DATA_UINT32 },
14173 { "inEchos", KSTAT_DATA_UINT32 },
14174 { "inEchoReps", KSTAT_DATA_UINT32 },
14175 { "inTimestamps", KSTAT_DATA_UINT32 },
14176 { "inTimestampReps", KSTAT_DATA_UINT32 },
14177 { "inAddrMasks", KSTAT_DATA_UINT32 },
14178 { "inAddrMaskReps", KSTAT_DATA_UINT32 },
14179 { "outMsgs", KSTAT_DATA_UINT32 },
14180 { "outErrors", KSTAT_DATA_UINT32 },
14181 { "outDestUnreachs", KSTAT_DATA_UINT32 },
14182 { "outTimeExcds", KSTAT_DATA_UINT32 },
14183 { "outParmProbs", KSTAT_DATA_UINT32 },
14184 { "outSrcQuenchs", KSTAT_DATA_UINT32 },
14185 { "outRedirects", KSTAT_DATA_UINT32 },
14186 { "outEchos", KSTAT_DATA_UINT32 },
14187 { "outEchoReps", KSTAT_DATA_UINT32 },
14188 { "outTimestamps", KSTAT_DATA_UINT32 },
14189 { "outTimestampReps", KSTAT_DATA_UINT32 },
14190 { "outAddrMasks", KSTAT_DATA_UINT32 },
14191 { "outAddrMaskReps", KSTAT_DATA_UINT32 },
{ "inCksumErrs", KSTAT_DATA_UINT32 },
14193 { "inUnknowns", KSTAT_DATA_UINT32 },
14194 { "inFragNeeded", KSTAT_DATA_UINT32 },
14195 { "outFragNeeded", KSTAT_DATA_UINT32 },
14196 { "outDrops", KSTAT_DATA_UINT32 },
{ "inOverflows", KSTAT_DATA_UINT32 },
14198 { "inBadRedirects", KSTAT_DATA_UINT32 },
14199 };
14200
14201 ksp = kstat_create_netstack("ip", 0, "icmp", "mib2", KSTAT_TYPE_NAMED,
14202 NUM_OF_FIELDS(icmp_named_kstat_t), 0, stackid);
14203 if (ksp == NULL || ksp->ks_data == NULL)
14204 return (NULL);
14205
14206 bcopy(&template, ksp->ks_data, sizeof (template));
14207
14208 ksp->ks_update = icmp_kstat_update;
14209 ksp->ks_private = (void *)(uintptr_t)stackid;
14210
14211 kstat_install(ksp);
14212 return (ksp);
14213 }
14214
14215 static void
14216 icmp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
14217 {
14218 if (ksp != NULL) {
14219 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
14220 kstat_delete_netstack(ksp, stackid);
14221 }
14222 }
14223
14224 static int
14225 icmp_kstat_update(kstat_t *kp, int rw)
14226 {
14227 icmp_named_kstat_t *icmpkp;
14228 netstackid_t stackid = (zoneid_t)(uintptr_t)kp->ks_private;
14229 netstack_t *ns;
14230 ip_stack_t *ipst;
14231
14232 if ((kp == NULL) || (kp->ks_data == NULL))
14233 return (EIO);
14234
14235 if (rw == KSTAT_WRITE)
14236 return (EACCES);
14237
14238 ns = netstack_find_by_stackid(stackid);
14239 if (ns == NULL)
14240 return (-1);
14241 ipst = ns->netstack_ip;
14242 if (ipst == NULL) {
14243 netstack_rele(ns);
14244 return (-1);
14245 }
14246 icmpkp = (icmp_named_kstat_t *)kp->ks_data;
14247
14248 icmpkp->inMsgs.value.ui32 = ipst->ips_icmp_mib.icmpInMsgs;
14249 icmpkp->inErrors.value.ui32 = ipst->ips_icmp_mib.icmpInErrors;
14250 icmpkp->inDestUnreachs.value.ui32 =
14251 ipst->ips_icmp_mib.icmpInDestUnreachs;
14252 icmpkp->inTimeExcds.value.ui32 = ipst->ips_icmp_mib.icmpInTimeExcds;
14253 icmpkp->inParmProbs.value.ui32 = ipst->ips_icmp_mib.icmpInParmProbs;
14254 icmpkp->inSrcQuenchs.value.ui32 = ipst->ips_icmp_mib.icmpInSrcQuenchs;
14255 icmpkp->inRedirects.value.ui32 = ipst->ips_icmp_mib.icmpInRedirects;
14256 icmpkp->inEchos.value.ui32 = ipst->ips_icmp_mib.icmpInEchos;
14257 icmpkp->inEchoReps.value.ui32 = ipst->ips_icmp_mib.icmpInEchoReps;
14258 icmpkp->inTimestamps.value.ui32 = ipst->ips_icmp_mib.icmpInTimestamps;
14259 icmpkp->inTimestampReps.value.ui32 =
14260 ipst->ips_icmp_mib.icmpInTimestampReps;
14261 icmpkp->inAddrMasks.value.ui32 = ipst->ips_icmp_mib.icmpInAddrMasks;
14262 icmpkp->inAddrMaskReps.value.ui32 =
14263 ipst->ips_icmp_mib.icmpInAddrMaskReps;
14264 icmpkp->outMsgs.value.ui32 = ipst->ips_icmp_mib.icmpOutMsgs;
14265 icmpkp->outErrors.value.ui32 = ipst->ips_icmp_mib.icmpOutErrors;
14266 icmpkp->outDestUnreachs.value.ui32 =
14267 ipst->ips_icmp_mib.icmpOutDestUnreachs;
14268 icmpkp->outTimeExcds.value.ui32 = ipst->ips_icmp_mib.icmpOutTimeExcds;
14269 icmpkp->outParmProbs.value.ui32 = ipst->ips_icmp_mib.icmpOutParmProbs;
14270 icmpkp->outSrcQuenchs.value.ui32 =
14271 ipst->ips_icmp_mib.icmpOutSrcQuenchs;
14272 icmpkp->outRedirects.value.ui32 = ipst->ips_icmp_mib.icmpOutRedirects;
14273 icmpkp->outEchos.value.ui32 = ipst->ips_icmp_mib.icmpOutEchos;
14274 icmpkp->outEchoReps.value.ui32 = ipst->ips_icmp_mib.icmpOutEchoReps;
14275 icmpkp->outTimestamps.value.ui32 =
14276 ipst->ips_icmp_mib.icmpOutTimestamps;
14277 icmpkp->outTimestampReps.value.ui32 =
14278 ipst->ips_icmp_mib.icmpOutTimestampReps;
14279 icmpkp->outAddrMasks.value.ui32 =
14280 ipst->ips_icmp_mib.icmpOutAddrMasks;
14281 icmpkp->outAddrMaskReps.value.ui32 =
14282 ipst->ips_icmp_mib.icmpOutAddrMaskReps;
14283 icmpkp->inCksumErrs.value.ui32 = ipst->ips_icmp_mib.icmpInCksumErrs;
14284 icmpkp->inUnknowns.value.ui32 = ipst->ips_icmp_mib.icmpInUnknowns;
14285 icmpkp->inFragNeeded.value.ui32 = ipst->ips_icmp_mib.icmpInFragNeeded;
14286 icmpkp->outFragNeeded.value.ui32 =
14287 ipst->ips_icmp_mib.icmpOutFragNeeded;
14288 icmpkp->outDrops.value.ui32 = ipst->ips_icmp_mib.icmpOutDrops;
14289 icmpkp->inOverflows.value.ui32 = ipst->ips_icmp_mib.icmpInOverflows;
14290 icmpkp->inBadRedirects.value.ui32 =
14291 ipst->ips_icmp_mib.icmpInBadRedirects;
14292
14293 netstack_rele(ns);
14294 return (0);
14295 }
14296
/*
* This is the fanout function for raw sockets opened for SCTP. Note
* that it is called only after SCTP has checked that no SCTP socket
* wants the packet. Before SCTP handles this out-of-the-blue packet,
* this function is called to see if there is any raw socket for SCTP.
* If there is one and it is bound to the correct address, the packet
* is sent to that socket. Note that only one raw socket can be bound
* to a port; this is assured in ipcl_sctp_hash_insert().
*/
14306 void
14307 ip_fanout_sctp_raw(mblk_t *mp, ipha_t *ipha, ip6_t *ip6h, uint32_t ports,
14308 ip_recv_attr_t *ira)
14309 {
14310 conn_t *connp;
14311 queue_t *rq;
14312 boolean_t secure;
14313 ill_t *ill = ira->ira_ill;
14314 ip_stack_t *ipst = ill->ill_ipst;
14315 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
14316 sctp_stack_t *sctps = ipst->ips_netstack->netstack_sctp;
14317 iaflags_t iraflags = ira->ira_flags;
14318 ill_t *rill = ira->ira_rill;
14319
14320 secure = iraflags & IRAF_IPSEC_SECURE;
14321
14322 connp = ipcl_classify_raw(mp, IPPROTO_SCTP, ports, ipha, ip6h,
14323 ira, ipst);
14324 if (connp == NULL) {
14325 /*
14326 * Although raw sctp is not summed, OOB chunks must be.
14327 * Drop the packet here if the sctp checksum failed.
14328 */
14329 if (iraflags & IRAF_SCTP_CSUM_ERR) {
14330 SCTPS_BUMP_MIB(sctps, sctpChecksumError);
14331 freemsg(mp);
14332 return;
14333 }
14334 ira->ira_ill = ira->ira_rill = NULL;
14335 sctp_ootb_input(mp, ira, ipst);
14336 ira->ira_ill = ill;
14337 ira->ira_rill = rill;
14338 return;
14339 }
14340 rq = connp->conn_rq;
14341 if (IPCL_IS_NONSTR(connp) ? connp->conn_flow_cntrld : !canputnext(rq)) {
14342 CONN_DEC_REF(connp);
14343 BUMP_MIB(ill->ill_ip_mib, rawipIfStatsInOverflows);
14344 freemsg(mp);
14345 return;
14346 }
14347 if (((iraflags & IRAF_IS_IPV4) ?
14348 CONN_INBOUND_POLICY_PRESENT(connp, ipss) :
14349 CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) ||
14350 secure) {
14351 mp = ipsec_check_inbound_policy(mp, connp, ipha,
14352 ip6h, ira);
14353 if (mp == NULL) {
14354 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
14355 /* Note that mp is NULL */
14356 ip_drop_input("ipIfStatsInDiscards", mp, ill);
14357 CONN_DEC_REF(connp);
14358 return;
14359 }
14360 }
14361
14362 if (iraflags & IRAF_ICMP_ERROR) {
14363 (connp->conn_recvicmp)(connp, mp, NULL, ira);
14364 } else {
14365 ill_t *rill = ira->ira_rill;
14366
14367 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCInDelivers);
14368 /* This is the SOCK_RAW, IPPROTO_SCTP case. */
14369 ira->ira_ill = ira->ira_rill = NULL;
14370 (connp->conn_recv)(connp, mp, NULL, ira);
14371 ira->ira_ill = ill;
14372 ira->ira_rill = rill;
14373 }
14374 CONN_DEC_REF(connp);
14375 }
14376
14377 /*
14378 * Free a packet that has the link-layer dl_unitdata_req_t or fast-path
14379 * header before the ip payload.
14380 */
14381 static void
14382 ip_xmit_flowctl_drop(ill_t *ill, mblk_t *mp, boolean_t is_fp_mp, int fp_mp_len)
14383 {
14384 int len = (mp->b_wptr - mp->b_rptr);
14385 mblk_t *ip_mp;
14386
14387 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
14388 if (is_fp_mp || len != fp_mp_len) {
14389 if (len > fp_mp_len) {
14390 /*
14391 * fastpath header and ip header in the first mblk
14392 */
14393 mp->b_rptr += fp_mp_len;
14394 } else {
14395 /*
14396 * ip_xmit_attach_llhdr had to prepend an mblk to
14397 * attach the fastpath header before ip header.
14398 */
14399 ip_mp = mp->b_cont;
14400 freeb(mp);
14401 mp = ip_mp;
14402 mp->b_rptr += (fp_mp_len - len);
14403 }
14404 } else {
14405 ip_mp = mp->b_cont;
14406 freeb(mp);
14407 mp = ip_mp;
14408 }
14409 ip_drop_output("ipIfStatsOutDiscards - flow ctl", mp, ill);
14410 freemsg(mp);
14411 }
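
/*
* For reference, a sketch of the two mblk layouts distinguished above:
* either the link-layer header and the IP packet share the first mblk
* (len > fp_mp_len, so b_rptr is simply advanced past the header), or
* the header was prepended in its own mblk, in which case the first
* mblk is freed and b_rptr of the next one is advanced past any
* remainder of the header it may contain.
*/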
14412
14413 /*
14414 * Normal post fragmentation function.
14415 *
14416 * Send a packet using the passed in nce. This handles both IPv4 and IPv6
14417 * using the same state machine.
14418 *
14419 * We return an error on failure. In particular we return EWOULDBLOCK
14420 * when the driver flow controls. In that case this ensures that ip_wsrv runs
14421 * (currently by canputnext failure resulting in backenabling from GLD.)
* This allows the callers of conn_ip_output() to use EWOULDBLOCK as an
* indication that they can flow control until ip_wsrv() tells them to
* restart.
*
* If the nce passed by the caller is incomplete, this function
* queues the packet and, if necessary, sends an ARP/ND request and bails.
* If the nce passed is fully resolved, we simply prepend
* the link-layer header to the packet, do IPsec hardware acceleration
* work if necessary, and send the packet out on the wire.
14430 */
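/*
* A hedged caller-side sketch of the EWOULDBLOCK contract above (the
* real logic lives in conn_ip_output() and friends; this fragment is
* purely illustrative):
*
*	error = ip_xmit(mp, nce, ixaflags, pkt_len, xmit_hint,
*	    szone, nolzid, &cookie);
*	if (error == EWOULDBLOCK) {
*		remember the cookie and stop transmitting until
*		ip_wsrv() backenables the stream
*	}
*/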
14431 /* ARGSUSED6 */
14432 int
14433 ip_xmit(mblk_t *mp, nce_t *nce, iaflags_t ixaflags, uint_t pkt_len,
14434 uint32_t xmit_hint, zoneid_t szone, zoneid_t nolzid, uintptr_t *ixacookie)
14435 {
14436 queue_t *wq;
14437 ill_t *ill = nce->nce_ill;
14438 ip_stack_t *ipst = ill->ill_ipst;
14439 uint64_t delta;
14440 boolean_t isv6 = ill->ill_isv6;
14441 boolean_t fp_mp;
14442 ncec_t *ncec = nce->nce_common;
14443 int64_t now = LBOLT_FASTPATH64;
14444 boolean_t is_probe;
14445
14446 DTRACE_PROBE1(ip__xmit, nce_t *, nce);
14447
14448 ASSERT(mp != NULL);
14449 ASSERT(mp->b_datap->db_type == M_DATA);
14450 ASSERT(pkt_len == msgdsize(mp));
14451
14452 /*
* If we have already been here and are coming back after ARP/ND,
14454 * the IXAF_NO_TRACE flag is set. We skip FW_HOOKS, DTRACE and ipobs
14455 * in that case since they have seen the packet when it came here
14456 * the first time.
14457 */
14458 if (ixaflags & IXAF_NO_TRACE)
14459 goto sendit;
14460
14461 if (ixaflags & IXAF_IS_IPV4) {
14462 ipha_t *ipha = (ipha_t *)mp->b_rptr;
14463
14464 ASSERT(!isv6);
14465 ASSERT(pkt_len == ntohs(((ipha_t *)mp->b_rptr)->ipha_length));
14466 if (HOOKS4_INTERESTED_PHYSICAL_OUT(ipst) &&
14467 !(ixaflags & IXAF_NO_PFHOOK)) {
14468 int error;
14469
14470 FW_HOOKS(ipst->ips_ip4_physical_out_event,
14471 ipst->ips_ipv4firewall_physical_out,
14472 NULL, ill, ipha, mp, mp, 0, ipst, error);
14473 DTRACE_PROBE1(ip4__physical__out__end,
14474 mblk_t *, mp);
14475 if (mp == NULL)
14476 return (error);
14477
14478 /* The length could have changed */
14479 pkt_len = msgdsize(mp);
14480 }
14481 if (ipst->ips_ip4_observe.he_interested) {
14482 /*
14483 * Note that for TX the zoneid is the sending
14484 * zone, whether or not MLP is in play.
14485 * Since the szone argument is the IP zoneid (i.e.,
14486 * zero for exclusive-IP zones) and ipobs wants
14487 * the system zoneid, we map it here.
14488 */
14489 szone = IP_REAL_ZONEID(szone, ipst);
14490
14491 /*
14492 * On the outbound path the destination zone will be
14493 * unknown as we're sending this packet out on the
14494 * wire.
14495 */
14496 ipobs_hook(mp, IPOBS_HOOK_OUTBOUND, szone, ALL_ZONES,
14497 ill, ipst);
14498 }
14499 DTRACE_IP7(send, mblk_t *, mp, conn_t *, NULL,
14500 void_ip_t *, ipha, __dtrace_ipsr_ill_t *, ill,
14501 ipha_t *, ipha, ip6_t *, NULL, int, 0);
14502 } else {
14503 ip6_t *ip6h = (ip6_t *)mp->b_rptr;
14504
14505 ASSERT(isv6);
14506 ASSERT(pkt_len ==
14507 ntohs(((ip6_t *)mp->b_rptr)->ip6_plen) + IPV6_HDR_LEN);
14508 if (HOOKS6_INTERESTED_PHYSICAL_OUT(ipst) &&
14509 !(ixaflags & IXAF_NO_PFHOOK)) {
14510 int error;
14511
14512 FW_HOOKS6(ipst->ips_ip6_physical_out_event,
14513 ipst->ips_ipv6firewall_physical_out,
14514 NULL, ill, ip6h, mp, mp, 0, ipst, error);
14515 DTRACE_PROBE1(ip6__physical__out__end,
14516 mblk_t *, mp);
14517 if (mp == NULL)
14518 return (error);
14519
14520 /* The length could have changed */
14521 pkt_len = msgdsize(mp);
14522 }
14523 if (ipst->ips_ip6_observe.he_interested) {
14524 /* See above */
14525 szone = IP_REAL_ZONEID(szone, ipst);
14526
14527 ipobs_hook(mp, IPOBS_HOOK_OUTBOUND, szone, ALL_ZONES,
14528 ill, ipst);
14529 }
14530 DTRACE_IP7(send, mblk_t *, mp, conn_t *, NULL,
14531 void_ip_t *, ip6h, __dtrace_ipsr_ill_t *, ill,
14532 ipha_t *, NULL, ip6_t *, ip6h, int, 0);
14533 }
14534
14535 sendit:
14536 /*
14537 * We check the state without a lock because the state can never
14538 * move "backwards" to initial or incomplete.
14539 */
14540 switch (ncec->ncec_state) {
14541 case ND_REACHABLE:
14542 case ND_STALE:
14543 case ND_DELAY:
14544 case ND_PROBE:
14545 mp = ip_xmit_attach_llhdr(mp, nce);
14546 if (mp == NULL) {
14547 /*
14548 * ip_xmit_attach_llhdr has increased
14549 * ipIfStatsOutDiscards and called ip_drop_output()
14550 */
14551 return (ENOBUFS);
14552 }
14553 /*
* Check whether nce_fastpath completed and we tagged on a
* copy of nce_fp_mp in ip_xmit_attach_llhdr().
14556 */
14557 fp_mp = (mp->b_datap->db_type == M_DATA);
14558
14559 if (fp_mp &&
14560 (ill->ill_capabilities & ILL_CAPAB_DLD_DIRECT)) {
14561 ill_dld_direct_t *idd;
14562
14563 idd = &ill->ill_dld_capab->idc_direct;
14564 /*
14565 * Send the packet directly to DLD, where it
14566 * may be queued depending on the availability
14567 * of transmit resources at the media layer.
* The return value should be taken into
* account so that, e.g., TCP can be flow controlled.
14570 */
14571 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits);
14572 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets,
14573 pkt_len);
14574
14575 if (ixaflags & IXAF_NO_DEV_FLOW_CTL) {
14576 (void) idd->idd_tx_df(idd->idd_tx_dh, mp,
14577 (uintptr_t)xmit_hint, IP_DROP_ON_NO_DESC);
14578 } else {
14579 uintptr_t cookie;
14580
14581 if ((cookie = idd->idd_tx_df(idd->idd_tx_dh,
14582 mp, (uintptr_t)xmit_hint, 0)) != 0) {
14583 if (ixacookie != NULL)
14584 *ixacookie = cookie;
14585 return (EWOULDBLOCK);
14586 }
14587 }
14588 } else {
14589 wq = ill->ill_wq;
14590
14591 if (!(ixaflags & IXAF_NO_DEV_FLOW_CTL) &&
14592 !canputnext(wq)) {
14593 if (ixacookie != NULL)
14594 *ixacookie = 0;
14595 ip_xmit_flowctl_drop(ill, mp, fp_mp,
14596 nce->nce_fp_mp != NULL ?
14597 MBLKL(nce->nce_fp_mp) : 0);
14598 return (EWOULDBLOCK);
14599 }
14600 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits);
14601 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets,
14602 pkt_len);
14603 putnext(wq, mp);
14604 }
14605
14606 /*
14607 * The rest of this function implements Neighbor Unreachability
14608 * detection. Determine if the ncec is eligible for NUD.
14609 */
14610 if (ncec->ncec_flags & NCE_F_NONUD)
14611 return (0);
14612
14613 ASSERT(ncec->ncec_state != ND_INCOMPLETE);
14614
14615 /*
14616 * Check for upper layer advice
14617 */
14618 if (ixaflags & IXAF_REACH_CONF) {
14619 timeout_id_t tid;
14620
14621 /*
* It should be OK to check the state without
* a lock here; at worst we lose one reachability advice.
14624 */
14625 ncec->ncec_last = TICK_TO_MSEC(now);
14626 if (ncec->ncec_state != ND_REACHABLE) {
14627 mutex_enter(&ncec->ncec_lock);
14628 ncec->ncec_state = ND_REACHABLE;
14629 tid = ncec->ncec_timeout_id;
14630 ncec->ncec_timeout_id = 0;
14631 mutex_exit(&ncec->ncec_lock);
14632 (void) untimeout(tid);
14633 if (ip_debug > 2) {
14634 /* ip1dbg */
14635 pr_addr_dbg("ip_xmit: state"
14636 " for %s changed to"
14637 " REACHABLE\n", AF_INET6,
14638 &ncec->ncec_addr);
14639 }
14640 }
14641 return (0);
14642 }
14643
14644 delta = TICK_TO_MSEC(now) - ncec->ncec_last;
14645 ip1dbg(("ip_xmit: delta = %" PRId64
14646 " ill_reachable_time = %d \n", delta,
14647 ill->ill_reachable_time));
14648 if (delta > (uint64_t)ill->ill_reachable_time) {
14649 mutex_enter(&ncec->ncec_lock);
14650 switch (ncec->ncec_state) {
14651 case ND_REACHABLE:
14652 ASSERT((ncec->ncec_flags & NCE_F_NONUD) == 0);
14653 /* FALLTHROUGH */
14654 case ND_STALE:
14655 /*
14656 * ND_REACHABLE is identical to
14657 * ND_STALE in this specific case. If
14658 * reachable time has expired for this
14659 * neighbor (delta is greater than
14660 * reachable time), conceptually, the
14661 * neighbor cache is no longer in
14662 * REACHABLE state, but already in
14663 * STALE state. So the correct
14664 * transition here is to ND_DELAY.
14665 */
14666 ncec->ncec_state = ND_DELAY;
14667 mutex_exit(&ncec->ncec_lock);
14668 nce_restart_timer(ncec,
14669 ipst->ips_delay_first_probe_time);
14670 if (ip_debug > 3) {
14671 /* ip2dbg */
14672 pr_addr_dbg("ip_xmit: state"
14673 " for %s changed to"
14674 " DELAY\n", AF_INET6,
14675 &ncec->ncec_addr);
14676 }
14677 break;
14678 case ND_DELAY:
14679 case ND_PROBE:
14680 mutex_exit(&ncec->ncec_lock);
14681 /* Timers have already started */
14682 break;
14683 case ND_UNREACHABLE:
14684 /*
14685 * nce_timer has detected that this ncec
14686 * is unreachable and initiated deleting
14687 * this ncec.
14688 * This is a harmless race where we found the
14689 * ncec before it was deleted and have
14690 * just sent out a packet using this
14691 * unreachable ncec.
14692 */
14693 mutex_exit(&ncec->ncec_lock);
14694 break;
14695 default:
14696 ASSERT(0);
14697 mutex_exit(&ncec->ncec_lock);
14698 }
14699 }
14700 return (0);
14701
14702 case ND_INCOMPLETE:
14703 /*
* The state could have changed since we didn't hold the lock.
14705 * Re-verify state under lock.
14706 */
14707 is_probe = ipmp_packet_is_probe(mp, nce->nce_ill);
14708 mutex_enter(&ncec->ncec_lock);
14709 if (NCE_ISREACHABLE(ncec)) {
14710 mutex_exit(&ncec->ncec_lock);
14711 goto sendit;
14712 }
14713 /* queue the packet */
14714 nce_queue_mp(ncec, mp, is_probe);
14715 mutex_exit(&ncec->ncec_lock);
14716 DTRACE_PROBE2(ip__xmit__incomplete,
14717 (ncec_t *), ncec, (mblk_t *), mp);
14718 return (0);
14719
14720 case ND_INITIAL:
14721 /*
14722 * State could have changed since we didn't hold the lock, so
14723 * re-verify state.
14724 */
14725 is_probe = ipmp_packet_is_probe(mp, nce->nce_ill);
14726 mutex_enter(&ncec->ncec_lock);
14727 if (NCE_ISREACHABLE(ncec)) {
14728 mutex_exit(&ncec->ncec_lock);
14729 goto sendit;
14730 }
14731 nce_queue_mp(ncec, mp, is_probe);
14732 if (ncec->ncec_state == ND_INITIAL) {
14733 ncec->ncec_state = ND_INCOMPLETE;
14734 mutex_exit(&ncec->ncec_lock);
14735 /*
14736 * figure out the source we want to use
14737 * and resolve it.
14738 */
14739 ip_ndp_resolve(ncec);
14740 } else {
14741 mutex_exit(&ncec->ncec_lock);
14742 }
14743 return (0);
14744
14745 case ND_UNREACHABLE:
14746 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
14747 ip_drop_output("ipIfStatsOutDiscards - ND_UNREACHABLE",
14748 mp, ill);
14749 freemsg(mp);
14750 return (0);
14751
14752 default:
14753 ASSERT(0);
14754 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
14755 ip_drop_output("ipIfStatsOutDiscards - ND_other",
14756 mp, ill);
14757 freemsg(mp);
14758 return (ENETUNREACH);
14759 }
14760 }
14761
14762 /*
14763 * Return B_TRUE if the buffers differ in length or content.
14764 * This is used for comparing extension header buffers.
* Note that an extension header would be declared different
* even if all that changed was the next header value in that header,
* i.e. what really changed is the next extension header.
14768 */
14769 boolean_t
14770 ip_cmpbuf(const void *abuf, uint_t alen, boolean_t b_valid, const void *bbuf,
14771 uint_t blen)
14772 {
14773 if (!b_valid)
14774 blen = 0;
14775
14776 if (alen != blen)
14777 return (B_TRUE);
14778 if (alen == 0)
14779 return (B_FALSE); /* Both zero length */
14780 return (bcmp(abuf, bbuf, alen));
14781 }
14782
14783 /*
14784 * Preallocate memory for ip_savebuf(). Returns B_TRUE if ok.
14785 * Return B_FALSE if memory allocation fails - don't change any state!
14786 */
14787 boolean_t
14788 ip_allocbuf(void **dstp, uint_t *dstlenp, boolean_t src_valid,
14789 const void *src, uint_t srclen)
14790 {
14791 void *dst;
14792
14793 if (!src_valid)
14794 srclen = 0;
14795
14796 ASSERT(*dstlenp == 0);
14797 if (src != NULL && srclen != 0) {
14798 dst = mi_alloc(srclen, BPRI_MED);
14799 if (dst == NULL)
14800 return (B_FALSE);
14801 } else {
14802 dst = NULL;
14803 }
14804 if (*dstp != NULL)
14805 mi_free(*dstp);
14806 *dstp = dst;
14807 *dstlenp = dst == NULL ? 0 : srclen;
14808 return (B_TRUE);
14809 }
14810
14811 /*
14812 * Replace what is in *dst, *dstlen with the source.
14813 * Assumes ip_allocbuf has already been called.
14814 */
14815 void
14816 ip_savebuf(void **dstp, uint_t *dstlenp, boolean_t src_valid,
14817 const void *src, uint_t srclen)
14818 {
14819 if (!src_valid)
14820 srclen = 0;
14821
14822 ASSERT(*dstlenp == srclen);
14823 if (src != NULL && srclen != 0)
14824 bcopy(src, *dstp, srclen);
14825 }
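
/*
* The intended two-phase use of ip_allocbuf()/ip_savebuf() is to
* allocate first, so that an allocation failure leaves the old state
* untouched, and only then commit. A minimal sketch (opt/optlen stand
* in for e.g. per-conn option state):
*
*	if (!ip_allocbuf(&opt, &optlen, src_valid, src, srclen))
*		return (ENOMEM);	(nothing was changed)
*	ip_savebuf(&opt, &optlen, src_valid, src, srclen);
*/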
14826
14827 /*
14828 * Free the storage pointed to by the members of an ip_pkt_t.
14829 */
14830 void
14831 ip_pkt_free(ip_pkt_t *ipp)
14832 {
14833 uint_t fields = ipp->ipp_fields;
14834
14835 if (fields & IPPF_HOPOPTS) {
14836 kmem_free(ipp->ipp_hopopts, ipp->ipp_hopoptslen);
14837 ipp->ipp_hopopts = NULL;
14838 ipp->ipp_hopoptslen = 0;
14839 }
14840 if (fields & IPPF_RTHDRDSTOPTS) {
14841 kmem_free(ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen);
14842 ipp->ipp_rthdrdstopts = NULL;
14843 ipp->ipp_rthdrdstoptslen = 0;
14844 }
14845 if (fields & IPPF_DSTOPTS) {
14846 kmem_free(ipp->ipp_dstopts, ipp->ipp_dstoptslen);
14847 ipp->ipp_dstopts = NULL;
14848 ipp->ipp_dstoptslen = 0;
14849 }
14850 if (fields & IPPF_RTHDR) {
14851 kmem_free(ipp->ipp_rthdr, ipp->ipp_rthdrlen);
14852 ipp->ipp_rthdr = NULL;
14853 ipp->ipp_rthdrlen = 0;
14854 }
14855 if (fields & IPPF_IPV4_OPTIONS) {
14856 kmem_free(ipp->ipp_ipv4_options, ipp->ipp_ipv4_options_len);
14857 ipp->ipp_ipv4_options = NULL;
14858 ipp->ipp_ipv4_options_len = 0;
14859 }
14860 if (fields & IPPF_LABEL_V4) {
14861 kmem_free(ipp->ipp_label_v4, ipp->ipp_label_len_v4);
14862 ipp->ipp_label_v4 = NULL;
14863 ipp->ipp_label_len_v4 = 0;
14864 }
14865 if (fields & IPPF_LABEL_V6) {
14866 kmem_free(ipp->ipp_label_v6, ipp->ipp_label_len_v6);
14867 ipp->ipp_label_v6 = NULL;
14868 ipp->ipp_label_len_v6 = 0;
14869 }
14870 ipp->ipp_fields &= ~(IPPF_HOPOPTS | IPPF_RTHDRDSTOPTS | IPPF_DSTOPTS |
14871 IPPF_RTHDR | IPPF_IPV4_OPTIONS | IPPF_LABEL_V4 | IPPF_LABEL_V6);
14872 }
14873
14874 /*
14875 * Copy from src to dst and allocate as needed.
14876 * Returns zero or ENOMEM.
14877 *
14878 * The caller must initialize dst to zero.
14879 */
14880 int
14881 ip_pkt_copy(ip_pkt_t *src, ip_pkt_t *dst, int kmflag)
14882 {
14883 uint_t fields = src->ipp_fields;
14884
14885 /* Start with fields that don't require memory allocation */
14886 dst->ipp_fields = fields &
14887 ~(IPPF_HOPOPTS | IPPF_RTHDRDSTOPTS | IPPF_DSTOPTS |
14888 IPPF_RTHDR | IPPF_IPV4_OPTIONS | IPPF_LABEL_V4 | IPPF_LABEL_V6);
14889
14890 dst->ipp_addr = src->ipp_addr;
14891 dst->ipp_unicast_hops = src->ipp_unicast_hops;
14892 dst->ipp_hoplimit = src->ipp_hoplimit;
14893 dst->ipp_tclass = src->ipp_tclass;
14894 dst->ipp_type_of_service = src->ipp_type_of_service;
14895
14896 if (!(fields & (IPPF_HOPOPTS | IPPF_RTHDRDSTOPTS | IPPF_DSTOPTS |
14897 IPPF_RTHDR | IPPF_IPV4_OPTIONS | IPPF_LABEL_V4 | IPPF_LABEL_V6)))
14898 return (0);
14899
14900 if (fields & IPPF_HOPOPTS) {
14901 dst->ipp_hopopts = kmem_alloc(src->ipp_hopoptslen, kmflag);
14902 if (dst->ipp_hopopts == NULL) {
14903 ip_pkt_free(dst);
14904 return (ENOMEM);
14905 }
14906 dst->ipp_fields |= IPPF_HOPOPTS;
14907 bcopy(src->ipp_hopopts, dst->ipp_hopopts,
14908 src->ipp_hopoptslen);
14909 dst->ipp_hopoptslen = src->ipp_hopoptslen;
14910 }
14911 if (fields & IPPF_RTHDRDSTOPTS) {
14912 dst->ipp_rthdrdstopts = kmem_alloc(src->ipp_rthdrdstoptslen,
14913 kmflag);
14914 if (dst->ipp_rthdrdstopts == NULL) {
14915 ip_pkt_free(dst);
14916 return (ENOMEM);
14917 }
14918 dst->ipp_fields |= IPPF_RTHDRDSTOPTS;
14919 bcopy(src->ipp_rthdrdstopts, dst->ipp_rthdrdstopts,
14920 src->ipp_rthdrdstoptslen);
14921 dst->ipp_rthdrdstoptslen = src->ipp_rthdrdstoptslen;
14922 }
14923 if (fields & IPPF_DSTOPTS) {
14924 dst->ipp_dstopts = kmem_alloc(src->ipp_dstoptslen, kmflag);
14925 if (dst->ipp_dstopts == NULL) {
14926 ip_pkt_free(dst);
14927 return (ENOMEM);
14928 }
14929 dst->ipp_fields |= IPPF_DSTOPTS;
14930 bcopy(src->ipp_dstopts, dst->ipp_dstopts,
14931 src->ipp_dstoptslen);
14932 dst->ipp_dstoptslen = src->ipp_dstoptslen;
14933 }
14934 if (fields & IPPF_RTHDR) {
14935 dst->ipp_rthdr = kmem_alloc(src->ipp_rthdrlen, kmflag);
14936 if (dst->ipp_rthdr == NULL) {
14937 ip_pkt_free(dst);
14938 return (ENOMEM);
14939 }
14940 dst->ipp_fields |= IPPF_RTHDR;
14941 bcopy(src->ipp_rthdr, dst->ipp_rthdr,
14942 src->ipp_rthdrlen);
14943 dst->ipp_rthdrlen = src->ipp_rthdrlen;
14944 }
14945 if (fields & IPPF_IPV4_OPTIONS) {
14946 dst->ipp_ipv4_options = kmem_alloc(src->ipp_ipv4_options_len,
14947 kmflag);
14948 if (dst->ipp_ipv4_options == NULL) {
14949 ip_pkt_free(dst);
14950 return (ENOMEM);
14951 }
14952 dst->ipp_fields |= IPPF_IPV4_OPTIONS;
14953 bcopy(src->ipp_ipv4_options, dst->ipp_ipv4_options,
14954 src->ipp_ipv4_options_len);
14955 dst->ipp_ipv4_options_len = src->ipp_ipv4_options_len;
14956 }
14957 if (fields & IPPF_LABEL_V4) {
14958 dst->ipp_label_v4 = kmem_alloc(src->ipp_label_len_v4, kmflag);
14959 if (dst->ipp_label_v4 == NULL) {
14960 ip_pkt_free(dst);
14961 return (ENOMEM);
14962 }
14963 dst->ipp_fields |= IPPF_LABEL_V4;
14964 bcopy(src->ipp_label_v4, dst->ipp_label_v4,
14965 src->ipp_label_len_v4);
14966 dst->ipp_label_len_v4 = src->ipp_label_len_v4;
14967 }
14968 if (fields & IPPF_LABEL_V6) {
14969 dst->ipp_label_v6 = kmem_alloc(src->ipp_label_len_v6, kmflag);
14970 if (dst->ipp_label_v6 == NULL) {
14971 ip_pkt_free(dst);
14972 return (ENOMEM);
14973 }
14974 dst->ipp_fields |= IPPF_LABEL_V6;
14975 bcopy(src->ipp_label_v6, dst->ipp_label_v6,
14976 src->ipp_label_len_v6);
14977 dst->ipp_label_len_v6 = src->ipp_label_len_v6;
14978 }
14979 if (fields & IPPF_FRAGHDR) {
14980 dst->ipp_fraghdr = kmem_alloc(src->ipp_fraghdrlen, kmflag);
14981 if (dst->ipp_fraghdr == NULL) {
14982 ip_pkt_free(dst);
14983 return (ENOMEM);
14984 }
14985 dst->ipp_fields |= IPPF_FRAGHDR;
14986 bcopy(src->ipp_fraghdr, dst->ipp_fraghdr,
14987 src->ipp_fraghdrlen);
14988 dst->ipp_fraghdrlen = src->ipp_fraghdrlen;
14989 }
14990 return (0);
14991 }
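
/*
* Typical use of ip_pkt_copy(), sketched: the destination must start
* out zeroed, and on ENOMEM the partially built copy has already been
* released via ip_pkt_free(), so the caller does not clean up:
*
*	ip_pkt_t dst;
*
*	bzero(&dst, sizeof (dst));
*	if (ip_pkt_copy(&src, &dst, KM_NOSLEEP) != 0)
*		return (ENOMEM);
*	...
*	ip_pkt_free(&dst);
*/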
14992
14993 /*
14994 * Returns INADDR_ANY if no source route
14995 */
14996 ipaddr_t
14997 ip_pkt_source_route_v4(const ip_pkt_t *ipp)
14998 {
14999 ipaddr_t nexthop = INADDR_ANY;
15000 ipoptp_t opts;
15001 uchar_t *opt;
15002 uint8_t optval;
15003 uint8_t optlen;
15004 uint32_t totallen;
15005
15006 if (!(ipp->ipp_fields & IPPF_IPV4_OPTIONS))
15007 return (INADDR_ANY);
15008
15009 totallen = ipp->ipp_ipv4_options_len;
15010 if (totallen & 0x3)
15011 return (INADDR_ANY);
15012
15013 for (optval = ipoptp_first2(&opts, totallen, ipp->ipp_ipv4_options);
15014 optval != IPOPT_EOL;
15015 optval = ipoptp_next(&opts)) {
15016 opt = opts.ipoptp_cur;
15017 switch (optval) {
15018 uint8_t off;
15019 case IPOPT_SSRR:
15020 case IPOPT_LSRR:
15021 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
15022 break;
15023 }
15024 optlen = opts.ipoptp_len;
15025 off = opt[IPOPT_OFFSET];
15026 off--;
15027 if (optlen < IP_ADDR_LEN ||
15028 off > optlen - IP_ADDR_LEN) {
15029 /* End of source route */
15030 break;
15031 }
15032 bcopy((char *)opt + off, &nexthop, IP_ADDR_LEN);
15033 if (nexthop == htonl(INADDR_LOOPBACK)) {
15034 /* Ignore */
15035 nexthop = INADDR_ANY;
15036 break;
15037 }
15038 break;
15039 }
15040 }
15041 return (nexthop);
15042 }
15043
15044 /*
15045 * Reverse a source route.
15046 */
15047 void
15048 ip_pkt_source_route_reverse_v4(ip_pkt_t *ipp)
15049 {
15050 ipaddr_t tmp;
15051 ipoptp_t opts;
15052 uchar_t *opt;
15053 uint8_t optval;
15054 uint32_t totallen;
15055
15056 if (!(ipp->ipp_fields & IPPF_IPV4_OPTIONS))
15057 return;
15058
15059 totallen = ipp->ipp_ipv4_options_len;
15060 if (totallen & 0x3)
15061 return;
15062
15063 for (optval = ipoptp_first2(&opts, totallen, ipp->ipp_ipv4_options);
15064 optval != IPOPT_EOL;
15065 optval = ipoptp_next(&opts)) {
15066 uint8_t off1, off2;
15067
15068 opt = opts.ipoptp_cur;
15069 switch (optval) {
15070 case IPOPT_SSRR:
15071 case IPOPT_LSRR:
15072 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
15073 break;
15074 }
15075 off1 = IPOPT_MINOFF_SR - 1;
15076 off2 = opt[IPOPT_OFFSET] - IP_ADDR_LEN - 1;
15077 while (off2 > off1) {
15078 bcopy(opt + off2, &tmp, IP_ADDR_LEN);
15079 bcopy(opt + off1, opt + off2, IP_ADDR_LEN);
bcopy(&tmp, opt + off1, IP_ADDR_LEN);
15081 off2 -= IP_ADDR_LEN;
15082 off1 += IP_ADDR_LEN;
15083 }
15084 opt[IPOPT_OFFSET] = IPOPT_MINOFF_SR;
15085 break;
15086 }
15087 }
15088 }
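
/*
* Worked example of the reversal above: an LSRR/SSRR whose address
* list reads (A, B, C), with the offset pointing past C after the
* route has been consumed, becomes (C, B, A) with the offset reset to
* IPOPT_MINOFF_SR, i.e. a source route ready for use in the reverse
* direction, visiting C first and A last.
*/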
15089
15090 /*
15091 * Returns NULL if no routing header
15092 */
15093 in6_addr_t *
15094 ip_pkt_source_route_v6(const ip_pkt_t *ipp)
15095 {
15096 in6_addr_t *nexthop = NULL;
15097 ip6_rthdr0_t *rthdr;
15098
15099 if (!(ipp->ipp_fields & IPPF_RTHDR))
15100 return (NULL);
15101
15102 rthdr = (ip6_rthdr0_t *)ipp->ipp_rthdr;
15103 if (rthdr->ip6r0_segleft == 0)
15104 return (NULL);
15105
15106 nexthop = (in6_addr_t *)((char *)rthdr + sizeof (*rthdr));
15107 return (nexthop);
15108 }
15109
15110 zoneid_t
15111 ip_get_zoneid_v4(ipaddr_t addr, mblk_t *mp, ip_recv_attr_t *ira,
15112 zoneid_t lookup_zoneid)
15113 {
15114 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
15115 ire_t *ire;
15116 int ire_flags = MATCH_IRE_TYPE;
15117 zoneid_t zoneid = ALL_ZONES;
15118
15119 if (is_system_labeled() && !tsol_can_accept_raw(mp, ira, B_FALSE))
15120 return (ALL_ZONES);
15121
15122 if (lookup_zoneid != ALL_ZONES)
15123 ire_flags |= MATCH_IRE_ZONEONLY;
15124 ire = ire_ftable_lookup_v4(addr, NULL, NULL, IRE_LOCAL | IRE_LOOPBACK,
15125 NULL, lookup_zoneid, NULL, ire_flags, 0, ipst, NULL);
15126 if (ire != NULL) {
15127 zoneid = IP_REAL_ZONEID(ire->ire_zoneid, ipst);
15128 ire_refrele(ire);
15129 }
15130 return (zoneid);
15131 }
15132
15133 zoneid_t
15134 ip_get_zoneid_v6(in6_addr_t *addr, mblk_t *mp, const ill_t *ill,
15135 ip_recv_attr_t *ira, zoneid_t lookup_zoneid)
15136 {
15137 ip_stack_t *ipst = ira->ira_ill->ill_ipst;
15138 ire_t *ire;
15139 int ire_flags = MATCH_IRE_TYPE;
15140 zoneid_t zoneid = ALL_ZONES;
15141
15142 if (is_system_labeled() && !tsol_can_accept_raw(mp, ira, B_FALSE))
15143 return (ALL_ZONES);
15144
15145 if (IN6_IS_ADDR_LINKLOCAL(addr))
15146 ire_flags |= MATCH_IRE_ILL;
15147
15148 if (lookup_zoneid != ALL_ZONES)
15149 ire_flags |= MATCH_IRE_ZONEONLY;
15150 ire = ire_ftable_lookup_v6(addr, NULL, NULL, IRE_LOCAL | IRE_LOOPBACK,
15151 ill, lookup_zoneid, NULL, ire_flags, 0, ipst, NULL);
15152 if (ire != NULL) {
15153 zoneid = IP_REAL_ZONEID(ire->ire_zoneid, ipst);
15154 ire_refrele(ire);
15155 }
15156 return (zoneid);
15157 }
15158
15159 /*
* IP observability hook support functions.
15161 */
15162 static void
15163 ipobs_init(ip_stack_t *ipst)
15164 {
15165 netid_t id;
15166
15167 id = net_getnetidbynetstackid(ipst->ips_netstack->netstack_stackid);
15168
15169 ipst->ips_ip4_observe_pr = net_protocol_lookup(id, NHF_INET);
15170 VERIFY(ipst->ips_ip4_observe_pr != NULL);
15171
15172 ipst->ips_ip6_observe_pr = net_protocol_lookup(id, NHF_INET6);
15173 VERIFY(ipst->ips_ip6_observe_pr != NULL);
15174 }
15175
15176 static void
15177 ipobs_fini(ip_stack_t *ipst)
15178 {
15180 VERIFY(net_protocol_release(ipst->ips_ip4_observe_pr) == 0);
15181 VERIFY(net_protocol_release(ipst->ips_ip6_observe_pr) == 0);
15182 }
15183
15184 /*
15185 * hook_pkt_observe_t is composed in network byte order so that the
15186 * entire mblk_t chain handed into hook_run can be used as-is.
15187 * The caveat is that use of the fields, such as the zone fields,
15188 * requires conversion into host byte order first.
15189 */
15190 void
15191 ipobs_hook(mblk_t *mp, int htype, zoneid_t zsrc, zoneid_t zdst,
15192 const ill_t *ill, ip_stack_t *ipst)
15193 {
15194 hook_pkt_observe_t *hdr;
15195 uint64_t grifindex;
15196 mblk_t *imp;
15197
15198 imp = allocb(sizeof (*hdr), BPRI_HI);
15199 if (imp == NULL)
15200 return;
15201
15202 hdr = (hook_pkt_observe_t *)imp->b_rptr;
15203 /*
* b_wptr is set so that the apparent size of the data in the mblk_t
* excludes the pointers at the end of hook_pkt_observe_t.
15206 */
15207 imp->b_wptr = imp->b_rptr + sizeof (dl_ipnetinfo_t);
15208 imp->b_cont = mp;
15209
15210 ASSERT(DB_TYPE(mp) == M_DATA);
15211
15212 if (IS_UNDER_IPMP(ill))
15213 grifindex = ipmp_ill_get_ipmp_ifindex(ill);
15214 else
15215 grifindex = 0;
15216
15217 hdr->hpo_version = 1;
15218 hdr->hpo_htype = htons(htype);
15219 hdr->hpo_pktlen = htonl((ulong_t)msgdsize(mp));
15220 hdr->hpo_ifindex = htonl(ill->ill_phyint->phyint_ifindex);
15221 hdr->hpo_grifindex = htonl(grifindex);
15222 hdr->hpo_zsrc = htonl(zsrc);
15223 hdr->hpo_zdst = htonl(zdst);
15224 hdr->hpo_pkt = imp;
15225 hdr->hpo_ctx = ipst->ips_netstack;
15226
15227 if (ill->ill_isv6) {
15228 hdr->hpo_family = AF_INET6;
15229 (void) hook_run(ipst->ips_ipv6_net_data->netd_hooks,
15230 ipst->ips_ipv6observing, (hook_data_t)hdr);
15231 } else {
15232 hdr->hpo_family = AF_INET;
15233 (void) hook_run(ipst->ips_ipv4_net_data->netd_hooks,
15234 ipst->ips_ipv4observing, (hook_data_t)hdr);
15235 }
15236
15237 imp->b_cont = NULL;
15238 freemsg(imp);
15239 }
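
/*
* As noted above, the observability header is built in network byte
* order; a hook consumer must convert fields before use. A
* hypothetical receiver sketch:
*
*	hook_pkt_observe_t *hdr = (hook_pkt_observe_t *)info;
*	zoneid_t zsrc = (zoneid_t)ntohl(hdr->hpo_zsrc);
*	uint_t pktlen = ntohl(hdr->hpo_pktlen);
*/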
15240
15241 /*
15242 * Utility routine that checks if `v4srcp' is a valid address on underlying
15243 * interface `ill'. If `ipifp' is non-NULL, it's set to a held ipif
15244 * associated with `v4srcp' on success. NOTE: if this is not called from
15245 * inside the IPSQ (ill_g_lock is not held), `ill' may be removed from the
15246 * group during or after this lookup.
15247 */
15248 boolean_t
15249 ipif_lookup_testaddr_v4(ill_t *ill, const in_addr_t *v4srcp, ipif_t **ipifp)
15250 {
15251 ipif_t *ipif;
15252
15253 ipif = ipif_lookup_addr_exact(*v4srcp, ill, ill->ill_ipst);
15254 if (ipif != NULL) {
15255 if (ipifp != NULL)
15256 *ipifp = ipif;
15257 else
15258 ipif_refrele(ipif);
15259 return (B_TRUE);
15260 }
15261
15262 ip1dbg(("ipif_lookup_testaddr_v4: cannot find ipif for src %x\n",
15263 *v4srcp));
15264 return (B_FALSE);
15265 }
15266
15267 /*
15268 * Transport protocol call back function for CPU state change.
15269 */
15270 /* ARGSUSED */
15271 static int
15272 ip_tp_cpu_update(cpu_setup_t what, int id, void *arg)
15273 {
15274 processorid_t cpu_seqid;
15275 netstack_handle_t nh;
15276 netstack_t *ns;
15277
15278 ASSERT(MUTEX_HELD(&cpu_lock));
15279
15280 switch (what) {
15281 case CPU_CONFIG:
15282 case CPU_ON:
15283 case CPU_INIT:
15284 case CPU_CPUPART_IN:
15285 cpu_seqid = cpu[id]->cpu_seqid;
15286 netstack_next_init(&nh);
15287 while ((ns = netstack_next(&nh)) != NULL) {
15288 tcp_stack_cpu_add(ns->netstack_tcp, cpu_seqid);
15289 sctp_stack_cpu_add(ns->netstack_sctp, cpu_seqid);
15290 udp_stack_cpu_add(ns->netstack_udp, cpu_seqid);
15291 netstack_rele(ns);
15292 }
15293 netstack_next_fini(&nh);
15294 break;
15295 case CPU_UNCONFIG:
15296 case CPU_OFF:
15297 case CPU_CPUPART_OUT:
15298 /*
15299 * Nothing to do. We don't remove the per CPU stats from
15300 * the IP stack even when the CPU goes offline.
15301 */
15302 break;
15303 default:
15304 break;
15305 }
15306 return (0);
15307 }
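
/*
* A sketch of how a cpu_setup callback like the one above is wired up
* (the actual registration lives elsewhere in this module's init
* path); such callbacks are installed while holding cpu_lock:
*
*	mutex_enter(&cpu_lock);
*	register_cpu_setup_func(ip_tp_cpu_update, NULL);
*	mutex_exit(&cpu_lock);
*/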