106 static void netstack_zone_shutdown(zoneid_t zoneid, void *arg);
107 static void netstack_zone_destroy(zoneid_t zoneid, void *arg);
108
109 static void netstack_shared_zone_add(zoneid_t zoneid);
110 static void netstack_shared_zone_remove(zoneid_t zoneid);
111 static void netstack_shared_kstat_add(kstat_t *ks);
112 static void netstack_shared_kstat_remove(kstat_t *ks);
113
114 typedef boolean_t applyfn_t(kmutex_t *, netstack_t *, int);
115
116 static void apply_all_netstacks(int, applyfn_t *);
117 static void apply_all_modules(netstack_t *, applyfn_t *);
118 static void apply_all_modules_reverse(netstack_t *, applyfn_t *);
119 static boolean_t netstack_apply_create(kmutex_t *, netstack_t *, int);
120 static boolean_t netstack_apply_shutdown(kmutex_t *, netstack_t *, int);
121 static boolean_t netstack_apply_destroy(kmutex_t *, netstack_t *, int);
122 static boolean_t wait_for_zone_creator(netstack_t *, kmutex_t *);
123 static boolean_t wait_for_nms_inprogress(netstack_t *, nm_state_t *,
124 kmutex_t *);
125
126 static ksema_t netstack_reap_limiter;
127 /*
128 * Hard-coded constant, but since this is not tunable in real-time, it seems
129 * making it an /etc/system tunable is better than nothing.
130 */
131 uint_t netstack_outstanding_reaps = 1024;
132
133 void
134 netstack_init(void)
135 {
136 mutex_init(&netstack_g_lock, NULL, MUTEX_DEFAULT, NULL);
137 mutex_init(&netstack_shared_lock, NULL, MUTEX_DEFAULT, NULL);
138
139 sema_init(&netstack_reap_limiter, netstack_outstanding_reaps, NULL,
140 SEMA_DRIVER, NULL);
141
142 netstack_initialized = 1;
143
144 /*
145 * We want to be informed each time a zone is created or
910 mutex_exit(&ns->netstack_lock);
911 mutex_enter(lockp);
912 mutex_enter(&ns->netstack_lock);
913 }
914 }
915 return (dropped);
916 }
917
918 /*
919 * Get the stack instance used in caller's zone.
920 * Increases the reference count, caller must do a netstack_rele.
921 * It can't be called after zone_destroy() has started.
922 */
923 netstack_t *
924 netstack_get_current(void)
925 {
926 netstack_t *ns;
927
928 ns = curproc->p_zone->zone_netstack;
929 ASSERT(ns != NULL);
930 if (ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))
931 return (NULL);
932
933 netstack_hold(ns);
934
935 return (ns);
936 }
937
938 /*
939 * Find a stack instance given the cred.
940 * This is used by the modules to potentially allow for a future when
941 * something other than the zoneid is used to determine the stack.
942 */
943 netstack_t *
944 netstack_find_by_cred(const cred_t *cr)
945 {
946 zoneid_t zoneid = crgetzoneid(cr);
947
948 /* Handle the case when cr_zone is NULL */
949 if (zoneid == (zoneid_t)-1)
950 zoneid = GLOBAL_ZONEID;
951
952 /* For performance ... */
953 if (curproc->p_zone->zone_id == zoneid)
954 return (netstack_get_current());
955 else
956 return (netstack_find_by_zoneid(zoneid));
957 }
958
959 /*
960 * Find a stack instance given the zoneid.
961 * Increases the reference count if found; caller must do a
962 * netstack_rele().
963 *
964 * If there is no exact match then assume the shared stack instance
965 * matches.
966 *
 * Skip the uninitialized and closing ones.
968 */
969 netstack_t *
970 netstack_find_by_zoneid(zoneid_t zoneid)
971 {
972 netstack_t *ns;
973 zone_t *zone;
974
975 zone = zone_find_by_id(zoneid);
976
977 if (zone == NULL)
978 return (NULL);
979
980 ns = zone->zone_netstack;
981 ASSERT(ns != NULL);
982 if (ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))
983 ns = NULL;
984 else
985 netstack_hold(ns);
986
987 zone_rele(zone);
988 return (ns);
989 }
990
991 /*
992 * Find a stack instance given the zoneid. Can only be called from
993 * the create callback. See the comments in zone_find_by_id_nolock why
994 * that limitation exists.
995 *
996 * Increases the reference count if found; caller must do a
997 * netstack_rele().
998 *
999 * If there is no exact match then assume the shared stack instance
1000 * matches.
1001 *
 * Skip the uninitialized and closing ones.
1003 */
1004 netstack_t *
1005 netstack_find_by_zoneid_nolock(zoneid_t zoneid)
1006 {
1007 netstack_t *ns;
1008 zone_t *zone;
1009
1010 zone = zone_find_by_id_nolock(zoneid);
1011
1012 if (zone == NULL)
1013 return (NULL);
1014
1015 ns = zone->zone_netstack;
1016 ASSERT(ns != NULL);
1017
1018 if (ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))
1019 ns = NULL;
1020 else
1021 netstack_hold(ns);
1022
1023 /* zone_find_by_id_nolock does not have a hold on the zone */
1024 return (ns);
1025 }
1026
1027 /*
 * Find a stack instance given the stackid, requiring an exact match.
1029 * Increases the reference count if found; caller must do a
1030 * netstack_rele().
1031 *
 * Skip the uninitialized and closing ones.
1033 */
1034 netstack_t *
1035 netstack_find_by_stackid(netstackid_t stackid)
1036 {
1037 netstack_t *ns;
1038
1039 mutex_enter(&netstack_g_lock);
1040 for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
1041 mutex_enter(&ns->netstack_lock);
1042 if (ns->netstack_stackid == stackid &&
1043 !(ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))) {
1044 mutex_exit(&ns->netstack_lock);
1045 netstack_hold(ns);
1046 mutex_exit(&netstack_g_lock);
1047 return (ns);
1048 }
1049 mutex_exit(&ns->netstack_lock);
1050 }
1051 mutex_exit(&netstack_g_lock);
1052 return (NULL);
1053 }
1054
1055 boolean_t
1056 netstack_inuse_by_stackid(netstackid_t stackid)
1057 {
1058 netstack_t *ns;
1059 boolean_t rval = B_FALSE;
1060
1061 mutex_enter(&netstack_g_lock);
1062
1063 for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
1064 if (ns->netstack_stackid == stackid) {
1065 rval = B_TRUE;
1155 * we can use KM_SLEEP and semaphores.
1156 */
1157 if (sema_tryp(&netstack_reap_limiter) == 0) {
1158 /*
1159 * Indicate we're slamming against a limit.
1160 */
1161 hrtime_t measurement = gethrtime();
1162
1163 sema_p(&netstack_reap_limiter);
1164 /* Capture delay in ns. */
1165 DTRACE_PROBE1(netstack__reap__rate__limited,
1166 hrtime_t, gethrtime() - measurement);
1167 }
1168
1169 /* TQ_SLEEP should prevent taskq_dispatch() from failing. */
1170 (void) taskq_dispatch(system_taskq, netstack_reap, ns,
1171 TQ_SLEEP);
1172 }
1173 }
1174
1175 void
1176 netstack_hold(netstack_t *ns)
1177 {
1178 mutex_enter(&ns->netstack_lock);
1179 ns->netstack_refcnt++;
1180 ASSERT(ns->netstack_refcnt > 0);
1181 mutex_exit(&ns->netstack_lock);
1182 DTRACE_PROBE1(netstack__inc__ref, netstack_t *, ns);
1183 }
1184
1185 /*
1186 * To support kstat_create_netstack() using kstat_zone_add we need
1187 * to track both
1188 * - all zoneids that use the global/shared stack
1189 * - all kstats that have been added for the shared stack
1190 */
1191 kstat_t *
1192 kstat_create_netstack(char *ks_module, int ks_instance, char *ks_name,
1193 char *ks_class, uchar_t ks_type, uint_t ks_ndata, uchar_t ks_flags,
1194 netstackid_t ks_netstackid)
1195 {
1196 kstat_t *ks;
1197
1198 if (ks_netstackid == GLOBAL_NETSTACKID) {
1199 ks = kstat_create_zone(ks_module, ks_instance, ks_name,
1200 ks_class, ks_type, ks_ndata, ks_flags, GLOBAL_ZONEID);
1201 if (ks != NULL)
1202 netstack_shared_kstat_add(ks);
1403 {
1404 }
1405
1406 netstack_t *
1407 netstack_next(netstack_handle_t *handle)
1408 {
1409 netstack_t *ns;
1410 int i, end;
1411
1412 end = *handle;
1413 /* Walk skipping *handle number of instances */
1414
1415 /* Look if there is a matching stack instance */
1416 mutex_enter(&netstack_g_lock);
1417 ns = netstack_head;
1418 for (i = 0; i < end; i++) {
1419 if (ns == NULL)
1420 break;
1421 ns = ns->netstack_next;
1422 }
1423 /* skip those with that aren't really here */
1424 while (ns != NULL) {
1425 mutex_enter(&ns->netstack_lock);
1426 if ((ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING)) == 0) {
1427 mutex_exit(&ns->netstack_lock);
1428 break;
1429 }
1430 mutex_exit(&ns->netstack_lock);
1431 end++;
1432 ns = ns->netstack_next;
1433 }
1434 if (ns != NULL) {
1435 *handle = end + 1;
1436 netstack_hold(ns);
1437 }
1438 mutex_exit(&netstack_g_lock);
1439 return (ns);
1440 }
|
106 static void netstack_zone_shutdown(zoneid_t zoneid, void *arg);
107 static void netstack_zone_destroy(zoneid_t zoneid, void *arg);
108
109 static void netstack_shared_zone_add(zoneid_t zoneid);
110 static void netstack_shared_zone_remove(zoneid_t zoneid);
111 static void netstack_shared_kstat_add(kstat_t *ks);
112 static void netstack_shared_kstat_remove(kstat_t *ks);
113
114 typedef boolean_t applyfn_t(kmutex_t *, netstack_t *, int);
115
116 static void apply_all_netstacks(int, applyfn_t *);
117 static void apply_all_modules(netstack_t *, applyfn_t *);
118 static void apply_all_modules_reverse(netstack_t *, applyfn_t *);
119 static boolean_t netstack_apply_create(kmutex_t *, netstack_t *, int);
120 static boolean_t netstack_apply_shutdown(kmutex_t *, netstack_t *, int);
121 static boolean_t netstack_apply_destroy(kmutex_t *, netstack_t *, int);
122 static boolean_t wait_for_zone_creator(netstack_t *, kmutex_t *);
123 static boolean_t wait_for_nms_inprogress(netstack_t *, nm_state_t *,
124 kmutex_t *);
125
126 static void netstack_hold_locked(netstack_t *);
127
128 static ksema_t netstack_reap_limiter;
129 /*
130 * Hard-coded constant, but since this is not tunable in real-time, it seems
131 * making it an /etc/system tunable is better than nothing.
132 */
133 uint_t netstack_outstanding_reaps = 1024;
134
135 void
136 netstack_init(void)
137 {
138 mutex_init(&netstack_g_lock, NULL, MUTEX_DEFAULT, NULL);
139 mutex_init(&netstack_shared_lock, NULL, MUTEX_DEFAULT, NULL);
140
141 sema_init(&netstack_reap_limiter, netstack_outstanding_reaps, NULL,
142 SEMA_DRIVER, NULL);
143
144 netstack_initialized = 1;
145
146 /*
147 * We want to be informed each time a zone is created or
912 mutex_exit(&ns->netstack_lock);
913 mutex_enter(lockp);
914 mutex_enter(&ns->netstack_lock);
915 }
916 }
917 return (dropped);
918 }
919
920 /*
921 * Get the stack instance used in caller's zone.
922 * Increases the reference count, caller must do a netstack_rele.
923 * It can't be called after zone_destroy() has started.
924 */
925 netstack_t *
926 netstack_get_current(void)
927 {
928 netstack_t *ns;
929
930 ns = curproc->p_zone->zone_netstack;
931 ASSERT(ns != NULL);
932 return (netstack_hold_if_active(ns));
933 }
934
935 /*
936 * Find a stack instance given the cred.
937 * This is used by the modules to potentially allow for a future when
938 * something other than the zoneid is used to determine the stack.
939 */
940 netstack_t *
941 netstack_find_by_cred(const cred_t *cr)
942 {
943 zoneid_t zoneid = crgetzoneid(cr);
944
945 /* Handle the case when cr_zone is NULL */
946 if (zoneid == (zoneid_t)-1)
947 zoneid = GLOBAL_ZONEID;
948
949 /* For performance ... */
950 if (curproc->p_zone->zone_id == zoneid)
951 return (netstack_get_current());
952 else
953 return (netstack_find_by_zoneid(zoneid));
954 }
955
956 /*
957 * Find a stack instance given the zoneid.
958 * Increases the reference count if found; caller must do a
959 * netstack_rele().
960 *
961 * If there is no exact match then assume the shared stack instance
962 * matches.
963 *
964 * Skip the uninitialized and closing ones.
965 */
966 netstack_t *
967 netstack_find_by_zoneid(zoneid_t zoneid)
968 {
969 netstack_t *ns;
970 zone_t *zone;
971
972 zone = zone_find_by_id(zoneid);
973
974 if (zone == NULL)
975 return (NULL);
976
977 ASSERT(zone->zone_netstack != NULL);
978 ns = netstack_hold_if_active(zone->zone_netstack);
979
980 zone_rele(zone);
981 return (ns);
982 }
983
984 /*
985 * Find a stack instance given the zoneid. Can only be called from
986 * the create callback. See the comments in zone_find_by_id_nolock why
987 * that limitation exists.
988 *
989 * Increases the reference count if found; caller must do a
990 * netstack_rele().
991 *
992 * If there is no exact match then assume the shared stack instance
993 * matches.
994 *
 * Skip the uninitialized and closing ones.
996 */
997 netstack_t *
998 netstack_find_by_zoneid_nolock(zoneid_t zoneid)
999 {
1000 zone_t *zone;
1001
1002 zone = zone_find_by_id_nolock(zoneid);
1003
1004 if (zone == NULL)
1005 return (NULL);
1006
1007 ASSERT(zone->zone_netstack != NULL);
1008 /* zone_find_by_id_nolock does not have a hold on the zone */
1009 return (netstack_hold_if_active(zone->zone_netstack));
1010 }
1011
1012 /*
 * Find a stack instance given the stackid, requiring an exact match.
1014 * Increases the reference count if found; caller must do a
1015 * netstack_rele().
1016 *
 * Skip the uninitialized and closing ones.
1018 */
1019 netstack_t *
1020 netstack_find_by_stackid(netstackid_t stackid)
1021 {
1022 netstack_t *ns;
1023
1024 mutex_enter(&netstack_g_lock);
1025 for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
1026 /* Can't use hold_if_active because of stackid check. */
1027 mutex_enter(&ns->netstack_lock);
1028 if (ns->netstack_stackid == stackid &&
1029 !(ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))) {
1030 netstack_hold_locked(ns);
1031 mutex_exit(&ns->netstack_lock);
1032 mutex_exit(&netstack_g_lock);
1033 return (ns);
1034 }
1035 mutex_exit(&ns->netstack_lock);
1036 }
1037 mutex_exit(&netstack_g_lock);
1038 return (NULL);
1039 }
1040
1041 boolean_t
1042 netstack_inuse_by_stackid(netstackid_t stackid)
1043 {
1044 netstack_t *ns;
1045 boolean_t rval = B_FALSE;
1046
1047 mutex_enter(&netstack_g_lock);
1048
1049 for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
1050 if (ns->netstack_stackid == stackid) {
1051 rval = B_TRUE;
1141 * we can use KM_SLEEP and semaphores.
1142 */
1143 if (sema_tryp(&netstack_reap_limiter) == 0) {
1144 /*
1145 * Indicate we're slamming against a limit.
1146 */
1147 hrtime_t measurement = gethrtime();
1148
1149 sema_p(&netstack_reap_limiter);
1150 /* Capture delay in ns. */
1151 DTRACE_PROBE1(netstack__reap__rate__limited,
1152 hrtime_t, gethrtime() - measurement);
1153 }
1154
1155 /* TQ_SLEEP should prevent taskq_dispatch() from failing. */
1156 (void) taskq_dispatch(system_taskq, netstack_reap, ns,
1157 TQ_SLEEP);
1158 }
1159 }
1160
1161 static void
1162 netstack_hold_locked(netstack_t *ns)
1163 {
1164 ASSERT(MUTEX_HELD(&ns->netstack_lock));
1165 ns->netstack_refcnt++;
1166 ASSERT(ns->netstack_refcnt > 0);
1167 DTRACE_PROBE1(netstack__inc__ref, netstack_t *, ns);
1168 }
1169
1170 /*
1171 * If the passed-in netstack isn't active (i.e. it's uninitialized or closing),
1172 * return NULL, otherwise return it with its reference held. Common code
1173 * for many netstack_find*() functions.
1174 */
1175 netstack_t *
1176 netstack_hold_if_active(netstack_t *ns)
1177 {
1178 netstack_t *retval;
1179
1180 mutex_enter(&ns->netstack_lock);
1181 if (ns->netstack_flags & (NSF_UNINIT | NSF_CLOSING)) {
1182 retval = NULL;
1183 } else {
1184 netstack_hold_locked(ns);
1185 retval = ns;
1186 }
1187 mutex_exit(&ns->netstack_lock);
1188
1189 return (retval);
1190 }
1191
1192 void
1193 netstack_hold(netstack_t *ns)
1194 {
1195 mutex_enter(&ns->netstack_lock);
1196 netstack_hold_locked(ns);
1197 mutex_exit(&ns->netstack_lock);
1198 }
1199
1200 /*
1201 * To support kstat_create_netstack() using kstat_zone_add we need
1202 * to track both
1203 * - all zoneids that use the global/shared stack
1204 * - all kstats that have been added for the shared stack
1205 */
1206 kstat_t *
1207 kstat_create_netstack(char *ks_module, int ks_instance, char *ks_name,
1208 char *ks_class, uchar_t ks_type, uint_t ks_ndata, uchar_t ks_flags,
1209 netstackid_t ks_netstackid)
1210 {
1211 kstat_t *ks;
1212
1213 if (ks_netstackid == GLOBAL_NETSTACKID) {
1214 ks = kstat_create_zone(ks_module, ks_instance, ks_name,
1215 ks_class, ks_type, ks_ndata, ks_flags, GLOBAL_ZONEID);
1216 if (ks != NULL)
1217 netstack_shared_kstat_add(ks);
1418 {
1419 }
1420
1421 netstack_t *
1422 netstack_next(netstack_handle_t *handle)
1423 {
1424 netstack_t *ns;
1425 int i, end;
1426
1427 end = *handle;
1428 /* Walk skipping *handle number of instances */
1429
1430 /* Look if there is a matching stack instance */
1431 mutex_enter(&netstack_g_lock);
1432 ns = netstack_head;
1433 for (i = 0; i < end; i++) {
1434 if (ns == NULL)
1435 break;
1436 ns = ns->netstack_next;
1437 }
1438 /*
1439 * Skip those that aren't really here (uninitialized or closing).
1440 * Can't use hold_if_active because of "end" tracking.
1441 */
1442 while (ns != NULL) {
1443 mutex_enter(&ns->netstack_lock);
1444 if ((ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING)) == 0) {
1445 *handle = end + 1;
1446 netstack_hold_locked(ns);
1447 mutex_exit(&ns->netstack_lock);
1448 break;
1449 }
1450 mutex_exit(&ns->netstack_lock);
1451 end++;
1452 ns = ns->netstack_next;
1453 }
1454 mutex_exit(&netstack_g_lock);
1455 return (ns);
1456 }
|