Print this page
MFV: illumos-gate@7ec6bfcf4280022b95218f03e28fb50217e4b6ff
9902 mac: mac_soft_ring_poll should use size_t
Reviewed by: Yuri Pankov <yuripv@yuripv.net>
Reviewed by: Andy Fiddaman <andy@omniosce.org>
Reviewed by: Dan McDonald <danmcd@joyent.com>
Reviewed by: Ryan Zezeski <ryan.zeseski@joyent.com>
Approved by: Dan McDonald <danmcd@joyent.com>
Author: Toomas Soome <tsoome@me.com>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/mac/mac_soft_ring.c
+++ new/usr/src/uts/common/io/mac/mac_soft_ring.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 * Copyright 2017 Joyent, Inc.
25 25 */
26 26
27 27 /*
28 28 * General Soft rings - Simulating Rx rings in S/W.
29 29 *
30 30 * Soft ring is a data abstraction containing a queue and a worker
31 31 * thread and represents a hardware Rx ring in software. Each soft
32 32 * ring set can have a collection of soft rings for separating
33 33 * L3/L4 specific traffic (IPv4 from IPv6 or TCP from UDP) or for
34 34 * allowing a higher degree of parallelism by sending traffic to
35 35 * one of the soft rings for a SRS (using a hash on src IP or port).
36 36 * Each soft ring worker thread can be bound to a different CPU
37 37 * allowing the processing for each soft ring to happen in parallel
38 38 * and independent from each other.
39 39 *
40 40 * Protocol soft rings:
41 41 *
 42  42  * Each SRS has at a minimum 3 softrings. One each for IPv4 TCP,
43 43 * IPv4 UDP and rest (OTH - for IPv6 and everything else). The
44 44 * SRS does dynamic polling and enforces link level bandwidth but
45 45 * it does so for all traffic (IPv4 and IPv6 and all protocols) on
46 46 * that link. However, each protocol layer wants a different
47 47 * behaviour. For instance IPv4 TCP has per CPU squeues which
48 48 * enforce their own polling and flow control so IPv4 TCP traffic
49 49 * needs to go to a separate soft ring which can be polled by the
50 50 * TCP squeue. It also allows TCP squeue to push back flow control
51 51 * all the way to NIC hardware (if it puts its corresponding soft
52 52 * ring in the poll mode and soft ring queue builds up, the
53 53 * shared srs_poll_pkt_cnt goes up and SRS automatically stops
54 54 * more packets from entering the system).
55 55 *
56 56 * Similarly, the UDP benefits from a DLS bypass and packet chaining
57 57 * so sending it to a separate soft ring is desired. All the rest of
58 58 * the traffic (including IPv6 is sent to OTH softring). The IPv6
 59  59  * traffic currently goes through OTH softring and via DLS because
 60  60  * it needs more processing to be done. Irrespective of the sap
61 61 * (IPv4 or IPv6) or the transport, the dynamic polling, B/W enforcement,
62 62 * cpu assignment, fanout, etc apply to all traffic since they
 63  63  * are implemented by the SRS which is agnostic to sap or transport.
64 64 *
65 65 * Fanout soft rings:
66 66 *
67 67 * On a multithreaded system, we can assign more CPU and multi thread
68 68 * the stack by creating a soft ring per CPU and spreading traffic
69 69 * based on a hash computed on src IP etc. Since we still need to
70 70 * keep the protocol separation, we create a set of 3 soft ring per
71 71 * CPU (specified by cpu list or degree of fanout).
72 72 *
73 73 * NOTE: See the block level comment on top of mac_sched.c
74 74 */
75 75
76 76 #include <sys/types.h>
77 77 #include <sys/callb.h>
78 78 #include <sys/sdt.h>
79 79 #include <sys/strsubr.h>
80 80 #include <sys/strsun.h>
81 81 #include <sys/vlan.h>
82 82 #include <inet/ipsec_impl.h>
83 83 #include <inet/ip_impl.h>
84 84 #include <inet/sadb.h>
85 85 #include <inet/ipsecesp.h>
86 86 #include <inet/ipsecah.h>
87 87
88 88 #include <sys/mac_impl.h>
89 89 #include <sys/mac_client_impl.h>
90 90 #include <sys/mac_soft_ring.h>
91 91 #include <sys/mac_flow_impl.h>
92 92 #include <sys/mac_stat.h>
93 93
/* Forward declarations for the drain, timeout and worker routines below. */
static void mac_rx_soft_ring_drain(mac_soft_ring_t *);
static void mac_soft_ring_fire(void *);
static void mac_soft_ring_worker(mac_soft_ring_t *);
static void mac_tx_soft_ring_drain(mac_soft_ring_t *);

/*
 * Tunables: the maximum number of packets a Tx soft ring will queue,
 * and the high-water mark at which Tx flow control kicks in (clamped
 * to the max in mac_soft_ring_create()).
 */
uint32_t mac_tx_soft_ring_max_q_cnt = 100000;
uint32_t mac_tx_soft_ring_hiwat = 1000;

extern kmem_cache_t *mac_soft_ring_cache;

/*
 * Append a soft ring to the tail of the SRS's doubly linked list of
 * soft rings and bump srs_soft_ring_count.  Callers in this file
 * invoke it with mac_srs->srs_lock held.
 */
#define	ADD_SOFTRING_TO_SET(mac_srs, softring) {			\
	if (mac_srs->srs_soft_ring_head == NULL) {			\
		mac_srs->srs_soft_ring_head = softring;			\
		mac_srs->srs_soft_ring_tail = softring;			\
	} else {							\
		/* ADD to the list */					\
		softring->s_ring_prev =					\
		    mac_srs->srs_soft_ring_tail;			\
		mac_srs->srs_soft_ring_tail->s_ring_next = softring;	\
		mac_srs->srs_soft_ring_tail = softring;			\
	}								\
	mac_srs->srs_soft_ring_count++;					\
}
117 117
118 118 /*
119 119 * mac_soft_ring_worker_wakeup
120 120 *
121 121 * Wake up the soft ring worker thread to process the queue as long
122 122 * as no one else is processing it and upper layer (client) is still
123 123 * ready to receive packets.
124 124 */
125 125 void
126 126 mac_soft_ring_worker_wakeup(mac_soft_ring_t *ringp)
127 127 {
128 128 ASSERT(MUTEX_HELD(&ringp->s_ring_lock));
129 129 if (!(ringp->s_ring_state & S_RING_PROC) &&
130 130 !(ringp->s_ring_state & S_RING_BLANK) &&
131 131 (ringp->s_ring_tid == NULL)) {
132 132 if (ringp->s_ring_wait != 0) {
133 133 ringp->s_ring_tid =
134 134 timeout(mac_soft_ring_fire, ringp,
135 135 ringp->s_ring_wait);
136 136 } else {
137 137 /* Schedule the worker thread. */
138 138 cv_signal(&ringp->s_ring_async);
139 139 }
140 140 }
141 141 }
142 142
143 143 /*
144 144 * mac_soft_ring_create
|
↓ open down ↓ |
144 lines elided |
↑ open up ↑ |
145 145 *
146 146 * Create a soft ring, do the necessary setup and bind the worker
147 147 * thread to the assigned CPU.
148 148 */
149 149 mac_soft_ring_t *
150 150 mac_soft_ring_create(int id, clock_t wait, uint16_t type,
151 151 pri_t pri, mac_client_impl_t *mcip, mac_soft_ring_set_t *mac_srs,
152 152 processorid_t cpuid, mac_direct_rx_t rx_func, void *x_arg1,
153 153 mac_resource_handle_t x_arg2)
154 154 {
155 - mac_soft_ring_t *ringp;
156 - char name[S_RING_NAMELEN];
155 + mac_soft_ring_t *ringp;
156 + char name[S_RING_NAMELEN];
157 157
158 158 bzero(name, 64);
159 159 ringp = kmem_cache_alloc(mac_soft_ring_cache, KM_SLEEP);
160 160
161 161 if (type & ST_RING_TCP) {
162 162 (void) snprintf(name, sizeof (name),
163 163 "mac_tcp_soft_ring_%d_%p", id, (void *)mac_srs);
164 164 } else if (type & ST_RING_UDP) {
165 165 (void) snprintf(name, sizeof (name),
166 166 "mac_udp_soft_ring_%d_%p", id, (void *)mac_srs);
167 167 } else if (type & ST_RING_OTH) {
168 168 (void) snprintf(name, sizeof (name),
169 169 "mac_oth_soft_ring_%d_%p", id, (void *)mac_srs);
170 170 } else {
171 171 ASSERT(type & ST_RING_TX);
172 172 (void) snprintf(name, sizeof (name),
173 173 "mac_tx_soft_ring_%d_%p", id, (void *)mac_srs);
174 174 }
175 175
176 176 bzero(ringp, sizeof (mac_soft_ring_t));
177 177 (void) strncpy(ringp->s_ring_name, name, S_RING_NAMELEN + 1);
178 178 ringp->s_ring_name[S_RING_NAMELEN] = '\0';
179 179 mutex_init(&ringp->s_ring_lock, NULL, MUTEX_DEFAULT, NULL);
180 180 ringp->s_ring_notify_cb_info.mcbi_lockp = &ringp->s_ring_lock;
181 181
182 182 ringp->s_ring_type = type;
183 183 ringp->s_ring_wait = MSEC_TO_TICK(wait);
184 184 ringp->s_ring_mcip = mcip;
185 185 ringp->s_ring_set = mac_srs;
186 186
187 187 /*
188 188 * Protect against access from DR callbacks (mac_walk_srs_bind/unbind)
189 189 * which can't grab the mac perimeter
190 190 */
191 191 mutex_enter(&mac_srs->srs_lock);
192 192 ADD_SOFTRING_TO_SET(mac_srs, ringp);
193 193 mutex_exit(&mac_srs->srs_lock);
194 194
195 195 /*
196 196 * set the bind CPU to -1 to indicate
197 197 * no thread affinity set
198 198 */
199 199 ringp->s_ring_cpuid = ringp->s_ring_cpuid_save = -1;
200 200 ringp->s_ring_worker = thread_create(NULL, 0,
201 201 mac_soft_ring_worker, ringp, 0, &p0, TS_RUN, pri);
202 202 if (type & ST_RING_TX) {
203 203 ringp->s_ring_drain_func = mac_tx_soft_ring_drain;
204 204 ringp->s_ring_tx_arg1 = x_arg1;
205 205 ringp->s_ring_tx_arg2 = x_arg2;
206 206 ringp->s_ring_tx_max_q_cnt = mac_tx_soft_ring_max_q_cnt;
207 207 ringp->s_ring_tx_hiwat =
208 208 (mac_tx_soft_ring_hiwat > mac_tx_soft_ring_max_q_cnt) ?
209 209 mac_tx_soft_ring_max_q_cnt : mac_tx_soft_ring_hiwat;
210 210 if (mcip->mci_state_flags & MCIS_IS_AGGR) {
211 211 mac_srs_tx_t *tx = &mac_srs->srs_tx;
212 212
213 213 ASSERT(tx->st_soft_rings[
214 214 ((mac_ring_t *)x_arg2)->mr_index] == NULL);
215 215 tx->st_soft_rings[((mac_ring_t *)x_arg2)->mr_index] =
216 216 ringp;
217 217 }
218 218 } else {
219 219 ringp->s_ring_drain_func = mac_rx_soft_ring_drain;
220 220 ringp->s_ring_rx_func = rx_func;
221 221 ringp->s_ring_rx_arg1 = x_arg1;
222 222 ringp->s_ring_rx_arg2 = x_arg2;
223 223 if (mac_srs->srs_state & SRS_SOFTRING_QUEUE)
224 224 ringp->s_ring_type |= ST_RING_WORKER_ONLY;
225 225 }
226 226 if (cpuid != -1)
227 227 (void) mac_soft_ring_bind(ringp, cpuid);
228 228
229 229 mac_soft_ring_stat_create(ringp);
230 230
231 231 return (ringp);
232 232 }
233 233
234 234 /*
235 235 * mac_soft_ring_free
236 236 *
237 237 * Free the soft ring once we are done with it.
238 238 */
void
mac_soft_ring_free(mac_soft_ring_t *softring)
{
	/* Only tear down rings that are condemned, done, and not draining. */
	ASSERT((softring->s_ring_state &
	    (S_RING_CONDEMNED | S_RING_CONDEMNED_DONE | S_RING_PROC)) ==
	    (S_RING_CONDEMNED | S_RING_CONDEMNED_DONE));
	/* Drop anything still sitting on the queue. */
	mac_pkt_drop(NULL, NULL, softring->s_ring_first, B_FALSE);
	softring->s_ring_tx_arg2 = NULL;
	/* Release stats, notification callbacks, and the ring itself. */
	mac_soft_ring_stat_delete(softring);
	mac_callback_free(softring->s_ring_notify_cb_list);
	kmem_cache_free(mac_soft_ring_cache, softring);
}
251 251
252 252 int mac_soft_ring_thread_bind = 1;
253 253
254 254 /*
255 255 * mac_soft_ring_bind
256 256 *
257 257 * Bind a soft ring worker thread to supplied CPU.
258 258 */
259 259 cpu_t *
260 260 mac_soft_ring_bind(mac_soft_ring_t *ringp, processorid_t cpuid)
261 261 {
262 262 cpu_t *cp;
263 263 boolean_t clear = B_FALSE;
264 264
265 265 ASSERT(MUTEX_HELD(&cpu_lock));
266 266
267 267 if (mac_soft_ring_thread_bind == 0) {
268 268 DTRACE_PROBE1(mac__soft__ring__no__cpu__bound,
269 269 mac_soft_ring_t *, ringp);
270 270 return (NULL);
271 271 }
272 272
273 273 cp = cpu_get(cpuid);
274 274 if (cp == NULL || !cpu_is_online(cp))
275 275 return (NULL);
276 276
277 277 mutex_enter(&ringp->s_ring_lock);
278 278 ringp->s_ring_state |= S_RING_BOUND;
279 279 if (ringp->s_ring_cpuid != -1)
280 280 clear = B_TRUE;
281 281 ringp->s_ring_cpuid = cpuid;
282 282 mutex_exit(&ringp->s_ring_lock);
283 283
284 284 if (clear)
285 285 thread_affinity_clear(ringp->s_ring_worker);
286 286
287 287 DTRACE_PROBE2(mac__soft__ring__cpu__bound, mac_soft_ring_t *,
288 288 ringp, processorid_t, cpuid);
289 289
290 290 thread_affinity_set(ringp->s_ring_worker, cpuid);
291 291
292 292 return (cp);
293 293 }
294 294
295 295 /*
296 296 * mac_soft_ring_unbind
297 297 *
298 298 * Un Bind a soft ring worker thread.
299 299 */
300 300 void
301 301 mac_soft_ring_unbind(mac_soft_ring_t *ringp)
302 302 {
303 303 ASSERT(MUTEX_HELD(&cpu_lock));
304 304
305 305 mutex_enter(&ringp->s_ring_lock);
306 306 if (!(ringp->s_ring_state & S_RING_BOUND)) {
307 307 ASSERT(ringp->s_ring_cpuid == -1);
308 308 mutex_exit(&ringp->s_ring_lock);
309 309 return;
310 310 }
311 311
312 312 ringp->s_ring_cpuid = -1;
313 313 ringp->s_ring_state &= ~S_RING_BOUND;
314 314 thread_affinity_clear(ringp->s_ring_worker);
315 315 mutex_exit(&ringp->s_ring_lock);
316 316 }
317 317
318 318 /*
319 319 * PRIVATE FUNCTIONS
320 320 */
321 321
322 322 static void
323 323 mac_soft_ring_fire(void *arg)
324 324 {
325 325 mac_soft_ring_t *ringp = arg;
326 326
327 327 mutex_enter(&ringp->s_ring_lock);
328 328 if (ringp->s_ring_tid == NULL) {
329 329 mutex_exit(&ringp->s_ring_lock);
330 330 return;
331 331 }
332 332
333 333 ringp->s_ring_tid = NULL;
334 334
335 335 if (!(ringp->s_ring_state & S_RING_PROC)) {
336 336 cv_signal(&ringp->s_ring_async);
337 337 }
338 338 mutex_exit(&ringp->s_ring_lock);
339 339 }
340 340
341 341 /*
342 342 * mac_rx_soft_ring_drain
343 343 *
344 344 * Called when worker thread model (ST_RING_WORKER_ONLY) of processing
345 345 * incoming packets is used. s_ring_first contain the queued packets.
346 346 * s_ring_rx_func contains the upper level (client) routine where the
|
↓ open down ↓ |
180 lines elided |
↑ open up ↑ |
347 347 * packets are destined and s_ring_rx_arg1/s_ring_rx_arg2 are the
348 348 * cookie meant for the client.
349 349 */
/* ARGSUSED */
static void
mac_rx_soft_ring_drain(mac_soft_ring_t *ringp)
{
	mblk_t			*mp;
	void			*arg1;
	mac_resource_handle_t	arg2;
	timeout_id_t		tid;
	mac_direct_rx_t		proc;
	size_t			sz;
	int			cnt;
	mac_soft_ring_set_t	*mac_srs = ringp->s_ring_set;

	ringp->s_ring_run = curthread;
	ASSERT(mutex_owned(&ringp->s_ring_lock));
	ASSERT(!(ringp->s_ring_state & S_RING_PROC));

	/* Claim any pending wakeup timeout; it is cancelled below. */
	if ((tid = ringp->s_ring_tid) != NULL)
		ringp->s_ring_tid = NULL;

	ringp->s_ring_state |= S_RING_PROC;

	proc = ringp->s_ring_rx_func;
	arg1 = ringp->s_ring_rx_arg1;
	arg2 = ringp->s_ring_rx_arg2;

	while ((ringp->s_ring_first != NULL) &&
	    !(ringp->s_ring_state & S_RING_PAUSE)) {
		/* Detach the whole queued chain and its accounting. */
		mp = ringp->s_ring_first;
		ringp->s_ring_first = NULL;
		ringp->s_ring_last = NULL;
		cnt = ringp->s_ring_count;
		ringp->s_ring_count = 0;
		sz = ringp->s_ring_size;
		ringp->s_ring_size = 0;
		/* Drop the ring lock while calling up into the client. */
		mutex_exit(&ringp->s_ring_lock);

		if (tid != NULL) {
			(void) untimeout(tid);
			tid = NULL;
		}

		/* Deliver the chain to the client (e.g. IP). */
		(*proc)(arg1, arg2, mp, NULL);

		/*
		 * If we have a soft ring set which is doing
		 * bandwidth control, we need to decrement its
		 * srs_size so it can have an accurate idea of
		 * what is the real data queued between SRS and
		 * its soft rings. We decrement the size for a
		 * packet only when it gets processed by both
		 * SRS and the soft ring.
		 */
		mutex_enter(&mac_srs->srs_lock);
		MAC_UPDATE_SRS_COUNT_LOCKED(mac_srs, cnt);
		MAC_UPDATE_SRS_SIZE_LOCKED(mac_srs, sz);
		mutex_exit(&mac_srs->srs_lock);

		mutex_enter(&ringp->s_ring_lock);
	}
	ringp->s_ring_state &= ~S_RING_PROC;
	/* Wake any client thread waiting for the drain to finish. */
	if (ringp->s_ring_state & S_RING_CLIENT_WAIT)
		cv_signal(&ringp->s_ring_client_cv);
	ringp->s_ring_run = NULL;
}
415 415
416 416 /*
417 417 * mac_soft_ring_worker
418 418 *
419 419 * The soft ring worker routine to process any queued packets. In
420 420 * normal case, the worker thread is bound to a CPU. It the soft
421 421 * ring is dealing with TCP packets, then the worker thread will
422 422 * be bound to the same CPU as the TCP squeue.
423 423 */
static void
mac_soft_ring_worker(mac_soft_ring_t *ringp)
{
	kmutex_t *lock = &ringp->s_ring_lock;
	kcondvar_t *async = &ringp->s_ring_async;
	mac_soft_ring_set_t *srs = ringp->s_ring_set;
	callb_cpr_t cprinfo;

	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, "mac_soft_ring");
	mutex_enter(lock);
start:
	for (;;) {
		/*
		 * Sleep (CPR-safe) while there is nothing to drain:
		 * the queue is empty or the ring is blocked/blanked
		 * (and no pause was requested), or someone else is
		 * already processing the ring.
		 */
		while (((ringp->s_ring_first == NULL ||
		    (ringp->s_ring_state & (S_RING_BLOCK|S_RING_BLANK))) &&
		    !(ringp->s_ring_state & S_RING_PAUSE)) ||
		    (ringp->s_ring_state & S_RING_PROC)) {

			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(async, lock);
			CALLB_CPR_SAFE_END(&cprinfo, lock);
		}

		/*
		 * Either we have work to do, or we have been asked to
		 * shutdown temporarily or permanently
		 */
		if (ringp->s_ring_state & S_RING_PAUSE)
			goto done;

		ringp->s_ring_drain_func(ringp);
	}
done:
	/*
	 * Quiesce path.  srs_lock is taken before s_ring_lock here, so
	 * drop the ring lock first and reacquire both in that order.
	 */
	mutex_exit(lock);
	mutex_enter(&srs->srs_lock);
	mutex_enter(lock);

	ringp->s_ring_state |= S_RING_QUIESCE_DONE;
	if (!(ringp->s_ring_state & S_RING_CONDEMNED)) {
		/* Tell the SRS we are quiesced, then wait for a verdict. */
		srs->srs_soft_ring_quiesced_count++;
		cv_broadcast(&srs->srs_async);
		mutex_exit(&srs->srs_lock);
		while (!(ringp->s_ring_state &
		    (S_RING_RESTART | S_RING_CONDEMNED)))
			cv_wait(&ringp->s_ring_async, &ringp->s_ring_lock);
		/* Reacquire both locks in the proper order. */
		mutex_exit(lock);
		mutex_enter(&srs->srs_lock);
		mutex_enter(lock);
		srs->srs_soft_ring_quiesced_count--;
		if (ringp->s_ring_state & S_RING_RESTART) {
			/* Restarted: clear quiesce state and resume. */
			ASSERT(!(ringp->s_ring_state & S_RING_CONDEMNED));
			ringp->s_ring_state &= ~(S_RING_RESTART |
			    S_RING_QUIESCE | S_RING_QUIESCE_DONE);
			cv_broadcast(&srs->srs_async);
			mutex_exit(&srs->srs_lock);
			goto start;
		}
	}
	/* Condemned: acknowledge and exit this worker thread for good. */
	ASSERT(ringp->s_ring_state & S_RING_CONDEMNED);
	ringp->s_ring_state |= S_RING_CONDEMNED_DONE;
	CALLB_CPR_EXIT(&cprinfo);
	srs->srs_soft_ring_condemned_count++;
	cv_broadcast(&srs->srs_async);
	mutex_exit(&srs->srs_lock);
	thread_exit();
}
489 489
490 490 /*
491 491 * mac_soft_ring_intr_enable and mac_soft_ring_intr_disable
492 492 *
493 493 * these functions are called to toggle the sending of packets to the
494 494 * client. They are called by the client. the client gets the name
495 495 * of these routine and corresponding cookie (pointing to softring)
496 496 * during capability negotiation at setup time.
497 497 *
498 498 * Enabling is allow the processing thread to send packets to the
499 499 * client while disabling does the opposite.
500 500 */
501 501 void
502 502 mac_soft_ring_intr_enable(void *arg)
503 503 {
504 504 mac_soft_ring_t *ringp = (mac_soft_ring_t *)arg;
505 505 mutex_enter(&ringp->s_ring_lock);
506 506 ringp->s_ring_state &= ~S_RING_BLANK;
507 507 if (ringp->s_ring_first != NULL)
508 508 mac_soft_ring_worker_wakeup(ringp);
509 509 mutex_exit(&ringp->s_ring_lock);
510 510 }
511 511
512 512 boolean_t
513 513 mac_soft_ring_intr_disable(void *arg)
514 514 {
515 515 mac_soft_ring_t *ringp = (mac_soft_ring_t *)arg;
516 516 boolean_t sring_blanked = B_FALSE;
517 517 /*
518 518 * Stop worker thread from sending packets above.
519 519 * Squeue will poll soft ring when it needs packets.
520 520 */
521 521 mutex_enter(&ringp->s_ring_lock);
522 522 if (!(ringp->s_ring_state & S_RING_PROC)) {
523 523 ringp->s_ring_state |= S_RING_BLANK;
524 524 sring_blanked = B_TRUE;
525 525 }
526 526 mutex_exit(&ringp->s_ring_lock);
527 527 return (sring_blanked);
528 528 }
|
↓ open down ↓ |
161 lines elided |
↑ open up ↑ |
529 529
530 530 /*
531 531 * mac_soft_ring_poll
532 532 *
533 533 * This routine is called by the client to poll for packets from
534 534 * the soft ring. The function name and cookie corresponding to
535 535 * the soft ring is exchanged during capability negotiation during
536 536 * setup.
537 537 */
mblk_t *
mac_soft_ring_poll(mac_soft_ring_t *ringp, size_t bytes_to_pickup)
{
	mblk_t			*head, *tail;
	mblk_t			*mp;
	size_t			sz = 0;
	int			cnt = 0;
	mac_soft_ring_set_t	*mac_srs = ringp->s_ring_set;

	ASSERT(mac_srs != NULL);

	mutex_enter(&ringp->s_ring_lock);
	head = tail = mp = ringp->s_ring_first;
	if (head == NULL) {
		/* Nothing queued. */
		mutex_exit(&ringp->s_ring_lock);
		return (NULL);
	}

	if (ringp->s_ring_size <= bytes_to_pickup) {
		/* The whole queue fits in the budget: take it all. */
		head = ringp->s_ring_first;
		ringp->s_ring_first = NULL;
		ringp->s_ring_last = NULL;
		cnt = ringp->s_ring_count;
		ringp->s_ring_count = 0;
		sz = ringp->s_ring_size;
		ringp->s_ring_size = 0;
	} else {
		/*
		 * Walk the chain until just past the byte budget and
		 * split it there; the remainder stays on the ring.
		 */
		while (mp && sz <= bytes_to_pickup) {
			sz += msgdsize(mp);
			cnt++;
			tail = mp;
			mp = mp->b_next;
		}
		ringp->s_ring_count -= cnt;
		ringp->s_ring_size -= sz;
		tail->b_next = NULL;
		if (mp == NULL) {
			ringp->s_ring_first = NULL;
			ringp->s_ring_last = NULL;
			ASSERT(ringp->s_ring_count == 0);
		} else {
			ringp->s_ring_first = mp;
		}
	}

	mutex_exit(&ringp->s_ring_lock);
	/*
	 * Update the shared count and size counters so
	 * that SRS has an accurate idea of queued packets.
	 */
	mutex_enter(&mac_srs->srs_lock);
	MAC_UPDATE_SRS_COUNT_LOCKED(mac_srs, cnt);
	MAC_UPDATE_SRS_SIZE_LOCKED(mac_srs, sz);
	mutex_exit(&mac_srs->srs_lock);
	return (head);
}
594 594
595 595 /*
596 596 * mac_soft_ring_dls_bypass
597 597 *
598 598 * Enable direct client (IP) callback function from the softrings.
599 599 * Callers need to make sure they don't need any DLS layer processing
600 600 */
601 601 void
602 602 mac_soft_ring_dls_bypass(void *arg, mac_direct_rx_t rx_func, void *rx_arg1)
603 603 {
604 604 mac_soft_ring_t *softring = arg;
605 605 mac_soft_ring_set_t *srs;
606 606
607 607 ASSERT(rx_func != NULL);
608 608
609 609 mutex_enter(&softring->s_ring_lock);
610 610 softring->s_ring_rx_func = rx_func;
611 611 softring->s_ring_rx_arg1 = rx_arg1;
612 612 mutex_exit(&softring->s_ring_lock);
613 613
614 614 srs = softring->s_ring_set;
615 615 mutex_enter(&srs->srs_lock);
616 616 srs->srs_type |= SRST_DLS_BYPASS;
617 617 mutex_exit(&srs->srs_lock);
618 618 }
619 619
620 620 /*
621 621 * mac_soft_ring_signal
622 622 *
623 623 * Typically used to set the soft ring state to QUIESCE, CONDEMNED, or
624 624 * RESTART.
625 625 *
626 626 * In the Rx side, the quiescing is done bottom up. After the Rx upcalls
627 627 * from the driver are done, then the Rx SRS is quiesced and only then can
628 628 * we signal the soft rings. Thus this function can't be called arbitrarily
629 629 * without satisfying the prerequisites. On the Tx side, the threads from
630 630 * top need to quiesced, then the Tx SRS and only then can we signal the
631 631 * Tx soft rings.
632 632 */
633 633 void
634 634 mac_soft_ring_signal(mac_soft_ring_t *softring, uint_t sr_flag)
635 635 {
636 636 mutex_enter(&softring->s_ring_lock);
637 637 softring->s_ring_state |= sr_flag;
638 638 cv_signal(&softring->s_ring_async);
639 639 mutex_exit(&softring->s_ring_lock);
640 640 }
|
↓ open down ↓ |
91 lines elided |
↑ open up ↑ |
641 641
642 642 /*
643 643 * mac_tx_soft_ring_drain
644 644 *
645 645 * The transmit side drain routine in case the soft ring was being
646 646 * used to transmit packets.
647 647 */
static void
mac_tx_soft_ring_drain(mac_soft_ring_t *ringp)
{
	mblk_t			*mp;
	void			*arg1;
	void			*arg2;
	mblk_t			*tail;
	uint_t			saved_pkt_count, saved_size;
	mac_tx_stats_t		stats;
	mac_soft_ring_set_t	*mac_srs = ringp->s_ring_set;

	saved_pkt_count = saved_size = 0;
	ringp->s_ring_run = curthread;
	ASSERT(mutex_owned(&ringp->s_ring_lock));
	ASSERT(!(ringp->s_ring_state & S_RING_PROC));

	ringp->s_ring_state |= S_RING_PROC;
	arg1 = ringp->s_ring_tx_arg1;
	arg2 = ringp->s_ring_tx_arg2;

	while (ringp->s_ring_first != NULL) {
		/* Detach the queued chain and remember its accounting. */
		mp = ringp->s_ring_first;
		tail = ringp->s_ring_last;
		saved_pkt_count = ringp->s_ring_count;
		saved_size = ringp->s_ring_size;
		ringp->s_ring_first = NULL;
		ringp->s_ring_last = NULL;
		ringp->s_ring_count = 0;
		ringp->s_ring_size = 0;
		/* Drop the ring lock across the actual transmit. */
		mutex_exit(&ringp->s_ring_lock);

		/* Any untransmitted remainder of the chain comes back. */
		mp = mac_tx_send(arg1, arg2, mp, &stats);

		mutex_enter(&ringp->s_ring_lock);
		if (mp != NULL) {
			/* Device out of tx desc, set block */
			tail->b_next = ringp->s_ring_first;
			ringp->s_ring_first = mp;
			/* Re-add only what was not sent. */
			ringp->s_ring_count +=
			    (saved_pkt_count - stats.mts_opackets);
			ringp->s_ring_size += (saved_size - stats.mts_obytes);
			if (ringp->s_ring_last == NULL)
				ringp->s_ring_last = tail;

			if (ringp->s_ring_tx_woken_up) {
				/*
				 * A Tx wakeup raced with us; consume it
				 * and let the worker retry rather than
				 * marking the ring blocked.
				 */
				ringp->s_ring_tx_woken_up = B_FALSE;
			} else {
				ringp->s_ring_state |= S_RING_BLOCK;
				ringp->s_st_stat.mts_blockcnt++;
			}

			ringp->s_ring_state &= ~S_RING_PROC;
			ringp->s_ring_run = NULL;
			return;
		} else {
			/* Fully sent: fold stats into SRS and softring. */
			ringp->s_ring_tx_woken_up = B_FALSE;
			SRS_TX_STATS_UPDATE(mac_srs, &stats);
			SOFTRING_TX_STATS_UPDATE(ringp, &stats);
		}
	}

	/* Queue drained: notify any flow-controlled clients. */
	if (ringp->s_ring_count == 0 && ringp->s_ring_state &
	    (S_RING_TX_HIWAT | S_RING_WAKEUP_CLIENT | S_RING_ENQUEUED)) {
		mac_client_impl_t *mcip = ringp->s_ring_mcip;
		boolean_t wakeup_required = B_FALSE;

		if (ringp->s_ring_state &
		    (S_RING_TX_HIWAT|S_RING_WAKEUP_CLIENT)) {
			wakeup_required = B_TRUE;
		}
		ringp->s_ring_state &=
		    ~(S_RING_TX_HIWAT | S_RING_WAKEUP_CLIENT | S_RING_ENQUEUED);
		/* Drop the ring lock while calling back into clients. */
		mutex_exit(&ringp->s_ring_lock);
		if (wakeup_required) {
			mac_tx_invoke_callbacks(mcip, (mac_tx_cookie_t)ringp);
			/*
			 * If the client is not the primary MAC client, then we
			 * need to send the notification to the clients upper
			 * MAC, i.e. mci_upper_mip.
			 */
			mac_tx_notify(mcip->mci_upper_mip != NULL ?
			    mcip->mci_upper_mip : mcip->mci_mip);
		}
		mutex_enter(&ringp->s_ring_lock);
	}
	ringp->s_ring_state &= ~S_RING_PROC;
	ringp->s_ring_run = NULL;
}
|
↓ open down ↓ |
71 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX