MFV: illumos-gate@54b146cf23443d91aef04e2d2a59b7434add3030
7096 vioif should not log to the console on boot, or ever
Reviewed by: Alexander Pyhalov <apyhalov@gmail.com>
Reviewed by: Andy Stormont <astormont@racktopsystems.com>
Reviewed by: Igor Kozhukhov <igor@dilos.org>
Reviewed by: Toomas Soome <tsoome@me.com>
Approved by: Dan McDonald <danmcd@joyent.com>
Author: Joshua M. Clulow <jmc@joyent.com>
OS-76 vioif kernel heap corruption, NULL pointer dereference and mtu problem
port of illumos-3644
3644 Add virtio-net support into the Illumos
Reviewed by: Alexey Zaytsev <alexey.zaytsev@gmail.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: David Hoppner <0xffea@gmail.com>
--- old/usr/src/uts/common/io/vioif/vioif.c
+++ new/usr/src/uts/common/io/vioif/vioif.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 - * Copyright 2013 Nexenta Inc. All rights reserved.
13 + * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
14 14 * Copyright (c) 2014, 2016 by Delphix. All rights reserved.
15 + * Copyright 2015 Joyent, Inc.
15 16 */
16 17
17 18 /* Based on the NetBSD virtio driver by Minoura Makoto. */
18 19 /*
19 20 * Copyright (c) 2010 Minoura Makoto.
20 21 * All rights reserved.
21 22 *
22 23 * Redistribution and use in source and binary forms, with or without
23 24 * modification, are permitted provided that the following conditions
24 25 * are met:
25 26 * 1. Redistributions of source code must retain the above copyright
26 27 * notice, this list of conditions and the following disclaimer.
27 28 * 2. Redistributions in binary form must reproduce the above copyright
28 29 * notice, this list of conditions and the following disclaimer in the
29 30 * documentation and/or other materials provided with the distribution.
30 31 *
31 32 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
32 33 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
33 34 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
34 35 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
35 36 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
36 37 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
37 38 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
38 39 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
39 40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
40 41 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 42 */
42 43
43 44 #include <sys/types.h>
44 45 #include <sys/errno.h>
45 46 #include <sys/param.h>
46 47 #include <sys/stropts.h>
47 48 #include <sys/stream.h>
48 49 #include <sys/strsubr.h>
49 50 #include <sys/kmem.h>
50 51 #include <sys/conf.h>
51 52 #include <sys/devops.h>
52 53 #include <sys/ksynch.h>
53 54 #include <sys/stat.h>
54 55 #include <sys/modctl.h>
55 56 #include <sys/debug.h>
56 57 #include <sys/pci.h>
57 58 #include <sys/ethernet.h>
58 59 #include <sys/vlan.h>
59 60
60 61 #include <sys/dlpi.h>
61 62 #include <sys/taskq.h>
62 63 #include <sys/cyclic.h>
63 64
64 65 #include <sys/pattr.h>
65 66 #include <sys/strsun.h>
66 67
67 68 #include <sys/random.h>
68 69 #include <sys/sysmacros.h>
69 70 #include <sys/stream.h>
70 71
71 72 #include <sys/mac.h>
72 73 #include <sys/mac_provider.h>
73 74 #include <sys/mac_ether.h>
74 75
75 76 #include "virtiovar.h"
76 77 #include "virtioreg.h"
77 78
78 79 /* Configuration registers */
79 80 #define VIRTIO_NET_CONFIG_MAC 0 /* 8bit x 6byte */
80 81 #define VIRTIO_NET_CONFIG_STATUS 6 /* 16bit */
81 82
82 83 /* Feature bits */
83 84 #define VIRTIO_NET_F_CSUM (1 << 0) /* Host handles pkts w/ partial csum */
84 85 #define VIRTIO_NET_F_GUEST_CSUM (1 << 1) /* Guest handles pkts w/ part csum */
85 86 #define VIRTIO_NET_F_MAC (1 << 5) /* Host has given MAC address. */
86 87 #define VIRTIO_NET_F_GSO (1 << 6) /* Host handles pkts w/ any GSO type */
87 88 #define VIRTIO_NET_F_GUEST_TSO4 (1 << 7) /* Guest can handle TSOv4 in. */
88 89 #define VIRTIO_NET_F_GUEST_TSO6 (1 << 8) /* Guest can handle TSOv6 in. */
89 90 #define VIRTIO_NET_F_GUEST_ECN (1 << 9) /* Guest can handle TSO[6] w/ ECN in */
90 91 #define VIRTIO_NET_F_GUEST_UFO (1 << 10) /* Guest can handle UFO in. */
91 92 #define VIRTIO_NET_F_HOST_TSO4 (1 << 11) /* Host can handle TSOv4 in. */
92 93 #define VIRTIO_NET_F_HOST_TSO6 (1 << 12) /* Host can handle TSOv6 in. */
93 94 #define VIRTIO_NET_F_HOST_ECN (1 << 13) /* Host can handle TSO[6] w/ ECN in */
94 95 #define VIRTIO_NET_F_HOST_UFO (1 << 14) /* Host can handle UFO in. */
95 96 #define VIRTIO_NET_F_MRG_RXBUF (1 << 15) /* Host can merge receive buffers. */
96 97 #define VIRTIO_NET_F_STATUS (1 << 16) /* Config.status available */
97 98 #define VIRTIO_NET_F_CTRL_VQ (1 << 17) /* Control channel available */
98 99 #define VIRTIO_NET_F_CTRL_RX (1 << 18) /* Control channel RX mode support */
99 100 #define VIRTIO_NET_F_CTRL_VLAN (1 << 19) /* Control channel VLAN filtering */
100 101 #define VIRTIO_NET_F_CTRL_RX_EXTRA (1 << 20) /* Extra RX mode control support */
101 102
102 103 #define VIRTIO_NET_FEATURE_BITS \
103 104 "\020" \
104 105 "\1CSUM" \
105 106 "\2GUEST_CSUM" \
106 107 "\6MAC" \
107 108 "\7GSO" \
108 109 "\10GUEST_TSO4" \
109 110 "\11GUEST_TSO6" \
110 111 "\12GUEST_ECN" \
111 112 "\13GUEST_UFO" \
112 113 "\14HOST_TSO4" \
113 114 "\15HOST_TSO6" \
114 115 "\16HOST_ECN" \
115 116 "\17HOST_UFO" \
116 117 "\20MRG_RXBUF" \
117 118 "\21STATUS" \
118 119 "\22CTRL_VQ" \
119 120 "\23CTRL_RX" \
120 121 "\24CTRL_VLAN" \
121 122 "\25CTRL_RX_EXTRA"
122 123
123 124 /* Status */
124 125 #define VIRTIO_NET_S_LINK_UP 1
125 126
126 127 #pragma pack(1)
127 128 /* Packet header structure */
128 129 struct virtio_net_hdr {
129 130 uint8_t flags;
130 131 uint8_t gso_type;
131 132 uint16_t hdr_len;
132 133 uint16_t gso_size;
133 134 uint16_t csum_start;
134 135 uint16_t csum_offset;
135 136 };
136 137 #pragma pack()
137 138
138 139 #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* flags */
139 140 #define VIRTIO_NET_HDR_GSO_NONE 0 /* gso_type */
140 141 #define VIRTIO_NET_HDR_GSO_TCPV4 1 /* gso_type */
141 142 #define VIRTIO_NET_HDR_GSO_UDP 3 /* gso_type */
142 143 #define VIRTIO_NET_HDR_GSO_TCPV6 4 /* gso_type */
143 144 #define VIRTIO_NET_HDR_GSO_ECN 0x80 /* gso_type, |'ed */
144 145
145 146
146 147 /* Control virtqueue */
147 148 #pragma pack(1)
148 149 struct virtio_net_ctrl_cmd {
149 150 uint8_t class;
150 151 uint8_t command;
151 152 };
152 153 #pragma pack()
153 154
154 155 #define VIRTIO_NET_CTRL_RX 0
155 156 #define VIRTIO_NET_CTRL_RX_PROMISC 0
156 157 #define VIRTIO_NET_CTRL_RX_ALLMULTI 1
157 158
158 159 #define VIRTIO_NET_CTRL_MAC 1
159 160 #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
160 161
161 162 #define VIRTIO_NET_CTRL_VLAN 2
162 163 #define VIRTIO_NET_CTRL_VLAN_ADD 0
163 164 #define VIRTIO_NET_CTRL_VLAN_DEL 1
164 165
165 166 #pragma pack(1)
166 167 struct virtio_net_ctrl_status {
167 168 uint8_t ack;
168 169 };
169 170
170 171 struct virtio_net_ctrl_rx {
171 172 uint8_t onoff;
172 173 };
173 174
174 175 struct virtio_net_ctrl_mac_tbl {
175 176 uint32_t nentries;
176 177 uint8_t macs[][ETHERADDRL];
177 178 };
178 179
179 180 struct virtio_net_ctrl_vlan {
180 181 uint16_t id;
181 182 };
182 183 #pragma pack()
183 184
184 185 static int vioif_quiesce(dev_info_t *);
185 186 static int vioif_attach(dev_info_t *, ddi_attach_cmd_t);
186 187 static int vioif_detach(dev_info_t *, ddi_detach_cmd_t);
187 188
188 189 DDI_DEFINE_STREAM_OPS(vioif_ops,
189 190 nulldev, /* identify */
190 191 nulldev, /* probe */
191 192 vioif_attach, /* attach */
192 193 vioif_detach, /* detach */
193 194 nodev, /* reset */
194 195 NULL, /* cb_ops */
195 196 D_MP, /* bus_ops */
196 197 NULL, /* power */
197 198 vioif_quiesce /* quiesce */);
198 199
199 200 static char vioif_ident[] = "VirtIO ethernet driver";
200 201
201 202 /* Standard Module linkage initialization for a Streams driver */
202 203 extern struct mod_ops mod_driverops;
203 204
204 205 static struct modldrv modldrv = {
205 206 &mod_driverops, /* Type of module. This one is a driver */
206 207 vioif_ident, /* short description */
207 208 &vioif_ops /* driver specific ops */
208 209 };
209 210
210 211 static struct modlinkage modlinkage = {
211 212 MODREV_1,
212 213 {
213 214 (void *)&modldrv,
214 215 NULL,
215 216 },
216 217 };
217 218
218 219 ddi_device_acc_attr_t vioif_attr = {
219 220 DDI_DEVICE_ATTR_V0,
220 221 DDI_NEVERSWAP_ACC, /* virtio is always native byte order */
221 222 DDI_STORECACHING_OK_ACC,
222 223 DDI_DEFAULT_ACC
223 224 };
224 225
225 226 /*
226 227 * A mapping represents a binding for a single buffer that is contiguous in the
227 228 * virtual address space.
228 229 */
229 230 struct vioif_buf_mapping {
230 231 caddr_t vbm_buf;
231 232 ddi_dma_handle_t vbm_dmah;
232 233 ddi_acc_handle_t vbm_acch;
233 234 ddi_dma_cookie_t vbm_dmac;
234 235 unsigned int vbm_ncookies;
235 236 };
236 237
237 238 /*
238 239 * Rx buffers can be loaned upstream, so the code has
239 240 * to allocate them dynamically.
240 241 */
241 242 struct vioif_rx_buf {
242 243 struct vioif_softc *rb_sc;
243 244 frtn_t rb_frtn;
244 245
245 246 struct vioif_buf_mapping rb_mapping;
246 247 };
247 248
248 249 /*
249 250 * Tx buffers have two mapping types. One, "inline", is pre-allocated and is
250 251 * used to hold the virtio_net_header. Small packets also get copied there, as
251 252 * it's faster than mapping them. Bigger packets get mapped using the "external"
252 253 * mapping array. An array is used, because a packet may consist of multiple
253 254 * fragments, so each fragment gets bound to an entry. According to my
254 255 * observations, the number of fragments does not exceed 2, but just in case,
255 256 * a bigger, up to VIOIF_INDIRECT_MAX - 1 array is allocated. To save resources,
256 257 * the dma handles are allocated lazily in the tx path.
257 258 */
258 259 struct vioif_tx_buf {
259 260 mblk_t *tb_mp;
260 261
261 262 /* inline buffer */
262 263 struct vioif_buf_mapping tb_inline_mapping;
263 264
264 265 /* External buffers */
265 266 struct vioif_buf_mapping *tb_external_mapping;
266 267 unsigned int tb_external_num;
267 268 };
268 269
269 270 struct vioif_softc {
270 271 dev_info_t *sc_dev; /* mirrors virtio_softc->sc_dev */
271 272 struct virtio_softc sc_virtio;
272 273
273 274 mac_handle_t sc_mac_handle;
274 275 mac_register_t *sc_macp;
275 276
276 277 struct virtqueue *sc_rx_vq;
277 278 struct virtqueue *sc_tx_vq;
278 279 struct virtqueue *sc_ctrl_vq;
279 280
280 281 unsigned int sc_tx_stopped:1;
281 282
282 283 /* Feature bits. */
283 284 unsigned int sc_rx_csum:1;
284 285 unsigned int sc_tx_csum:1;
285 286 unsigned int sc_tx_tso4:1;
286 287
288 + /*
289 + * For debugging, it is useful to know whether the MAC address we
290 + * are using came from the host (via VIRTIO_NET_CONFIG_MAC) or
291 + * was otherwise generated or set from within the guest.
292 + */
293 + unsigned int sc_mac_from_host:1;
294 +
287 295 int sc_mtu;
288 296 uint8_t sc_mac[ETHERADDRL];
289 297 /*
290 298 * For rx buffers, we keep a pointer array, because the buffers
291 299 * can be loaned upstream, and we have to repopulate the array with
292 300 * new members.
293 301 */
294 302 struct vioif_rx_buf **sc_rxbufs;
295 303
296 304 /*
297 305 * For tx, we just allocate an array of buffers. The packet can
298 306 * either be copied into the inline buffer, or the external mapping
299 307 * could be used to map the packet
300 308 */
301 309 struct vioif_tx_buf *sc_txbufs;
302 310
303 311 kstat_t *sc_intrstat;
304 312 /*
305 313 * We "loan" rx buffers upstream and reuse them after they are
306 314 * freed. This lets us avoid allocations in the hot path.
307 315 */
308 316 kmem_cache_t *sc_rxbuf_cache;
309 317 ulong_t sc_rxloan;
310 318
311 319 /* Copying small packets turns out to be faster than mapping them. */
312 320 unsigned long sc_rxcopy_thresh;
313 321 unsigned long sc_txcopy_thresh;
314 - /* Some statistic coming here */
322 +
323 + /*
324 + * Statistics visible through mac:
325 + */
315 326 uint64_t sc_ipackets;
316 327 uint64_t sc_opackets;
317 328 uint64_t sc_rbytes;
318 329 uint64_t sc_obytes;
319 330 uint64_t sc_brdcstxmt;
320 331 uint64_t sc_brdcstrcv;
321 332 uint64_t sc_multixmt;
322 333 uint64_t sc_multircv;
323 334 uint64_t sc_norecvbuf;
324 335 uint64_t sc_notxbuf;
325 336 uint64_t sc_ierrors;
326 337 uint64_t sc_oerrors;
338 +
339 + /*
340 + * Internal debugging statistics:
341 + */
342 + uint64_t sc_rxfail_dma_handle;
343 + uint64_t sc_rxfail_dma_buffer;
344 + uint64_t sc_rxfail_dma_bind;
345 + uint64_t sc_rxfail_chain_undersize;
346 + uint64_t sc_rxfail_no_descriptors;
347 + uint64_t sc_txfail_dma_handle;
348 + uint64_t sc_txfail_dma_bind;
349 + uint64_t sc_txfail_indirect_limit;
327 350 };
328 351
329 352 #define ETHER_HEADER_LEN sizeof (struct ether_header)
330 353
331 354 /* MTU + the ethernet header. */
332 355 #define MAX_PAYLOAD 65535
333 356 #define MAX_MTU (MAX_PAYLOAD - ETHER_HEADER_LEN)
334 357 #define DEFAULT_MTU ETHERMTU
335 358
336 359 /*
337 360 * Yeah, we spend 8M per device. Turns out, there is no point
338 361 * being smart and using merged rx buffers (VIRTIO_NET_F_MRG_RXBUF),
339 362 * because vhost does not support them, and we expect to be used with
340 363 * vhost in production environment.
341 364 */
342 365 /* The buffer keeps both the packet data and the virtio_net_header. */
343 366 #define VIOIF_RX_SIZE (MAX_PAYLOAD + sizeof (struct virtio_net_hdr))
344 367
345 368 /*
346 369 * We win a bit on header alignment, but the host wins a lot
347 370 * more on moving aligned buffers. Might need more thought.
348 371 */
349 372 #define VIOIF_IP_ALIGN 0
350 373
351 374 /* Maximum number of indirect descriptors, somewhat arbitrary. */
352 375 #define VIOIF_INDIRECT_MAX 128
353 376
354 377 /*
355 378 * We pre-allocate a reasonably large buffer to copy small packets
356 379 * there. Bigger packets are mapped, packets with multiple
357 380 * cookies are mapped as indirect buffers.
358 381 */
359 382 #define VIOIF_TX_INLINE_SIZE 2048
360 383
361 384 /* Native queue size for all queues */
362 385 #define VIOIF_RX_QLEN 0
363 386 #define VIOIF_TX_QLEN 0
364 387 #define VIOIF_CTRL_QLEN 0
365 388
366 389 static uchar_t vioif_broadcast[ETHERADDRL] = {
367 390 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
368 391 };
369 392
370 393 #define VIOIF_TX_THRESH_MAX 640
371 394 #define VIOIF_RX_THRESH_MAX 640
372 395
373 396 #define CACHE_NAME_SIZE 32
374 397
375 398 static char vioif_txcopy_thresh[] =
376 399 "vioif_txcopy_thresh";
377 400 static char vioif_rxcopy_thresh[] =
378 401 "vioif_rxcopy_thresh";
379 402
380 403 static char *vioif_priv_props[] = {
381 404 vioif_txcopy_thresh,
382 405 vioif_rxcopy_thresh,
383 406 NULL
384 407 };
385 408
386 409 /* Add up to ddi? */
387 410 static ddi_dma_cookie_t *
388 411 vioif_dma_curr_cookie(ddi_dma_handle_t dmah)
389 412 {
390 413 ddi_dma_impl_t *dmah_impl = (void *) dmah;
391 414 ASSERT(dmah_impl->dmai_cookie);
392 415 return (dmah_impl->dmai_cookie);
393 416 }
394 417
395 418 static void
396 419 vioif_dma_reset_cookie(ddi_dma_handle_t dmah, ddi_dma_cookie_t *dmac)
397 420 {
398 421 ddi_dma_impl_t *dmah_impl = (void *) dmah;
399 422 dmah_impl->dmai_cookie = dmac;
400 423 }
401 424
402 425 static link_state_t
403 426 vioif_link_state(struct vioif_softc *sc)
404 427 {
405 428 if (sc->sc_virtio.sc_features & VIRTIO_NET_F_STATUS) {
406 429 if (virtio_read_device_config_2(&sc->sc_virtio,
407 430 VIRTIO_NET_CONFIG_STATUS) & VIRTIO_NET_S_LINK_UP) {
408 431 return (LINK_STATE_UP);
409 432 } else {
410 433 return (LINK_STATE_DOWN);
411 434 }
412 435 }
413 436
414 437 return (LINK_STATE_UP);
415 438 }
416 439
417 440 static ddi_dma_attr_t vioif_inline_buf_dma_attr = {
418 441 DMA_ATTR_V0, /* Version number */
419 442 0, /* low address */
420 443 0xFFFFFFFFFFFFFFFF, /* high address */
421 444 0xFFFFFFFF, /* counter register max */
422 445 1, /* page alignment */
423 446 1, /* burst sizes: 1 - 32 */
424 447 1, /* minimum transfer size */
425 448 0xFFFFFFFF, /* max transfer size */
426 449 0xFFFFFFFFFFFFFFF, /* address register max */
427 450 1, /* scatter-gather capacity */
428 451 1, /* device operates on bytes */
429 452 0, /* attr flag: set to 0 */
430 453 };
431 454
432 455 static ddi_dma_attr_t vioif_mapped_buf_dma_attr = {
433 456 DMA_ATTR_V0, /* Version number */
434 457 0, /* low address */
435 458 0xFFFFFFFFFFFFFFFF, /* high address */
436 459 0xFFFFFFFF, /* counter register max */
437 460 1, /* page alignment */
438 461 1, /* burst sizes: 1 - 32 */
439 462 1, /* minimum transfer size */
440 463 0xFFFFFFFF, /* max transfer size */
441 464 0xFFFFFFFFFFFFFFF, /* address register max */
442 465
443 466 /* One entry is used for the virtio_net_hdr on the tx path */
444 467 VIOIF_INDIRECT_MAX - 1, /* scatter-gather capacity */
445 468 1, /* device operates on bytes */
446 469 0, /* attr flag: set to 0 */
447 470 };
448 471
449 472 static ddi_device_acc_attr_t vioif_bufattr = {
450 473 DDI_DEVICE_ATTR_V0,
451 474 DDI_NEVERSWAP_ACC,
452 475 DDI_STORECACHING_OK_ACC,
453 476 DDI_DEFAULT_ACC
454 477 };
455 478
456 479 static void
457 480 vioif_rx_free(caddr_t free_arg)
458 481 {
459 482 struct vioif_rx_buf *buf = (void *) free_arg;
460 483 struct vioif_softc *sc = buf->rb_sc;
461 484
462 485 kmem_cache_free(sc->sc_rxbuf_cache, buf);
463 486 atomic_dec_ulong(&sc->sc_rxloan);
464 487 }
465 488
466 489 static int
467 490 vioif_rx_construct(void *buffer, void *user_arg, int kmflags)
468 491 {
469 492 _NOTE(ARGUNUSED(kmflags));
470 493 struct vioif_softc *sc = user_arg;
471 494 struct vioif_rx_buf *buf = buffer;
472 495 size_t len;
473 496
474 497 if (ddi_dma_alloc_handle(sc->sc_dev, &vioif_mapped_buf_dma_attr,
475 498 DDI_DMA_SLEEP, NULL, &buf->rb_mapping.vbm_dmah)) {
476 - dev_err(sc->sc_dev, CE_WARN,
477 - "Can't allocate dma handle for rx buffer");
499 + sc->sc_rxfail_dma_handle++;
478 500 goto exit_handle;
479 501 }
480 502
481 503 if (ddi_dma_mem_alloc(buf->rb_mapping.vbm_dmah,
482 504 VIOIF_RX_SIZE + sizeof (struct virtio_net_hdr),
483 505 &vioif_bufattr, DDI_DMA_STREAMING, DDI_DMA_SLEEP,
484 506 NULL, &buf->rb_mapping.vbm_buf, &len, &buf->rb_mapping.vbm_acch)) {
485 - dev_err(sc->sc_dev, CE_WARN,
486 - "Can't allocate rx buffer");
507 + sc->sc_rxfail_dma_buffer++;
487 508 goto exit_alloc;
488 509 }
489 510 ASSERT(len >= VIOIF_RX_SIZE);
490 511
491 512 if (ddi_dma_addr_bind_handle(buf->rb_mapping.vbm_dmah, NULL,
492 513 buf->rb_mapping.vbm_buf, len, DDI_DMA_READ | DDI_DMA_STREAMING,
493 514 DDI_DMA_SLEEP, NULL, &buf->rb_mapping.vbm_dmac,
494 515 &buf->rb_mapping.vbm_ncookies)) {
495 - dev_err(sc->sc_dev, CE_WARN, "Can't bind tx buffer");
496 -
516 + sc->sc_rxfail_dma_bind++;
497 517 goto exit_bind;
498 518 }
499 519
500 520 ASSERT(buf->rb_mapping.vbm_ncookies <= VIOIF_INDIRECT_MAX);
501 521
502 522 buf->rb_sc = sc;
503 523 buf->rb_frtn.free_arg = (void *) buf;
504 524 buf->rb_frtn.free_func = vioif_rx_free;
505 525
506 526 return (0);
507 527 exit_bind:
508 528 ddi_dma_mem_free(&buf->rb_mapping.vbm_acch);
509 529 exit_alloc:
510 530 ddi_dma_free_handle(&buf->rb_mapping.vbm_dmah);
511 531 exit_handle:
512 532
513 533 return (ENOMEM);
514 534 }
515 535
516 536 static void
517 537 vioif_rx_destruct(void *buffer, void *user_arg)
518 538 {
519 539 _NOTE(ARGUNUSED(user_arg));
520 540 struct vioif_rx_buf *buf = buffer;
521 541
522 542 ASSERT(buf->rb_mapping.vbm_acch);
523 543 ASSERT(buf->rb_mapping.vbm_acch);
524 544
525 545 (void) ddi_dma_unbind_handle(buf->rb_mapping.vbm_dmah);
526 546 ddi_dma_mem_free(&buf->rb_mapping.vbm_acch);
527 547 ddi_dma_free_handle(&buf->rb_mapping.vbm_dmah);
528 548 }
529 549
530 550 static void
531 551 vioif_free_mems(struct vioif_softc *sc)
532 552 {
533 553 int i;
534 554
535 555 for (i = 0; i < sc->sc_tx_vq->vq_num; i++) {
536 556 struct vioif_tx_buf *buf = &sc->sc_txbufs[i];
537 557 int j;
538 558
539 559 /* Tear down the internal mapping. */
540 560
541 561 ASSERT(buf->tb_inline_mapping.vbm_acch);
542 562 ASSERT(buf->tb_inline_mapping.vbm_dmah);
543 563
544 564 (void) ddi_dma_unbind_handle(buf->tb_inline_mapping.vbm_dmah);
545 565 ddi_dma_mem_free(&buf->tb_inline_mapping.vbm_acch);
546 566 ddi_dma_free_handle(&buf->tb_inline_mapping.vbm_dmah);
547 567
548 568 /* We should not see any in-flight buffers at this point. */
549 569 ASSERT(!buf->tb_mp);
550 570
551 571 /* Free all the dma handles we allocated lazily. */
552 572 for (j = 0; buf->tb_external_mapping[j].vbm_dmah; j++)
553 573 ddi_dma_free_handle(
554 574 &buf->tb_external_mapping[j].vbm_dmah);
555 575 /* Free the external mapping array. */
556 576 kmem_free(buf->tb_external_mapping,
557 577 sizeof (struct vioif_tx_buf) * VIOIF_INDIRECT_MAX - 1);
558 578 }
559 579
560 580 kmem_free(sc->sc_txbufs, sizeof (struct vioif_tx_buf) *
561 581 sc->sc_tx_vq->vq_num);
562 582
563 583 for (i = 0; i < sc->sc_rx_vq->vq_num; i++) {
564 584 struct vioif_rx_buf *buf = sc->sc_rxbufs[i];
565 585
566 586 if (buf)
567 587 kmem_cache_free(sc->sc_rxbuf_cache, buf);
568 588 }
569 589 kmem_free(sc->sc_rxbufs, sizeof (struct vioif_rx_buf *) *
570 590 sc->sc_rx_vq->vq_num);
571 591 }
572 592
573 593 static int
574 594 vioif_alloc_mems(struct vioif_softc *sc)
575 595 {
576 596 int i, txqsize, rxqsize;
577 597 size_t len;
578 598 unsigned int nsegments;
579 599
580 600 txqsize = sc->sc_tx_vq->vq_num;
581 601 rxqsize = sc->sc_rx_vq->vq_num;
582 602
583 603 sc->sc_txbufs = kmem_zalloc(sizeof (struct vioif_tx_buf) * txqsize,
584 604 KM_SLEEP);
585 605 if (sc->sc_txbufs == NULL) {
586 606 dev_err(sc->sc_dev, CE_WARN,
587 607 "Failed to allocate the tx buffers array");
588 608 goto exit_txalloc;
589 609 }
590 610
591 611 /*
592 612 * We don't allocate the rx vioif_bufs, just the pointers, as
593 613 * rx vioif_bufs can be loaned upstream, and we don't know the
594 614 * total number we need.
595 615 */
596 616 sc->sc_rxbufs = kmem_zalloc(sizeof (struct vioif_rx_buf *) * rxqsize,
597 617 KM_SLEEP);
598 618 if (sc->sc_rxbufs == NULL) {
599 619 dev_err(sc->sc_dev, CE_WARN,
600 620 "Failed to allocate the rx buffers pointer array");
601 621 goto exit_rxalloc;
602 622 }
603 623
604 624 for (i = 0; i < txqsize; i++) {
605 625 struct vioif_tx_buf *buf = &sc->sc_txbufs[i];
606 626
607 627 /* Allocate and bind an inline mapping. */
608 628
609 629 if (ddi_dma_alloc_handle(sc->sc_dev,
610 630 &vioif_inline_buf_dma_attr,
611 631 DDI_DMA_SLEEP, NULL, &buf->tb_inline_mapping.vbm_dmah)) {
612 632
613 633 dev_err(sc->sc_dev, CE_WARN,
614 634 "Can't allocate dma handle for tx buffer %d", i);
615 635 goto exit_tx;
616 636 }
617 637
618 638 if (ddi_dma_mem_alloc(buf->tb_inline_mapping.vbm_dmah,
619 639 VIOIF_TX_INLINE_SIZE, &vioif_bufattr, DDI_DMA_STREAMING,
620 640 DDI_DMA_SLEEP, NULL, &buf->tb_inline_mapping.vbm_buf,
621 641 &len, &buf->tb_inline_mapping.vbm_acch)) {
622 642
623 643 dev_err(sc->sc_dev, CE_WARN,
624 644 "Can't allocate tx buffer %d", i);
625 645 goto exit_tx;
626 646 }
627 647 ASSERT(len >= VIOIF_TX_INLINE_SIZE);
628 648
629 649 if (ddi_dma_addr_bind_handle(buf->tb_inline_mapping.vbm_dmah,
630 650 NULL, buf->tb_inline_mapping.vbm_buf, len,
631 651 DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
632 652 &buf->tb_inline_mapping.vbm_dmac, &nsegments)) {
633 653
634 654 dev_err(sc->sc_dev, CE_WARN,
635 655 "Can't bind tx buffer %d", i);
636 656 goto exit_tx;
637 657 }
638 658
639 659 /* We asked for a single segment */
640 660 ASSERT(nsegments == 1);
641 661
642 662 /*
643 663 * We allow up to VIOIF_INDIRECT_MAX - 1 external mappings.
644 664 * In reality, I don't expect more than 2-3 used, but who
645 665 * knows.
646 666 */
647 667 buf->tb_external_mapping = kmem_zalloc(
648 668 sizeof (struct vioif_tx_buf) * VIOIF_INDIRECT_MAX - 1,
649 669 KM_SLEEP);
650 670
651 671 /*
652 672 * The external mapping's dma handles are allocated lazily,
653 673 * as we don't expect most of them to be used.
654 674 */
655 675 }
656 676
657 677 return (0);
658 678
659 679 exit_tx:
660 680 for (i = 0; i < txqsize; i++) {
661 681 struct vioif_tx_buf *buf = &sc->sc_txbufs[i];
662 682
663 683 if (buf->tb_inline_mapping.vbm_dmah)
664 684 (void) ddi_dma_unbind_handle(
665 685 buf->tb_inline_mapping.vbm_dmah);
666 686
667 687 if (buf->tb_inline_mapping.vbm_acch)
668 688 ddi_dma_mem_free(
669 689 &buf->tb_inline_mapping.vbm_acch);
670 690
671 691 if (buf->tb_inline_mapping.vbm_dmah)
672 692 ddi_dma_free_handle(
673 693 &buf->tb_inline_mapping.vbm_dmah);
674 694
675 695 if (buf->tb_external_mapping)
676 696 kmem_free(buf->tb_external_mapping,
677 697 sizeof (struct vioif_tx_buf) *
678 698 VIOIF_INDIRECT_MAX - 1);
679 699 }
680 700
681 701 kmem_free(sc->sc_rxbufs, sizeof (struct vioif_rx_buf) * rxqsize);
682 702
683 703 exit_rxalloc:
684 704 kmem_free(sc->sc_txbufs, sizeof (struct vioif_tx_buf) * txqsize);
685 705 exit_txalloc:
686 706 return (ENOMEM);
687 707 }
688 708
689 709 /* ARGSUSED */
690 710 int
691 711 vioif_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
692 712 {
693 713 return (DDI_SUCCESS);
694 714 }
695 715
696 716 /* ARGSUSED */
697 717 int
698 718 vioif_promisc(void *arg, boolean_t on)
699 719 {
700 720 return (DDI_SUCCESS);
701 721 }
702 722
703 723 /* ARGSUSED */
704 724 int
705 725 vioif_unicst(void *arg, const uint8_t *macaddr)
706 726 {
707 727 return (DDI_FAILURE);
708 728 }
709 729
710 730
711 731 static uint_t
712 732 vioif_add_rx(struct vioif_softc *sc, int kmflag)
713 733 {
714 734 uint_t num_added = 0;
715 735 struct vq_entry *ve;
716 736
717 737 while ((ve = vq_alloc_entry(sc->sc_rx_vq)) != NULL) {
718 738 struct vioif_rx_buf *buf = sc->sc_rxbufs[ve->qe_index];
719 739
720 - if (!buf) {
740 + if (buf == NULL) {
721 741 /* First run, allocate the buffer. */
722 742 buf = kmem_cache_alloc(sc->sc_rxbuf_cache, kmflag);
723 743 sc->sc_rxbufs[ve->qe_index] = buf;
724 744 }
725 745
726 746 /* Still nothing? Bye. */
727 - if (!buf) {
728 - dev_err(sc->sc_dev, CE_WARN,
729 - "Can't allocate rx buffer");
747 + if (buf == NULL) {
730 748 sc->sc_norecvbuf++;
731 749 vq_free_entry(sc->sc_rx_vq, ve);
732 750 break;
733 751 }
734 752
735 753 ASSERT(buf->rb_mapping.vbm_ncookies >= 1);
736 754
737 755 /*
738 756 * For an unknown reason, the virtio_net_hdr must be placed
739 757 * as a separate virtio queue entry.
740 758 */
741 759 virtio_ve_add_indirect_buf(ve,
742 760 buf->rb_mapping.vbm_dmac.dmac_laddress,
743 761 sizeof (struct virtio_net_hdr), B_FALSE);
744 762
745 763 /* Add the rest of the first cookie. */
746 764 virtio_ve_add_indirect_buf(ve,
747 765 buf->rb_mapping.vbm_dmac.dmac_laddress +
748 766 sizeof (struct virtio_net_hdr),
749 767 buf->rb_mapping.vbm_dmac.dmac_size -
750 768 sizeof (struct virtio_net_hdr), B_FALSE);
751 769
752 770 /*
753 771 * If the buffer consists of a single cookie (unlikely for a
754 772 * 64-k buffer), we are done. Otherwise, add the rest of the
755 773 * cookies using indirect entries.
756 774 */
757 775 if (buf->rb_mapping.vbm_ncookies > 1) {
758 776 ddi_dma_cookie_t *first_extra_dmac;
759 777 ddi_dma_cookie_t dmac;
760 778 first_extra_dmac =
761 779 vioif_dma_curr_cookie(buf->rb_mapping.vbm_dmah);
762 780
763 781 ddi_dma_nextcookie(buf->rb_mapping.vbm_dmah, &dmac);
764 782 virtio_ve_add_cookie(ve, buf->rb_mapping.vbm_dmah,
765 783 dmac, buf->rb_mapping.vbm_ncookies - 1, B_FALSE);
766 784 vioif_dma_reset_cookie(buf->rb_mapping.vbm_dmah,
767 785 first_extra_dmac);
768 786 }
769 787
770 788 virtio_push_chain(ve, B_FALSE);
771 789 num_added++;
772 790 }
773 791
774 792 return (num_added);
775 793 }
776 794
777 795 static uint_t
778 796 vioif_populate_rx(struct vioif_softc *sc, int kmflag)
779 797 {
780 798 uint_t num_added = vioif_add_rx(sc, kmflag);
781 799
782 800 if (num_added > 0)
783 801 virtio_sync_vq(sc->sc_rx_vq);
784 802
785 803 return (num_added);
786 804 }
787 805
788 806 static uint_t
789 807 vioif_process_rx(struct vioif_softc *sc)
790 808 {
791 809 struct vq_entry *ve;
792 810 struct vioif_rx_buf *buf;
793 811 mblk_t *mphead = NULL, *lastmp = NULL, *mp;
794 812 uint32_t len;
795 813 uint_t num_processed = 0;
796 814
797 815 while ((ve = virtio_pull_chain(sc->sc_rx_vq, &len))) {
798 816
799 817 buf = sc->sc_rxbufs[ve->qe_index];
800 818 ASSERT(buf);
801 819
802 820 if (len < sizeof (struct virtio_net_hdr)) {
803 - dev_err(sc->sc_dev, CE_WARN, "RX: Cnain too small: %u",
804 - len - (uint32_t)sizeof (struct virtio_net_hdr));
821 + sc->sc_rxfail_chain_undersize++;
805 822 sc->sc_ierrors++;
806 823 virtio_free_chain(ve);
807 824 continue;
808 825 }
809 826
810 827 len -= sizeof (struct virtio_net_hdr);
811 828 /*
812 829 * We copy small packets that happen to fit into a single
813 830 * cookie and reuse the buffers. For bigger ones, we loan
814 831 * the buffers upstream.
815 832 */
816 833 if (len < sc->sc_rxcopy_thresh) {
817 834 mp = allocb(len, 0);
818 - if (!mp) {
835 + if (mp == NULL) {
819 836 sc->sc_norecvbuf++;
820 837 sc->sc_ierrors++;
821 838
822 839 virtio_free_chain(ve);
823 840 break;
824 841 }
825 842
826 843 bcopy((char *)buf->rb_mapping.vbm_buf +
827 844 sizeof (struct virtio_net_hdr), mp->b_rptr, len);
828 845 mp->b_wptr = mp->b_rptr + len;
829 846
830 847 } else {
831 848 mp = desballoc((unsigned char *)
832 849 buf->rb_mapping.vbm_buf +
833 850 sizeof (struct virtio_net_hdr) +
834 851 VIOIF_IP_ALIGN, len, 0, &buf->rb_frtn);
835 - if (!mp) {
852 + if (mp == NULL) {
836 853 sc->sc_norecvbuf++;
837 854 sc->sc_ierrors++;
838 855
839 856 virtio_free_chain(ve);
840 857 break;
841 858 }
842 859 mp->b_wptr = mp->b_rptr + len;
843 860
844 861 atomic_inc_ulong(&sc->sc_rxloan);
845 862 /*
846 863 * Buffer loaned, we will have to allocate a new one
847 864 * for this slot.
848 865 */
849 866 sc->sc_rxbufs[ve->qe_index] = NULL;
850 867 }
851 868
852 869 /*
853 870 * virtio-net does not tell us if this packet is multicast
854 871 * or broadcast, so we have to check it.
855 872 */
856 873 if (mp->b_rptr[0] & 0x1) {
857 874 if (bcmp(mp->b_rptr, vioif_broadcast, ETHERADDRL) != 0)
858 875 sc->sc_multircv++;
859 876 else
860 877 sc->sc_brdcstrcv++;
861 878 }
862 879
863 880 sc->sc_rbytes += len;
864 881 sc->sc_ipackets++;
865 882
866 883 virtio_free_chain(ve);
867 884
868 885 if (lastmp == NULL) {
869 886 mphead = mp;
870 887 } else {
871 888 lastmp->b_next = mp;
872 889 }
873 890 lastmp = mp;
874 891 num_processed++;
875 892 }
876 893
877 894 if (mphead != NULL) {
878 895 mac_rx(sc->sc_mac_handle, NULL, mphead);
879 896 }
880 897
881 898 return (num_processed);
882 899 }
883 900
884 901 static uint_t
885 902 vioif_reclaim_used_tx(struct vioif_softc *sc)
886 903 {
887 904 struct vq_entry *ve;
888 905 struct vioif_tx_buf *buf;
889 906 uint32_t len;
890 907 mblk_t *mp;
891 908 uint_t num_reclaimed = 0;
892 909
893 910 while ((ve = virtio_pull_chain(sc->sc_tx_vq, &len))) {
894 911 /* We don't chain descriptors for tx, so don't expect any. */
895 912 ASSERT(!ve->qe_next);
896 913
897 914 buf = &sc->sc_txbufs[ve->qe_index];
898 915 mp = buf->tb_mp;
899 916 buf->tb_mp = NULL;
900 917
901 - if (mp) {
918 + if (mp != NULL) {
902 919 for (int i = 0; i < buf->tb_external_num; i++)
903 920 (void) ddi_dma_unbind_handle(
904 921 buf->tb_external_mapping[i].vbm_dmah);
905 922 }
906 923
907 924 virtio_free_chain(ve);
908 925
909 926 /* External mapping used, mp was not freed in vioif_send() */
910 - if (mp)
927 + if (mp != NULL)
911 928 freemsg(mp);
912 929 num_reclaimed++;
913 930 }
914 931
915 932 if (sc->sc_tx_stopped && num_reclaimed > 0) {
916 933 sc->sc_tx_stopped = 0;
917 934 mac_tx_update(sc->sc_mac_handle);
918 935 }
919 936
920 937 return (num_reclaimed);
921 938 }
922 939
923 940 /* sc will be used to update stat counters. */
924 941 /* ARGSUSED */
925 942 static inline void
926 943 vioif_tx_inline(struct vioif_softc *sc, struct vq_entry *ve, mblk_t *mp,
927 944 size_t msg_size)
928 945 {
929 946 struct vioif_tx_buf *buf;
930 947 buf = &sc->sc_txbufs[ve->qe_index];
931 948
932 949 ASSERT(buf);
933 950
934 951 /* Frees mp */
935 952 mcopymsg(mp, buf->tb_inline_mapping.vbm_buf +
936 953 sizeof (struct virtio_net_hdr));
937 954
938 955 virtio_ve_add_indirect_buf(ve,
939 956 buf->tb_inline_mapping.vbm_dmac.dmac_laddress +
940 957 sizeof (struct virtio_net_hdr), msg_size, B_TRUE);
941 958 }
942 959
943 960 static inline int
944 961 vioif_tx_lazy_handle_alloc(struct vioif_softc *sc, struct vioif_tx_buf *buf,
945 962 int i)
946 963 {
947 964 int ret = DDI_SUCCESS;
948 965
949 966 if (!buf->tb_external_mapping[i].vbm_dmah) {
950 967 ret = ddi_dma_alloc_handle(sc->sc_dev,
951 968 &vioif_mapped_buf_dma_attr, DDI_DMA_SLEEP, NULL,
952 969 &buf->tb_external_mapping[i].vbm_dmah);
953 970 if (ret != DDI_SUCCESS) {
954 - dev_err(sc->sc_dev, CE_WARN,
955 - "Can't allocate dma handle for external tx buffer");
971 + sc->sc_txfail_dma_handle++;
956 972 }
957 973 }
958 974
959 975 return (ret);
960 976 }
961 977
962 978 static inline int
963 979 vioif_tx_external(struct vioif_softc *sc, struct vq_entry *ve, mblk_t *mp,
964 980 size_t msg_size)
965 981 {
966 982 _NOTE(ARGUNUSED(msg_size));
967 983
968 984 struct vioif_tx_buf *buf;
969 985 mblk_t *nmp;
970 986 int i, j;
971 987 int ret = DDI_SUCCESS;
972 988
973 989 buf = &sc->sc_txbufs[ve->qe_index];
974 990
975 991 ASSERT(buf);
976 992
977 993 buf->tb_external_num = 0;
978 994 i = 0;
979 995 nmp = mp;
980 996
981 997 while (nmp) {
982 998 size_t len;
983 999 ddi_dma_cookie_t dmac;
984 1000 unsigned int ncookies;
985 1001
986 1002 len = MBLKL(nmp);
987 1003 /*
988 1004 * For some reason, the network stack can
989 1005 * actually send us zero-length fragments.
990 1006 */
991 1007 if (len == 0) {
992 1008 nmp = nmp->b_cont;
993 1009 continue;
994 1010 }
995 1011
996 1012 ret = vioif_tx_lazy_handle_alloc(sc, buf, i);
997 1013 if (ret != DDI_SUCCESS) {
998 1014 sc->sc_notxbuf++;
999 1015 sc->sc_oerrors++;
1000 1016 goto exit_lazy_alloc;
1001 1017 }
1002 1018 ret = ddi_dma_addr_bind_handle(
1003 1019 buf->tb_external_mapping[i].vbm_dmah, NULL,
1004 1020 (caddr_t)nmp->b_rptr, len,
1005 1021 DDI_DMA_WRITE | DDI_DMA_STREAMING,
1006 1022 DDI_DMA_SLEEP, NULL, &dmac, &ncookies);
1007 1023
1008 1024 if (ret != DDI_SUCCESS) {
1025 + sc->sc_txfail_dma_bind++;
1009 1026 sc->sc_oerrors++;
1010 - dev_err(sc->sc_dev, CE_NOTE,
1011 - "TX: Failed to bind external handle");
1012 1027 goto exit_bind;
1013 1028 }
1014 1029
1015 1030 /* Check if we still fit into the indirect table. */
1016 1031 if (virtio_ve_indirect_available(ve) < ncookies) {
1017 - dev_err(sc->sc_dev, CE_NOTE,
1018 - "TX: Indirect descriptor table limit reached."
1019 - " It took %d fragments.", i);
1032 + sc->sc_txfail_indirect_limit++;
1020 1033 sc->sc_notxbuf++;
1021 1034 sc->sc_oerrors++;
1022 1035
1023 1036 ret = DDI_FAILURE;
1024 1037 goto exit_limit;
1025 1038 }
1026 1039
1027 1040 virtio_ve_add_cookie(ve, buf->tb_external_mapping[i].vbm_dmah,
1028 1041 dmac, ncookies, B_TRUE);
1029 1042
1030 1043 nmp = nmp->b_cont;
1031 1044 i++;
1032 1045 }
1033 1046
1034 1047 buf->tb_external_num = i;
1035 1048 /* Save the mp to free it when the packet is sent. */
1036 1049 buf->tb_mp = mp;
1037 1050
1038 1051 return (DDI_SUCCESS);
1039 1052
1040 1053 exit_limit:
1041 1054 exit_bind:
1042 1055 exit_lazy_alloc:
1043 1056
1044 1057 for (j = 0; j < i; j++) {
1045 1058 (void) ddi_dma_unbind_handle(
1046 1059 buf->tb_external_mapping[j].vbm_dmah);
1047 1060 }
1048 1061
1049 1062 return (ret);
1050 1063 }
1051 1064
1052 1065 static boolean_t
1053 1066 vioif_send(struct vioif_softc *sc, mblk_t *mp)
1054 1067 {
1055 1068 struct vq_entry *ve;
1056 1069 struct vioif_tx_buf *buf;
1057 1070 struct virtio_net_hdr *net_header = NULL;
1058 1071 size_t msg_size = 0;
1059 1072 uint32_t csum_start;
1060 1073 uint32_t csum_stuff;
1061 1074 uint32_t csum_flags;
1062 1075 uint32_t lso_flags;
1063 1076 uint32_t lso_mss;
1064 1077 mblk_t *nmp;
1065 1078 int ret;
1066 1079 boolean_t lso_required = B_FALSE;
1067 1080
1068 1081 for (nmp = mp; nmp; nmp = nmp->b_cont)
1069 1082 msg_size += MBLKL(nmp);
1070 1083
1071 1084 if (sc->sc_tx_tso4) {
1072 1085 mac_lso_get(mp, &lso_mss, &lso_flags);
1073 1086 lso_required = (lso_flags & HW_LSO);
1074 1087 }
1075 1088
1076 1089 ve = vq_alloc_entry(sc->sc_tx_vq);
1077 1090
1078 - if (!ve) {
1091 + if (ve == NULL) {
1079 1092 sc->sc_notxbuf++;
1080 1093 /* Out of free descriptors - try later. */
1081 1094 return (B_FALSE);
1082 1095 }
1083 1096 buf = &sc->sc_txbufs[ve->qe_index];
1084 1097
1085 1098 /* Use the inline buffer of the first entry for the virtio_net_hdr. */
1086 1099 (void) memset(buf->tb_inline_mapping.vbm_buf, 0,
1087 1100 sizeof (struct virtio_net_hdr));
1088 1101
1089 1102 net_header = (struct virtio_net_hdr *)buf->tb_inline_mapping.vbm_buf;
1090 1103
1091 1104 mac_hcksum_get(mp, &csum_start, &csum_stuff, NULL,
1092 1105 NULL, &csum_flags);
1093 1106
1094 1107 /* They want us to do the TCP/UDP csum calculation. */
1095 1108 if (csum_flags & HCK_PARTIALCKSUM) {
1096 1109 struct ether_header *eth_header;
1097 1110 int eth_hsize;
1098 1111
1099 1112 /* Did we ask for it? */
1100 1113 ASSERT(sc->sc_tx_csum);
1101 1114
1102 1115 /* We only asked for partial csum packets. */
1103 1116 ASSERT(!(csum_flags & HCK_IPV4_HDRCKSUM));
1104 1117 ASSERT(!(csum_flags & HCK_FULLCKSUM));
1105 1118
1106 1119 eth_header = (void *) mp->b_rptr;
1107 1120 if (eth_header->ether_type == htons(ETHERTYPE_VLAN)) {
1108 1121 eth_hsize = sizeof (struct ether_vlan_header);
1109 1122 } else {
1110 1123 eth_hsize = sizeof (struct ether_header);
1111 1124 }
1112 1125 net_header->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1113 1126 net_header->csum_start = eth_hsize + csum_start;
1114 1127 net_header->csum_offset = csum_stuff - csum_start;
1115 1128 }
1116 1129
1117 1130 /* setup LSO fields if required */
1118 1131 if (lso_required) {
1119 1132 net_header->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1120 1133 net_header->gso_size = (uint16_t)lso_mss;
1121 1134 }
1122 1135
1123 1136 virtio_ve_add_indirect_buf(ve,
1124 1137 buf->tb_inline_mapping.vbm_dmac.dmac_laddress,
1125 1138 sizeof (struct virtio_net_hdr), B_TRUE);
1126 1139
1127 1140 /* meanwhile update the statistic */
1128 1141 if (mp->b_rptr[0] & 0x1) {
1129 1142 if (bcmp(mp->b_rptr, vioif_broadcast, ETHERADDRL) != 0)
1130 - sc->sc_multixmt++;
1131 - else
1132 - sc->sc_brdcstxmt++;
1143 + sc->sc_multixmt++;
1144 + else
1145 + sc->sc_brdcstxmt++;
1133 1146 }
1134 1147
1135 1148 /*
1136 1149 * We copy small packets into the inline buffer. The bigger ones
1137 1150 * get mapped using the mapped buffer.
1138 1151 */
1139 1152 if (msg_size < sc->sc_txcopy_thresh) {
1140 1153 vioif_tx_inline(sc, ve, mp, msg_size);
1141 1154 } else {
1142 1155 /* statistic gets updated by vioif_tx_external when fail */
1143 1156 ret = vioif_tx_external(sc, ve, mp, msg_size);
1144 1157 if (ret != DDI_SUCCESS)
1145 1158 goto exit_tx_external;
1146 1159 }
1147 1160
1148 1161 virtio_push_chain(ve, B_TRUE);
1149 1162
1150 1163 sc->sc_opackets++;
1151 1164 sc->sc_obytes += msg_size;
1152 1165
1153 1166 return (B_TRUE);
1154 1167
1155 1168 exit_tx_external:
1156 1169
1157 1170 vq_free_entry(sc->sc_tx_vq, ve);
1158 1171 /*
1159 1172 * vioif_tx_external can fail when the buffer does not fit into the
1160 1173 * indirect descriptor table. Free the mp. I don't expect this ever
1161 1174 * to happen.
1162 1175 */
1163 1176 freemsg(mp);
1164 1177
1165 1178 return (B_TRUE);
1166 1179 }
1167 1180
1168 1181 mblk_t *
1169 1182 vioif_tx(void *arg, mblk_t *mp)
1170 1183 {
1171 1184 struct vioif_softc *sc = arg;
1172 1185 mblk_t *nmp;
1173 1186
1174 1187 while (mp != NULL) {
1175 1188 nmp = mp->b_next;
1176 1189 mp->b_next = NULL;
1177 1190
1178 1191 if (!vioif_send(sc, mp)) {
1179 1192 sc->sc_tx_stopped = 1;
1180 1193 mp->b_next = nmp;
1181 1194 break;
1182 1195 }
1183 1196 mp = nmp;
1184 1197 }
1185 1198
1186 1199 return (mp);
1187 1200 }
1188 1201
1189 1202 int
1190 1203 vioif_start(void *arg)
1191 1204 {
1192 1205 struct vioif_softc *sc = arg;
1193 1206 struct vq_entry *ve;
1194 1207 uint32_t len;
1195 1208
1196 - mac_link_update(sc->sc_mac_handle,
1197 - vioif_link_state(sc));
1209 + mac_link_update(sc->sc_mac_handle, vioif_link_state(sc));
1198 1210
1199 1211 virtio_start_vq_intr(sc->sc_rx_vq);
1200 1212
1201 1213 /*
1202 1214 * Don't start interrupts on sc_tx_vq. We use VIRTIO_F_NOTIFY_ON_EMPTY,
1203 1215 * so the device will send a transmit interrupt when the queue is empty
1204 1216 * and we can reclaim it in one sweep.
1205 1217 */
1206 1218
1207 1219 /*
1208 1220 * Clear any data that arrived early on the receive queue and populate
1209 1221 * it with free buffers that the device can use moving forward.
1210 1222 */
1211 1223 while ((ve = virtio_pull_chain(sc->sc_rx_vq, &len)) != NULL) {
1212 1224 virtio_free_chain(ve);
1213 1225 }
1214 1226 (void) vioif_populate_rx(sc, KM_SLEEP);
1215 1227
1216 1228 return (DDI_SUCCESS);
1217 1229 }
1218 1230
1219 1231 void
1220 1232 vioif_stop(void *arg)
1221 1233 {
1222 1234 struct vioif_softc *sc = arg;
1223 1235
1224 1236 virtio_stop_vq_intr(sc->sc_rx_vq);
1225 1237 }
1226 1238
1227 1239 /* ARGSUSED */
1228 1240 static int
1229 1241 vioif_stat(void *arg, uint_t stat, uint64_t *val)
1230 1242 {
1231 1243 struct vioif_softc *sc = arg;
1232 1244
1233 1245 switch (stat) {
1234 1246 case MAC_STAT_IERRORS:
1235 1247 *val = sc->sc_ierrors;
1236 1248 break;
1237 1249 case MAC_STAT_OERRORS:
1238 1250 *val = sc->sc_oerrors;
1239 1251 break;
1240 1252 case MAC_STAT_MULTIRCV:
1241 1253 *val = sc->sc_multircv;
1242 1254 break;
1243 1255 case MAC_STAT_BRDCSTRCV:
1244 1256 *val = sc->sc_brdcstrcv;
1245 1257 break;
1246 1258 case MAC_STAT_MULTIXMT:
1247 1259 *val = sc->sc_multixmt;
1248 1260 break;
1249 1261 case MAC_STAT_BRDCSTXMT:
1250 1262 *val = sc->sc_brdcstxmt;
1251 1263 break;
1252 1264 case MAC_STAT_IPACKETS:
1253 1265 *val = sc->sc_ipackets;
1254 1266 break;
1255 1267 case MAC_STAT_RBYTES:
1256 1268 *val = sc->sc_rbytes;
1257 1269 break;
1258 1270 case MAC_STAT_OPACKETS:
1259 1271 *val = sc->sc_opackets;
1260 1272 break;
1261 1273 case MAC_STAT_OBYTES:
1262 1274 *val = sc->sc_obytes;
1263 1275 break;
1264 1276 case MAC_STAT_NORCVBUF:
1265 1277 *val = sc->sc_norecvbuf;
1266 1278 break;
1267 1279 case MAC_STAT_NOXMTBUF:
1268 1280 *val = sc->sc_notxbuf;
1269 1281 break;
1270 1282 case MAC_STAT_IFSPEED:
1271 1283 /* always 1 Gbit */
1272 1284 *val = 1000000000ULL;
1273 1285 break;
1274 1286 case ETHER_STAT_LINK_DUPLEX:
1275 1287 /* virtual device, always full-duplex */
1276 1288 *val = LINK_DUPLEX_FULL;
1277 1289 break;
1278 1290
1279 1291 default:
1280 1292 return (ENOTSUP);
1281 1293 }
1282 1294
1283 1295 return (DDI_SUCCESS);
1284 1296 }
1285 1297
1286 1298 static int
1287 1299 vioif_set_prop_private(struct vioif_softc *sc, const char *pr_name,
1288 1300 uint_t pr_valsize, const void *pr_val)
1289 1301 {
1290 1302 _NOTE(ARGUNUSED(pr_valsize));
1291 1303
1292 1304 long result;
1293 1305
1294 1306 if (strcmp(pr_name, vioif_txcopy_thresh) == 0) {
1295 1307
1296 1308 if (pr_val == NULL)
1297 1309 return (EINVAL);
1298 1310
1299 1311 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1300 1312
1301 1313 if (result < 0 || result > VIOIF_TX_THRESH_MAX)
1302 1314 return (EINVAL);
1303 1315 sc->sc_txcopy_thresh = result;
1304 1316 }
1305 1317 if (strcmp(pr_name, vioif_rxcopy_thresh) == 0) {
1306 1318
1307 1319 if (pr_val == NULL)
1308 1320 return (EINVAL);
1309 1321
1310 1322 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1311 1323
1312 1324 if (result < 0 || result > VIOIF_RX_THRESH_MAX)
1313 1325 return (EINVAL);
1314 1326 sc->sc_rxcopy_thresh = result;
1315 1327 }
1316 1328 return (0);
1317 1329 }
1318 1330
1319 1331 static int
1320 1332 vioif_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1321 1333 uint_t pr_valsize, const void *pr_val)
1322 1334 {
1323 1335 struct vioif_softc *sc = arg;
1324 1336 const uint32_t *new_mtu;
1325 1337 int err;
1326 1338
1327 1339 switch (pr_num) {
1328 1340 case MAC_PROP_MTU:
1329 1341 new_mtu = pr_val;
1330 1342
1331 1343 if (*new_mtu > MAX_MTU) {
1332 1344 return (EINVAL);
1333 1345 }
1334 1346
1335 1347 err = mac_maxsdu_update(sc->sc_mac_handle, *new_mtu);
1336 1348 if (err) {
1337 1349 return (err);
1338 1350 }
1339 1351 break;
1340 1352 case MAC_PROP_PRIVATE:
1341 1353 err = vioif_set_prop_private(sc, pr_name,
1342 1354 pr_valsize, pr_val);
1343 1355 if (err)
1344 1356 return (err);
1345 1357 break;
1346 1358 default:
1347 1359 return (ENOTSUP);
1348 1360 }
1349 1361
1350 1362 return (0);
1351 1363 }
1352 1364
1353 1365 static int
1354 1366 vioif_get_prop_private(struct vioif_softc *sc, const char *pr_name,
1355 1367 uint_t pr_valsize, void *pr_val)
1356 1368 {
1357 1369 int err = ENOTSUP;
1358 1370 int value;
1359 1371
1360 1372 if (strcmp(pr_name, vioif_txcopy_thresh) == 0) {
1361 1373
1362 1374 value = sc->sc_txcopy_thresh;
1363 1375 err = 0;
1364 1376 goto done;
1365 1377 }
1366 1378 if (strcmp(pr_name, vioif_rxcopy_thresh) == 0) {
1367 1379
1368 1380 value = sc->sc_rxcopy_thresh;
1369 1381 err = 0;
1370 1382 goto done;
1371 1383 }
1372 1384 done:
1373 1385 if (err == 0) {
1374 1386 (void) snprintf(pr_val, pr_valsize, "%d", value);
1375 1387 }
1376 1388 return (err);
1377 1389 }
1378 1390
1379 1391 static int
1380 1392 vioif_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1381 1393 uint_t pr_valsize, void *pr_val)
1382 1394 {
1383 1395 struct vioif_softc *sc = arg;
1384 1396 int err = ENOTSUP;
1385 1397
1386 1398 switch (pr_num) {
1387 1399 case MAC_PROP_PRIVATE:
1388 1400 err = vioif_get_prop_private(sc, pr_name,
1389 1401 pr_valsize, pr_val);
1390 1402 break;
1391 1403 default:
1392 1404 break;
1393 1405 }
1394 1406 return (err);
1395 1407 }
1396 1408
1397 1409 static void
1398 1410 vioif_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1399 1411 mac_prop_info_handle_t prh)
1400 1412 {
1401 1413 struct vioif_softc *sc = arg;
1402 1414 char valstr[64];
1403 1415 int value;
1404 1416
1405 1417 switch (pr_num) {
1406 1418 case MAC_PROP_MTU:
1407 1419 mac_prop_info_set_range_uint32(prh, ETHERMIN, MAX_MTU);
1408 1420 break;
1409 1421
1410 1422 case MAC_PROP_PRIVATE:
1411 1423 bzero(valstr, sizeof (valstr));
1412 1424 if (strcmp(pr_name, vioif_txcopy_thresh) == 0) {
1413 -
1414 1425 value = sc->sc_txcopy_thresh;
1415 - } else if (strcmp(pr_name,
1416 - vioif_rxcopy_thresh) == 0) {
1426 + } else if (strcmp(pr_name, vioif_rxcopy_thresh) == 0) {
1417 1427 value = sc->sc_rxcopy_thresh;
1418 1428 } else {
1419 1429 return;
1420 1430 }
1421 1431 (void) snprintf(valstr, sizeof (valstr), "%d", value);
1422 1432 break;
1423 1433
1424 1434 default:
1425 1435 break;
1426 1436 }
1427 1437 }
1428 1438
1429 1439 static boolean_t
1430 1440 vioif_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1431 1441 {
1432 1442 struct vioif_softc *sc = arg;
1433 1443
1434 1444 switch (cap) {
1435 1445 case MAC_CAPAB_HCKSUM:
1436 1446 if (sc->sc_tx_csum) {
1437 1447 uint32_t *txflags = cap_data;
1438 1448
1439 1449 *txflags = HCKSUM_INET_PARTIAL;
1440 1450 return (B_TRUE);
1441 1451 }
1442 1452 return (B_FALSE);
1443 1453 case MAC_CAPAB_LSO:
1444 1454 if (sc->sc_tx_tso4) {
1445 1455 mac_capab_lso_t *cap_lso = cap_data;
1446 1456
1447 1457 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
1448 1458 cap_lso->lso_basic_tcp_ipv4.lso_max = MAX_MTU;
1449 1459 return (B_TRUE);
1450 1460 }
1451 1461 return (B_FALSE);
1452 1462 default:
1453 1463 break;
1454 1464 }
1455 1465 return (B_FALSE);
1456 1466 }
1457 1467
1458 1468 static mac_callbacks_t vioif_m_callbacks = {
1459 1469 .mc_callbacks = (MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO),
1460 1470 .mc_getstat = vioif_stat,
1461 1471 .mc_start = vioif_start,
1462 1472 .mc_stop = vioif_stop,
1463 1473 .mc_setpromisc = vioif_promisc,
1464 1474 .mc_multicst = vioif_multicst,
1465 1475 .mc_unicst = vioif_unicst,
1466 1476 .mc_tx = vioif_tx,
1467 1477 /* Optional callbacks */
1468 1478 .mc_reserved = NULL, /* reserved */
1469 1479 .mc_ioctl = NULL, /* mc_ioctl */
1470 1480 .mc_getcapab = vioif_getcapab, /* mc_getcapab */
1471 1481 .mc_open = NULL, /* mc_open */
1472 1482 .mc_close = NULL, /* mc_close */
1473 1483 .mc_setprop = vioif_setprop,
1474 1484 .mc_getprop = vioif_getprop,
1475 1485 .mc_propinfo = vioif_propinfo,
1476 1486 };
1477 1487
1478 1488 static void
1479 1489 vioif_show_features(struct vioif_softc *sc, const char *prefix,
1480 1490 uint32_t features)
1481 1491 {
1482 1492 char buf[512];
1483 1493 char *bufp = buf;
1484 1494 char *bufend = buf + sizeof (buf);
1485 1495
1486 1496 /* LINTED E_PTRDIFF_OVERFLOW */
1487 1497 bufp += snprintf(bufp, bufend - bufp, prefix);
1488 1498 /* LINTED E_PTRDIFF_OVERFLOW */
1489 1499 bufp += virtio_show_features(features, bufp, bufend - bufp);
1490 1500 *bufp = '\0';
1491 1501
1492 -
1493 1502 /* Using '!' to only CE_NOTE this to the system log. */
1494 1503 dev_err(sc->sc_dev, CE_NOTE, "!%s Vioif (%b)", buf, features,
1495 1504 VIRTIO_NET_FEATURE_BITS);
1496 1505 }
1497 1506
1498 1507 /*
1499 1508 * Find out which features are supported by the device and
1500 1509 * choose which ones we wish to use.
1501 1510 */
1502 1511 static int
1503 1512 vioif_dev_features(struct vioif_softc *sc)
1504 1513 {
1505 1514 uint32_t host_features;
1506 1515
1507 1516 host_features = virtio_negotiate_features(&sc->sc_virtio,
1508 1517 VIRTIO_NET_F_CSUM |
1509 1518 VIRTIO_NET_F_HOST_TSO4 |
1510 1519 VIRTIO_NET_F_HOST_ECN |
1511 1520 VIRTIO_NET_F_MAC |
1512 1521 VIRTIO_NET_F_STATUS |
1513 1522 VIRTIO_F_RING_INDIRECT_DESC |
1514 1523 VIRTIO_F_NOTIFY_ON_EMPTY);
1515 1524
1516 1525 vioif_show_features(sc, "Host features: ", host_features);
1517 1526 vioif_show_features(sc, "Negotiated features: ",
1518 1527 sc->sc_virtio.sc_features);
1519 1528
1520 1529 if (!(sc->sc_virtio.sc_features & VIRTIO_F_RING_INDIRECT_DESC)) {
1521 - dev_err(sc->sc_dev, CE_NOTE,
1522 - "Host does not support RING_INDIRECT_DESC, bye.");
1530 + dev_err(sc->sc_dev, CE_WARN,
1531 + "Host does not support RING_INDIRECT_DESC. Cannot attach.");
1523 1532 return (DDI_FAILURE);
1524 1533 }
1525 1534
1526 1535 return (DDI_SUCCESS);
1527 1536 }
1528 1537
1529 1538 static int
1530 1539 vioif_has_feature(struct vioif_softc *sc, uint32_t feature)
1531 1540 {
1532 1541 return (virtio_has_feature(&sc->sc_virtio, feature));
1533 1542 }
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
1534 1543
1535 1544 static void
1536 1545 vioif_set_mac(struct vioif_softc *sc)
1537 1546 {
1538 1547 int i;
1539 1548
1540 1549 for (i = 0; i < ETHERADDRL; i++) {
1541 1550 virtio_write_device_config_1(&sc->sc_virtio,
1542 1551 VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]);
1543 1552 }
1553 + sc->sc_mac_from_host = 0;
1544 1554 }
1545 1555
1546 1556 /* Get the mac address out of the hardware, or make up one. */
1547 1557 static void
1548 1558 vioif_get_mac(struct vioif_softc *sc)
1549 1559 {
1550 1560 int i;
1551 1561 if (sc->sc_virtio.sc_features & VIRTIO_NET_F_MAC) {
1552 1562 for (i = 0; i < ETHERADDRL; i++) {
1553 1563 sc->sc_mac[i] = virtio_read_device_config_1(
1554 1564 &sc->sc_virtio,
1555 1565 VIRTIO_NET_CONFIG_MAC + i);
1556 1566 }
1557 - dev_err(sc->sc_dev, CE_NOTE, "Got MAC address from host: %s",
1558 - ether_sprintf((struct ether_addr *)sc->sc_mac));
1567 + sc->sc_mac_from_host = 1;
1559 1568 } else {
1560 1569 /* Get a few random bytes */
1561 1570 (void) random_get_pseudo_bytes(sc->sc_mac, ETHERADDRL);
1562 1571 /* Make sure it's a unicast MAC */
1563 1572 sc->sc_mac[0] &= ~1;
1564 1573 /* Set the "locally administered" bit */
1565 1574 sc->sc_mac[1] |= 2;
1566 1575
1567 1576 vioif_set_mac(sc);
1568 1577
1569 1578 dev_err(sc->sc_dev, CE_NOTE,
1570 - "Generated a random MAC address: %s",
1579 + "!Generated a random MAC address: %s",
1571 1580 ether_sprintf((struct ether_addr *)sc->sc_mac));
1572 1581 }
1573 1582 }
1574 1583
1575 1584 /*
1576 1585 * Virtqueue interrupt handlers
1577 1586 */
1578 1587 /* ARGSUSED */
1579 1588 uint_t
1580 1589 vioif_rx_handler(caddr_t arg1, caddr_t arg2)
1581 1590 {
1582 1591 struct virtio_softc *vsc = (void *) arg1;
1583 1592 struct vioif_softc *sc = container_of(vsc,
1584 1593 struct vioif_softc, sc_virtio);
1585 1594
1586 1595 /*
1587 1596 * The return values of these functions are not needed but they make
1588 1597 * debugging interrupts simpler because you can use them to detect when
1589 1598 * stuff was processed and repopulated in this handler.
1590 1599 */
1591 1600 (void) vioif_process_rx(sc);
1592 1601 (void) vioif_populate_rx(sc, KM_NOSLEEP);
1593 1602
1594 1603 return (DDI_INTR_CLAIMED);
1595 1604 }
1596 1605
1597 1606 /* ARGSUSED */
1598 1607 uint_t
1599 1608 vioif_tx_handler(caddr_t arg1, caddr_t arg2)
1600 1609 {
1601 1610 struct virtio_softc *vsc = (void *)arg1;
1602 1611 struct vioif_softc *sc = container_of(vsc,
1603 1612 struct vioif_softc, sc_virtio);
1604 1613
1605 1614 /*
1606 1615 * The return value of this function is not needed but makes debugging
1607 1616 * interrupts simpler because you can use it to detect if anything was
1608 1617 * reclaimed in this handler.
1609 1618 */
1610 1619 (void) vioif_reclaim_used_tx(sc);
1611 1620
1612 1621 return (DDI_INTR_CLAIMED);
1613 1622 }
1614 1623
1615 1624 static int
1616 1625 vioif_register_ints(struct vioif_softc *sc)
1617 1626 {
1618 1627 int ret;
1619 1628
1620 1629 struct virtio_int_handler vioif_vq_h[] = {
1621 1630 { vioif_rx_handler },
1622 1631 { vioif_tx_handler },
1623 1632 { NULL }
1624 1633 };
1625 1634
1626 1635 ret = virtio_register_ints(&sc->sc_virtio, NULL, vioif_vq_h);
1627 1636
1628 1637 return (ret);
1629 1638 }
1630 1639
1631 1640
1632 1641 static void
1633 1642 vioif_check_features(struct vioif_softc *sc)
1634 1643 {
1635 1644 if (vioif_has_feature(sc, VIRTIO_NET_F_CSUM)) {
1636 1645 /* The GSO/GRO features depend on CSUM; check them here. */
1637 1646 sc->sc_tx_csum = 1;
1638 1647 sc->sc_rx_csum = 1;
1639 1648
1640 1649 if (!vioif_has_feature(sc, VIRTIO_NET_F_GUEST_CSUM)) {
1641 1650 sc->sc_rx_csum = 0;
1642 1651 }
1643 - cmn_err(CE_NOTE, "Csum enabled.");
1652 + dev_err(sc->sc_dev, CE_NOTE, "!Csum enabled.");
1644 1653
1645 1654 if (vioif_has_feature(sc, VIRTIO_NET_F_HOST_TSO4)) {
1646 1655
1647 1656 sc->sc_tx_tso4 = 1;
1648 1657 /*
1649 1658 * We don't seem to have a way to ask the system
1650 1659 * not to send us LSO packets with Explicit
1651 1660 * Congestion Notification bit set, so we require
1652 1661 * the device to support it in order to do
1653 1662 * LSO.
1654 1663 */
1655 1664 if (!vioif_has_feature(sc, VIRTIO_NET_F_HOST_ECN)) {
1656 1665 dev_err(sc->sc_dev, CE_NOTE,
1657 - "TSO4 supported, but not ECN. "
1666 + "!TSO4 supported, but not ECN. "
1658 1667 "Not using LSO.");
1659 1668 sc->sc_tx_tso4 = 0;
1660 1669 } else {
1661 - cmn_err(CE_NOTE, "LSO enabled");
1670 + dev_err(sc->sc_dev, CE_NOTE, "!LSO enabled");
1662 1671 }
1663 1672 }
1664 1673 }
1665 1674 }
1666 1675
1667 1676 static int
1668 1677 vioif_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
1669 1678 {
1670 1679 int ret, instance;
1671 1680 struct vioif_softc *sc;
1672 1681 struct virtio_softc *vsc;
1673 1682 mac_register_t *macp;
1674 1683 char cache_name[CACHE_NAME_SIZE];
1675 1684
1676 1685 instance = ddi_get_instance(devinfo);
1677 1686
1678 1687 switch (cmd) {
1679 1688 case DDI_ATTACH:
1680 1689 break;
1681 1690
1682 1691 case DDI_RESUME:
1683 1692 case DDI_PM_RESUME:
1684 1693 /* We do not support suspend/resume for vioif. */
1685 1694 goto exit;
1686 1695
1687 1696 default:
1688 1697 goto exit;
1689 1698 }
1690 1699
1691 1700 sc = kmem_zalloc(sizeof (struct vioif_softc), KM_SLEEP);
1692 1701 ddi_set_driver_private(devinfo, sc);
1693 1702
1694 1703 vsc = &sc->sc_virtio;
1695 1704
1696 1705 /* Duplicate for less typing */
1697 1706 sc->sc_dev = devinfo;
1698 1707 vsc->sc_dev = devinfo;
1699 1708
1700 1709 /*
1701 1710 * Initialize interrupt kstat.
1702 1711 */
1703 1712 sc->sc_intrstat = kstat_create("vioif", instance, "intr", "controller",
1704 1713 KSTAT_TYPE_INTR, 1, 0);
1705 1714 if (sc->sc_intrstat == NULL) {
1706 1715 dev_err(devinfo, CE_WARN, "kstat_create failed");
1707 1716 goto exit_intrstat;
1708 1717 }
1709 1718 kstat_install(sc->sc_intrstat);
1710 1719
1711 1720 /* map BAR 0 */
1712 1721 ret = ddi_regs_map_setup(devinfo, 1,
1713 1722 (caddr_t *)&sc->sc_virtio.sc_io_addr,
1714 1723 0, 0, &vioif_attr, &sc->sc_virtio.sc_ioh);
1715 1724 if (ret != DDI_SUCCESS) {
1716 1725 dev_err(devinfo, CE_WARN, "unable to map bar 0: %d", ret);
1717 1726 goto exit_map;
1718 1727 }
1719 1728
1720 1729 virtio_device_reset(&sc->sc_virtio);
1721 1730 virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
1722 1731 virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
1723 1732
1724 1733 ret = vioif_dev_features(sc);
1725 1734 if (ret)
1726 1735 goto exit_features;
1727 1736
1728 1737 vsc->sc_nvqs = vioif_has_feature(sc, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
1729 1738
1730 1739 (void) snprintf(cache_name, CACHE_NAME_SIZE, "vioif%d_rx", instance);
1731 1740 sc->sc_rxbuf_cache = kmem_cache_create(cache_name,
1732 1741 sizeof (struct vioif_rx_buf), 0, vioif_rx_construct,
1733 1742 vioif_rx_destruct, NULL, sc, NULL, KM_SLEEP);
1734 1743 if (sc->sc_rxbuf_cache == NULL) {
1735 1744 dev_err(sc->sc_dev, CE_WARN, "Can't allocate the buffer cache");
1736 1745 goto exit_cache;
1737 1746 }
1738 1747
1739 1748 ret = vioif_register_ints(sc);
1740 1749 if (ret) {
1741 1750 dev_err(sc->sc_dev, CE_WARN,
1742 1751 "Failed to allocate interrupt(s)!");
1743 1752 goto exit_ints;
1744 1753 }
1745 1754
1746 1755 /*
1747 1756 * Register layout determined, can now access the
1748 1757 * device-specific bits
1749 1758 */
1750 1759 vioif_get_mac(sc);
1751 1760
1752 1761 sc->sc_rx_vq = virtio_alloc_vq(&sc->sc_virtio, 0,
1753 1762 VIOIF_RX_QLEN, VIOIF_INDIRECT_MAX, "rx");
1754 1763 if (!sc->sc_rx_vq)
1755 1764 goto exit_alloc1;
1756 1765 virtio_stop_vq_intr(sc->sc_rx_vq);
1757 1766
1758 1767 sc->sc_tx_vq = virtio_alloc_vq(&sc->sc_virtio, 1,
1759 1768 VIOIF_TX_QLEN, VIOIF_INDIRECT_MAX, "tx");
1760 1769 if (!sc->sc_tx_vq)
1761 1770 goto exit_alloc2;
1762 1771 virtio_stop_vq_intr(sc->sc_tx_vq);
1763 1772
1764 1773 if (vioif_has_feature(sc, VIRTIO_NET_F_CTRL_VQ)) {
1765 1774 sc->sc_ctrl_vq = virtio_alloc_vq(&sc->sc_virtio, 2,
1766 1775 VIOIF_CTRL_QLEN, 0, "ctrl");
1767 1776 if (!sc->sc_ctrl_vq) {
1768 1777 goto exit_alloc3;
1769 1778 }
1770 1779 virtio_stop_vq_intr(sc->sc_ctrl_vq);
1771 1780 }
1772 1781
1773 1782 virtio_set_status(&sc->sc_virtio,
1774 1783 VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
1775 1784
1776 1785 sc->sc_rxloan = 0;
1777 1786
1778 1787 	/* set some reasonably small default values */
1779 1788 sc->sc_rxcopy_thresh = 300;
1780 1789 sc->sc_txcopy_thresh = 300;
1781 1790 sc->sc_mtu = ETHERMTU;
1782 1791
1783 1792 vioif_check_features(sc);
1784 1793
1785 - if (vioif_alloc_mems(sc))
1794 + if (vioif_alloc_mems(sc) != 0)
1786 1795 goto exit_alloc_mems;
1787 1796
1788 1797 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
1789 1798 dev_err(devinfo, CE_WARN, "Failed to allocate a mac_register");
1790 1799 goto exit_macalloc;
1791 1800 }
1792 1801
1793 1802 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
1794 1803 macp->m_driver = sc;
1795 1804 macp->m_dip = devinfo;
1796 1805 macp->m_src_addr = sc->sc_mac;
1797 1806 macp->m_callbacks = &vioif_m_callbacks;
1798 1807 macp->m_min_sdu = 0;
1799 1808 macp->m_max_sdu = sc->sc_mtu;
1800 1809 macp->m_margin = VLAN_TAGSZ;
1801 1810 macp->m_priv_props = vioif_priv_props;
1802 1811
1803 1812 sc->sc_macp = macp;
1804 1813
1805 1814 /* Pre-fill the rx ring. */
1806 1815 (void) vioif_populate_rx(sc, KM_SLEEP);
1807 1816
1808 1817 ret = mac_register(macp, &sc->sc_mac_handle);
1809 1818 if (ret != 0) {
1810 1819 dev_err(devinfo, CE_WARN, "vioif_attach: "
1811 1820 "mac_register() failed, ret=%d", ret);
1812 1821 goto exit_register;
1813 1822 }
1814 1823
1815 1824 ret = virtio_enable_ints(&sc->sc_virtio);
1816 1825 if (ret) {
1817 1826 dev_err(devinfo, CE_WARN, "Failed to enable interrupts");
1818 1827 goto exit_enable_ints;
1819 1828 }
1820 1829
1821 1830 mac_link_update(sc->sc_mac_handle, LINK_STATE_UP);
1822 1831 return (DDI_SUCCESS);
1823 1832
1824 1833 exit_enable_ints:
1825 1834 (void) mac_unregister(sc->sc_mac_handle);
1826 1835 exit_register:
1827 1836 mac_free(macp);
1828 1837 exit_macalloc:
1829 1838 vioif_free_mems(sc);
1830 1839 exit_alloc_mems:
1831 1840 virtio_release_ints(&sc->sc_virtio);
1832 1841 if (sc->sc_ctrl_vq)
1833 1842 virtio_free_vq(sc->sc_ctrl_vq);
1834 1843 exit_alloc3:
1835 1844 virtio_free_vq(sc->sc_tx_vq);
1836 1845 exit_alloc2:
1837 1846 virtio_free_vq(sc->sc_rx_vq);
1838 1847 exit_alloc1:
1839 1848 exit_ints:
1840 1849 kmem_cache_destroy(sc->sc_rxbuf_cache);
1841 1850 exit_cache:
1842 1851 exit_features:
1843 1852 virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
1844 1853 ddi_regs_map_free(&sc->sc_virtio.sc_ioh);
1845 1854 exit_intrstat:
1846 1855 exit_map:
1847 1856 kstat_delete(sc->sc_intrstat);
1848 1857 kmem_free(sc, sizeof (struct vioif_softc));
1849 1858 exit:
1850 1859 return (DDI_FAILURE);
1851 1860 }
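
vioif_attach() follows the usual virtio bring-up order (reset the device, set ACK and DRIVER status, negotiate features, allocate the virtqueues, then set DRIVER_OK) and unwinds errors with cascading goto labels: each acquired resource has a matching exit_* label, and a failure jumps to the label that releases everything acquired so far, in reverse order. The minimal sketch below shows only that unwind pattern; alloc_a(), alloc_b(), and the failure flag are hypothetical.

#include <stdio.h>
#include <stdlib.h>

/*
 * Minimal sketch of the cascading-goto unwind used above: resources are
 * acquired in order, and each failure path jumps to the label that
 * releases everything acquired so far, in reverse order.
 * alloc_a()/alloc_b() are hypothetical stand-ins.
 */
static void *
alloc_a(void)
{
	return (malloc(16));
}

static void *
alloc_b(int fail)
{
	return (fail ? NULL : malloc(16));
}

static int
attach_sketch(int fail_b)
{
	void *a, *b;

	if ((a = alloc_a()) == NULL)
		goto exit;

	if ((b = alloc_b(fail_b)) == NULL)
		goto exit_a;

	printf("attach succeeded\n");
	free(b);
	free(a);
	return (0);

exit_a:
	free(a);
exit:
	printf("attach failed, earlier resources released\n");
	return (-1);
}

int
main(void)
{
	(void) attach_sketch(0);
	(void) attach_sketch(1);
	return (0);
}
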
1852 1861
1853 1862 static int
1854 1863 vioif_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1855 1864 {
1856 1865 struct vioif_softc *sc;
1857 1866
1858 1867 if ((sc = ddi_get_driver_private(devinfo)) == NULL)
1859 1868 return (DDI_FAILURE);
1860 1869
1861 1870 switch (cmd) {
1862 1871 case DDI_DETACH:
1863 1872 break;
1864 1873
1865 1874 case DDI_PM_SUSPEND:
1866 1875 /* We do not support suspend/resume for vioif. */
1867 1876 return (DDI_FAILURE);
1868 1877
1869 1878 default:
1870 1879 return (DDI_FAILURE);
1871 1880 }
1872 1881
1873 - if (sc->sc_rxloan) {
1882 + if (sc->sc_rxloan > 0) {
1874 1883 dev_err(devinfo, CE_WARN, "!Some rx buffers are still upstream,"
1875 1884 " not detaching.");
1876 1885 return (DDI_FAILURE);
1877 1886 }
1878 1887
1879 1888 virtio_stop_vq_intr(sc->sc_rx_vq);
1880 1889 virtio_stop_vq_intr(sc->sc_tx_vq);
1881 1890
1882 1891 virtio_release_ints(&sc->sc_virtio);
1883 1892
1884 1893 if (mac_unregister(sc->sc_mac_handle)) {
1885 1894 return (DDI_FAILURE);
1886 1895 }
1887 1896
1888 1897 mac_free(sc->sc_macp);
1889 1898
1890 1899 vioif_free_mems(sc);
1891 1900 virtio_free_vq(sc->sc_rx_vq);
1892 1901 virtio_free_vq(sc->sc_tx_vq);
1893 1902
1894 1903 virtio_device_reset(&sc->sc_virtio);
1895 1904
1896 1905 ddi_regs_map_free(&sc->sc_virtio.sc_ioh);
1897 1906
1898 1907 kmem_cache_destroy(sc->sc_rxbuf_cache);
1899 1908 kstat_delete(sc->sc_intrstat);
1900 1909 kmem_free(sc, sizeof (struct vioif_softc));
1901 1910
1902 1911 return (DDI_SUCCESS);
1903 1912 }
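
The detach path refuses to proceed while sc_rxloan is nonzero, i.e. while any receive buffers are still loaned upstream to the networking stack; the change above only makes that comparison explicit. The sketch below illustrates the loan-counting idea with hypothetical names (loan_buffer(), buffer_returned()); the real driver adjusts sc_rxloan from its receive path and its buffer free callback.

#include <stdio.h>

/*
 * Sketch of loaned-buffer accounting, illustration only: the counter
 * goes up when an rx buffer is handed upstream and back down when the
 * stack returns it, and teardown must refuse to run while the count is
 * nonzero.
 */
struct softc_sketch {
	unsigned long rxloan;
};

static void
loan_buffer(struct softc_sketch *sc)
{
	sc->rxloan++;		/* buffer now owned by the stack */
}

static void
buffer_returned(struct softc_sketch *sc)
{
	sc->rxloan--;		/* free callback: buffer is ours again */
}

static int
try_detach(struct softc_sketch *sc)
{
	if (sc->rxloan > 0) {
		printf("rx buffers still upstream, not detaching\n");
		return (-1);
	}
	printf("detach ok\n");
	return (0);
}

int
main(void)
{
	struct softc_sketch sc = { 0 };

	loan_buffer(&sc);
	(void) try_detach(&sc);		/* refused */
	buffer_returned(&sc);
	(void) try_detach(&sc);		/* succeeds */
	return (0);
}
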
1904 1913
1905 1914 static int
1906 1915 vioif_quiesce(dev_info_t *devinfo)
1907 1916 {
1908 1917 struct vioif_softc *sc;
1909 1918
1910 1919 if ((sc = ddi_get_driver_private(devinfo)) == NULL)
1911 1920 return (DDI_FAILURE);
1912 1921
1913 1922 virtio_stop_vq_intr(sc->sc_rx_vq);
1914 1923 virtio_stop_vq_intr(sc->sc_tx_vq);
1915 1924 virtio_device_reset(&sc->sc_virtio);
1916 1925
1917 1926 return (DDI_SUCCESS);
1918 1927 }
1919 1928
1920 1929 int
1921 1930 _init(void)
1922 1931 {
1923 1932 int ret = 0;
1924 1933
1925 1934 mac_init_ops(&vioif_ops, "vioif");
1926 1935
1927 1936 ret = mod_install(&modlinkage);
1928 1937 if (ret != DDI_SUCCESS) {
1929 1938 mac_fini_ops(&vioif_ops);
1930 1939 return (ret);
1931 1940 }
1932 1941
1933 1942 return (0);
1934 1943 }
1935 1944
1936 1945 int
1937 1946 _fini(void)
1938 1947 {
1939 1948 int ret;
1940 1949
1941 1950 ret = mod_remove(&modlinkage);
1942 1951 if (ret == DDI_SUCCESS) {
1943 1952 mac_fini_ops(&vioif_ops);
1944 1953 }
1945 1954
1946 1955 return (ret);
1947 1956 }
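
_init() and _fini() are symmetric: mac_init_ops() runs before mod_install(), and if mod_install() fails the ops are torn down again; conversely, _fini() only calls mac_fini_ops() after mod_remove() has succeeded, since mod_remove() fails while the driver is still in use. The small sketch below shows just that ordering, with hypothetical setup()/teardown()/install_sketch()/remove_sketch() stand-ins.

#include <stdio.h>

/*
 * Sketch of the _init()/_fini() ordering above, with hypothetical
 * stand-ins: setup()/teardown() play the role of mac_init_ops()/
 * mac_fini_ops(), and install_sketch()/remove_sketch() play the role
 * of mod_install()/mod_remove().
 */
static void
setup(void)
{
	printf("ops registered\n");
}

static void
teardown(void)
{
	printf("ops unregistered\n");
}

static int
install_sketch(int busy)
{
	return (busy ? -1 : 0);
}

static int
remove_sketch(int busy)
{
	return (busy ? -1 : 0);
}

static int
init_sketch(int busy)
{
	setup();
	if (install_sketch(busy) != 0) {
		teardown();	/* undo the setup on failure */
		return (-1);
	}
	return (0);
}

static int
fini_sketch(int busy)
{
	if (remove_sketch(busy) != 0)
		return (-1);	/* still in use: leave the ops alone */
	teardown();
	return (0);
}

int
main(void)
{
	(void) init_sketch(0);
	(void) fini_sketch(1);	/* busy: refuses to tear down */
	(void) fini_sketch(0);	/* succeeds */
	return (0);
}
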
1948 1957
1949 1958 int
1950 1959 _info(struct modinfo *pModinfo)
1951 1960 {
1952 1961 return (mod_info(&modlinkage, pModinfo));
1953 1962 }