--- old/usr/src/uts/common/io/vioif/vioif.c
+++ new/usr/src/uts/common/io/vioif/vioif.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 - * Copyright 2013 Nexenta Inc. All rights reserved.
13 + * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
14 14 * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
15 15 */
16 16
17 17 /* Based on the NetBSD virtio driver by Minoura Makoto. */
18 18 /*
19 19 * Copyright (c) 2010 Minoura Makoto.
20 20 * All rights reserved.
21 21 *
22 22 * Redistribution and use in source and binary forms, with or without
23 23 * modification, are permitted provided that the following conditions
24 24 * are met:
25 25 * 1. Redistributions of source code must retain the above copyright
26 26 * notice, this list of conditions and the following disclaimer.
27 27 * 2. Redistributions in binary form must reproduce the above copyright
28 28 * notice, this list of conditions and the following disclaimer in the
29 29 * documentation and/or other materials provided with the distribution.
30 30 *
31 31 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
32 32 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
33 33 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
34 34 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
35 35 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
36 36 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
37 37 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
38 38 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
39 39 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
40 40 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 41 */
42 42
43 43 #include <sys/types.h>
44 44 #include <sys/errno.h>
45 45 #include <sys/param.h>
46 46 #include <sys/stropts.h>
47 47 #include <sys/stream.h>
48 48 #include <sys/strsubr.h>
49 49 #include <sys/kmem.h>
50 50 #include <sys/conf.h>
51 51 #include <sys/devops.h>
52 52 #include <sys/ksynch.h>
53 53 #include <sys/stat.h>
54 54 #include <sys/modctl.h>
55 55 #include <sys/debug.h>
56 56 #include <sys/pci.h>
57 57 #include <sys/ethernet.h>
58 58
59 59 #define VLAN_TAGSZ 4
60 60
61 61 #include <sys/dlpi.h>
62 62 #include <sys/taskq.h>
63 63 #include <sys/cyclic.h>
64 64
65 65 #include <sys/pattr.h>
66 66 #include <sys/strsun.h>
67 67
68 68 #include <sys/random.h>
69 69 #include <sys/sysmacros.h>
70 70 #include <sys/stream.h>
71 71
72 72 #include <sys/mac.h>
73 73 #include <sys/mac_provider.h>
74 74 #include <sys/mac_ether.h>
75 75
76 76 #include "virtiovar.h"
77 77 #include "virtioreg.h"
78 78
79 79 #if !defined(__packed)
80 80 #define __packed __attribute__((packed))
81 81 #endif /* __packed */
82 82
83 83 /* Configuration registers */
84 84 #define VIRTIO_NET_CONFIG_MAC 0 /* 8bit x 6byte */
85 85 #define VIRTIO_NET_CONFIG_STATUS 6 /* 16bit */
86 86
87 87 /* Feature bits */
88 88 #define VIRTIO_NET_F_CSUM (1 << 0) /* Host handles pkts w/ partial csum */
89 89 #define VIRTIO_NET_F_GUEST_CSUM (1 << 1) /* Guest handles pkts w/ part csum */
90 90 #define VIRTIO_NET_F_MAC (1 << 5) /* Host has given MAC address. */
91 91 #define VIRTIO_NET_F_GSO (1 << 6) /* Host handles pkts w/ any GSO type */
92 92 #define VIRTIO_NET_F_GUEST_TSO4 (1 << 7) /* Guest can handle TSOv4 in. */
93 93 #define VIRTIO_NET_F_GUEST_TSO6 (1 << 8) /* Guest can handle TSOv6 in. */
94 94 #define VIRTIO_NET_F_GUEST_ECN (1 << 9) /* Guest can handle TSO[6] w/ ECN in */
95 95 #define VIRTIO_NET_F_GUEST_UFO (1 << 10) /* Guest can handle UFO in. */
96 96 #define VIRTIO_NET_F_HOST_TSO4 (1 << 11) /* Host can handle TSOv4 in. */
97 97 #define VIRTIO_NET_F_HOST_TSO6 (1 << 12) /* Host can handle TSOv6 in. */
98 98 #define VIRTIO_NET_F_HOST_ECN (1 << 13) /* Host can handle TSO[6] w/ ECN in */
99 99 #define VIRTIO_NET_F_HOST_UFO (1 << 14) /* Host can handle UFO in. */
100 100 #define VIRTIO_NET_F_MRG_RXBUF (1 << 15) /* Host can merge receive buffers. */
101 101 #define VIRTIO_NET_F_STATUS (1 << 16) /* Config.status available */
102 102 #define VIRTIO_NET_F_CTRL_VQ (1 << 17) /* Control channel available */
103 103 #define VIRTIO_NET_F_CTRL_RX (1 << 18) /* Control channel RX mode support */
104 104 #define VIRTIO_NET_F_CTRL_VLAN (1 << 19) /* Control channel VLAN filtering */
105 105 #define VIRTIO_NET_F_CTRL_RX_EXTRA (1 << 20) /* Extra RX mode control support */
106 106
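Of the feature bits above, this driver only asks for CSUM, HOST_TSO4, HOST_ECN, MAC and STATUS, plus the generic indirect-descriptor and notify-on-empty bits (see vioif_dev_features() and vioif_check_features() below). A minimal sketch, not part of the change and with a made-up helper name, of how a negotiated feature word maps onto the offload flags kept in the softc:

static void
vioif_decode_offloads(uint32_t features, boolean_t *tx_csum, boolean_t *tx_tso4)
{
	/* Transmit checksum offload follows VIRTIO_NET_F_CSUM directly. */
	*tx_csum = (features & VIRTIO_NET_F_CSUM) != 0;
	/* TSO4 additionally needs the host to accept ECN-marked LSO. */
	*tx_tso4 = *tx_csum &&
	    (features & VIRTIO_NET_F_HOST_TSO4) != 0 &&
	    (features & VIRTIO_NET_F_HOST_ECN) != 0;
}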
107 107 /* Status */
108 108 #define VIRTIO_NET_S_LINK_UP 1
109 109
110 110 /* Packet header structure */
111 111 struct virtio_net_hdr {
112 112 uint8_t flags;
113 113 uint8_t gso_type;
114 114 uint16_t hdr_len;
115 115 uint16_t gso_size;
116 116 uint16_t csum_start;
117 117 uint16_t csum_offset;
118 118 };
119 119
120 120 #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* flags */
121 121 #define VIRTIO_NET_HDR_GSO_NONE 0 /* gso_type */
122 122 #define VIRTIO_NET_HDR_GSO_TCPV4 1 /* gso_type */
123 123 #define VIRTIO_NET_HDR_GSO_UDP 3 /* gso_type */
124 124 #define VIRTIO_NET_HDR_GSO_TCPV6 4 /* gso_type */
125 125 #define VIRTIO_NET_HDR_GSO_ECN 0x80 /* gso_type, |'ed */
126 126
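On transmit the driver only ever fills flags, csum_start, csum_offset, gso_type and gso_size (see vioif_send() below). A sketch, with a hypothetical helper name, of how the partial-checksum fields are derived from what mac_hcksum_get() reports; MAC's offsets are relative to the start of the IP header, which is why the ethernet header length is added back in:

static void
vioif_fill_csum_hdr(struct virtio_net_hdr *hdr, uint16_t eth_hsize,
    uint32_t csum_start, uint32_t csum_stuff)
{
	hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
	/* Offset from the start of the frame where summing begins. */
	hdr->csum_start = (uint16_t)(eth_hsize + csum_start);
	/* Offset from csum_start where the host stores the result. */
	hdr->csum_offset = (uint16_t)(csum_stuff - csum_start);
}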
127 127
128 128 /* Control virtqueue */
129 129 struct virtio_net_ctrl_cmd {
130 130 uint8_t class;
131 131 uint8_t command;
132 132 } __packed;
133 133
134 134 #define VIRTIO_NET_CTRL_RX 0
135 135 #define VIRTIO_NET_CTRL_RX_PROMISC 0
136 136 #define VIRTIO_NET_CTRL_RX_ALLMULTI 1
137 137
138 138 #define VIRTIO_NET_CTRL_MAC 1
139 139 #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
140 140
141 141 #define VIRTIO_NET_CTRL_VLAN 2
142 142 #define VIRTIO_NET_CTRL_VLAN_ADD 0
143 143 #define VIRTIO_NET_CTRL_VLAN_DEL 1
144 144
145 145 struct virtio_net_ctrl_status {
146 146 uint8_t ack;
147 147 } __packed;
148 148
149 149 struct virtio_net_ctrl_rx {
150 150 uint8_t onoff;
151 151 } __packed;
152 152
153 153 struct virtio_net_ctrl_mac_tbl {
154 154 uint32_t nentries;
155 155 uint8_t macs[][ETHERADDRL];
156 156 } __packed;
157 157
158 158 struct virtio_net_ctrl_vlan {
159 159 uint16_t id;
160 160 } __packed;
161 161
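The control-queue structures above are defined for completeness; this change never issues control commands (the ctrl virtqueue is only allocated when VIRTIO_NET_F_CTRL_VQ is negotiated, see vioif_attach()). For orientation, a sketch of what a "promiscuous on" request would carry; the helper and its name are illustrative only:

static void
vioif_ctrl_promisc_sketch(void)
{
	struct virtio_net_ctrl_cmd cmd = {
		VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC };
	struct virtio_net_ctrl_rx onoff = { 1 };
	struct virtio_net_ctrl_status status = { 0 };

	/*
	 * cmd and onoff would occupy device-readable descriptors and
	 * status a device-writable one; once the chain is pushed and the
	 * queue kicked, the host writes status.ack to signal completion.
	 */
}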
162 162 static int vioif_quiesce(dev_info_t *);
163 163 static int vioif_attach(dev_info_t *, ddi_attach_cmd_t);
164 164 static int vioif_detach(dev_info_t *, ddi_detach_cmd_t);
165 165
166 166 DDI_DEFINE_STREAM_OPS(vioif_ops,
167 167 nulldev, /* identify */
168 168 nulldev, /* probe */
169 169 vioif_attach, /* attach */
170 170 vioif_detach, /* detach */
171 171 nodev, /* reset */
172 172 NULL, /* cb_ops */
173 173 D_MP, /* bus_ops */
174 174 NULL, /* power */
175 175 vioif_quiesce /* quiesce */
176 176 );
177 177
178 178 static char vioif_ident[] = "VirtIO ethernet driver";
179 179
180 180 /* Standard Module linkage initialization for a Streams driver */
181 181 extern struct mod_ops mod_driverops;
182 182
183 183 static struct modldrv modldrv = {
184 184 &mod_driverops, /* Type of module. This one is a driver */
185 185 vioif_ident, /* short description */
186 186 &vioif_ops /* driver specific ops */
187 187 };
188 188
189 189 static struct modlinkage modlinkage = {
190 190 MODREV_1,
191 191 {
192 192 (void *)&modldrv,
193 193 NULL,
194 194 },
195 195 };
196 196
197 197 ddi_device_acc_attr_t vioif_attr = {
198 198 DDI_DEVICE_ATTR_V0,
199 199 DDI_NEVERSWAP_ACC, /* virtio is always native byte order */
200 200 DDI_STORECACHING_OK_ACC,
201 201 DDI_DEFAULT_ACC
202 202 };
203 203
204 204 /*
205 205 * A mapping represents a binding for a single buffer that is contiguous in the
206 206 * virtual address space.
207 207 */
208 208 struct vioif_buf_mapping {
209 209 caddr_t vbm_buf;
210 210 ddi_dma_handle_t vbm_dmah;
211 211 ddi_acc_handle_t vbm_acch;
212 212 ddi_dma_cookie_t vbm_dmac;
213 213 unsigned int vbm_ncookies;
214 214 };
215 215
216 216 /*
217 217 * Rx buffers can be loaned upstream, so the code has
218 218 * to allocate them dynamically.
219 219 */
220 220 struct vioif_rx_buf {
221 221 struct vioif_softc *rb_sc;
222 222 frtn_t rb_frtn;
223 223
224 224 struct vioif_buf_mapping rb_mapping;
225 225 };
226 226
227 227 /*
228 228 * Tx buffers have two mapping types. One, "inline", is pre-allocated and is
229 229 * used to hold the virtio_net_header. Small packets also get copied there, as
229 229 * it's faster than mapping them. Bigger packets get mapped using the "external"
230 230 * mapping array. An array is used, because a packet may consist of multiple
232 232 * fragments, so each fragment gets bound to an entry. According to my
233 233 * observations, the number of fragments does not exceed 2, but just in case,
234 234 * a bigger, up to VIOIF_INDIRECT_MAX - 1 array is allocated. To save resources,
235 235 * the dma handles are allocated lazily in the tx path.
236 236 */
237 237 struct vioif_tx_buf {
238 238 mblk_t *tb_mp;
239 239
240 240 /* inline buffer */
241 241 struct vioif_buf_mapping tb_inline_mapping;
242 242
243 243 /* External buffers */
244 244 struct vioif_buf_mapping *tb_external_mapping;
245 245 unsigned int tb_external_num;
246 246 };
247 247
248 248 struct vioif_softc {
249 249 dev_info_t *sc_dev; /* mirrors virtio_softc->sc_dev */
250 250 struct virtio_softc sc_virtio;
251 251
252 252 mac_handle_t sc_mac_handle;
253 253 mac_register_t *sc_macp;
254 254
255 255 struct virtqueue *sc_rx_vq;
256 256 struct virtqueue *sc_tx_vq;
257 257 struct virtqueue *sc_ctrl_vq;
258 258
259 259 unsigned int sc_tx_stopped:1;
260 260
261 261 /* Feature bits. */
262 - unsigned int sc_rx_csum:1;
263 - unsigned int sc_tx_csum:1;
262 + unsigned int sc_rx_csum:1;
263 + unsigned int sc_tx_csum:1;
264 264 unsigned int sc_tx_tso4:1;
265 265
266 - int sc_mtu;
266 + int sc_mtu;
267 267 uint8_t sc_mac[ETHERADDRL];
268 268 /*
269 269 * For rx buffers, we keep a pointer array, because the buffers
270 270 * can be loaned upstream, and we have to repopulate the array with
271 271 * new members.
272 272 */
273 273 struct vioif_rx_buf **sc_rxbufs;
274 274
275 275 /*
276 276 * For tx, we just allocate an array of buffers. The packet can
277 277 * either be copied into the inline buffer, or the external mapping
278 278 * could be used to map the packet
279 279 */
280 280 struct vioif_tx_buf *sc_txbufs;
281 281
282 282 kstat_t *sc_intrstat;
283 283 /*
284 284 * We "loan" rx buffers upstream and reuse them after they are
285 285 * freed. This lets us avoid allocations in the hot path.
286 286 */
287 287 kmem_cache_t *sc_rxbuf_cache;
288 288 ulong_t sc_rxloan;
289 289
290 290 /* Copying small packets turns out to be faster than mapping them. */
291 291 unsigned long sc_rxcopy_thresh;
292 292 unsigned long sc_txcopy_thresh;
293 293 /* Some statistics follow. */
294 294 uint64_t sc_ipackets;
295 295 uint64_t sc_opackets;
296 296 uint64_t sc_rbytes;
297 297 uint64_t sc_obytes;
298 298 uint64_t sc_brdcstxmt;
299 299 uint64_t sc_brdcstrcv;
300 300 uint64_t sc_multixmt;
301 301 uint64_t sc_multircv;
302 302 uint64_t sc_norecvbuf;
303 303 uint64_t sc_notxbuf;
304 304 uint64_t sc_ierrors;
305 305 uint64_t sc_oerrors;
306 306 };
307 307
308 308 #define ETHER_HEADER_LEN sizeof (struct ether_header)
309 309
310 310 /* MTU + the ethernet header. */
311 311 #define MAX_PAYLOAD 65535
312 312 #define MAX_MTU (MAX_PAYLOAD - ETHER_HEADER_LEN)
313 -#define DEFAULT_MTU ETHERMTU
313 +#define DEFAULT_MTU ETHERMTU
314 314
315 315 /*
316 316 * Yeah, we spend 8M per device. Turns out, there is no point
317 317 * being smart and using merged rx buffers (VIRTIO_NET_F_MRG_RXBUF),
318 318 * because vhost does not support them, and we expect to be used with
319 319 * vhost in production environments.
320 320 */
321 321 /* The buffer keeps both the packet data and the virtio_net_header. */
322 322 #define VIOIF_RX_SIZE (MAX_PAYLOAD + sizeof (struct virtio_net_hdr))
323 323
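To put the "8M per device" estimate above into numbers: VIOIF_RX_SIZE is 65535 + sizeof (struct virtio_net_hdr) = 65545 bytes, so, assuming the host advertises a 128-entry rx ring, a fully populated ring pins about 128 * 65545 bytes, i.e. roughly 8 MB (a 256-entry ring would double that). The buffers come out of sc_rxbuf_cache and are only allocated as ring slots are first filled in vioif_add_rx().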
324 324 /*
325 325 * We win a bit on header alignment, but the host wins a lot
326 326 * more on moving aligned buffers. Might need more thought.
327 327 */
328 328 #define VIOIF_IP_ALIGN 0
329 329
330 330 /* Maximum number of indirect descriptors, somewhat arbitrary. */
331 331 #define VIOIF_INDIRECT_MAX 128
332 332
333 333 /*
334 334 * We pre-allocate a reasonably large buffer to copy small packets
335 335 * there. Bigger packets are mapped, packets with multiple
336 336 * cookies are mapped as indirect buffers.
337 337 */
338 338 #define VIOIF_TX_INLINE_SIZE 2048
339 339
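The copy-versus-map policy described above reduces to a size check against the tunable thresholds kept in the softc. A sketch of the tx-side predicate as vioif_send() applies it; the helper itself is illustrative and not part of the driver:

static boolean_t
vioif_tx_should_copy(struct vioif_softc *sc, size_t msg_size)
{
	/*
	 * Short packets are copied into the pre-bound 2k inline buffer;
	 * anything larger is DMA-bound fragment by fragment through the
	 * external mappings.
	 */
	return (msg_size < sc->sc_txcopy_thresh ? B_TRUE : B_FALSE);
}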
340 340 /* Native queue size for all queues */
341 341 #define VIOIF_RX_QLEN 0
342 342 #define VIOIF_TX_QLEN 0
343 343 #define VIOIF_CTRL_QLEN 0
344 344
345 345 static uchar_t vioif_broadcast[ETHERADDRL] = {
346 346 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
347 347 };
348 348
349 349 #define VIOIF_TX_THRESH_MAX 640
350 350 #define VIOIF_RX_THRESH_MAX 640
351 351
352 -#define CACHE_NAME_SIZE 32
352 +#define CACHE_NAME_SIZE 32
353 353
354 354 static char vioif_txcopy_thresh[] =
355 355 "vioif_txcopy_thresh";
356 356 static char vioif_rxcopy_thresh[] =
357 357 "vioif_rxcopy_thresh";
358 358
359 359 static char *vioif_priv_props[] = {
360 360 vioif_txcopy_thresh,
361 361 vioif_rxcopy_thresh,
362 362 NULL
363 363 };
364 364
365 365 /* Add up to ddi? */
366 366 static ddi_dma_cookie_t *
367 367 vioif_dma_curr_cookie(ddi_dma_handle_t dmah)
368 368 {
369 369 ddi_dma_impl_t *dmah_impl = (void *) dmah;
370 370 ASSERT(dmah_impl->dmai_cookie);
371 371 return (dmah_impl->dmai_cookie);
372 372 }
373 373
374 374 static void
375 375 vioif_dma_reset_cookie(ddi_dma_handle_t dmah, ddi_dma_cookie_t *dmac)
376 376 {
377 377 ddi_dma_impl_t *dmah_impl = (void *) dmah;
378 378 dmah_impl->dmai_cookie = dmac;
379 379 }
380 380
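The two helpers above reach into the private ddi_dma_impl_t to save and restore a handle's cookie iterator, something the DDI offers no public interface for (hence the "Add up to ddi?" remark). A sketch, assuming a bound handle whose first cookie has already been consumed, of the save/walk/restore pattern they enable (vioif_add_rx() below does exactly this via virtio_ve_add_cookie()); the walker is hypothetical:

static void
vioif_walk_extra_cookies(ddi_dma_handle_t dmah, unsigned int ncookies)
{
	ddi_dma_cookie_t *saved = vioif_dma_curr_cookie(dmah);
	ddi_dma_cookie_t dmac;
	unsigned int i;

	for (i = 1; i < ncookies; i++) {
		ddi_dma_nextcookie(dmah, &dmac);
		/* ... hand dmac to the descriptor chain here ... */
	}

	/* Rewind so later users of the handle see an untouched iterator. */
	vioif_dma_reset_cookie(dmah, saved);
}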
381 381 static link_state_t
382 382 vioif_link_state(struct vioif_softc *sc)
383 383 {
384 384 if (sc->sc_virtio.sc_features & VIRTIO_NET_F_STATUS) {
385 385 if (virtio_read_device_config_2(&sc->sc_virtio,
386 386 VIRTIO_NET_CONFIG_STATUS) & VIRTIO_NET_S_LINK_UP) {
387 +
387 388 return (LINK_STATE_UP);
388 389 } else {
389 390 return (LINK_STATE_DOWN);
390 391 }
391 392 }
392 393
393 394 return (LINK_STATE_UP);
394 395 }
395 396
396 397 static ddi_dma_attr_t vioif_inline_buf_dma_attr = {
397 398 DMA_ATTR_V0, /* Version number */
398 399 0, /* low address */
399 400 0xFFFFFFFFFFFFFFFF, /* high address */
400 401 0xFFFFFFFF, /* counter register max */
401 402 1, /* page alignment */
402 403 1, /* burst sizes: 1 - 32 */
403 404 1, /* minimum transfer size */
404 405 0xFFFFFFFF, /* max transfer size */
405 406 0xFFFFFFFFFFFFFFF, /* address register max */
406 407 1, /* scatter-gather capacity */
407 408 1, /* device operates on bytes */
408 409 0, /* attr flag: set to 0 */
409 410 };
410 411
411 412 static ddi_dma_attr_t vioif_mapped_buf_dma_attr = {
412 413 DMA_ATTR_V0, /* Version number */
413 414 0, /* low address */
414 415 0xFFFFFFFFFFFFFFFF, /* high address */
415 416 0xFFFFFFFF, /* counter register max */
416 417 1, /* page alignment */
417 418 1, /* burst sizes: 1 - 32 */
418 419 1, /* minimum transfer size */
419 420 0xFFFFFFFF, /* max transfer size */
420 421 0xFFFFFFFFFFFFFFF, /* address register max */
421 422
422 423 /* One entry is used for the virtio_net_hdr on the tx path */
423 424 VIOIF_INDIRECT_MAX - 1, /* scatter-gather capacity */
424 425 1, /* device operates on bytes */
425 426 0, /* attr flag: set to 0 */
426 427 };
427 428
428 429 static ddi_device_acc_attr_t vioif_bufattr = {
429 430 DDI_DEVICE_ATTR_V0,
430 431 DDI_NEVERSWAP_ACC,
431 432 DDI_STORECACHING_OK_ACC,
432 433 DDI_DEFAULT_ACC
433 434 };
434 435
435 436 static void
436 437 vioif_rx_free(caddr_t free_arg)
437 438 {
438 439 struct vioif_rx_buf *buf = (void *) free_arg;
439 440 struct vioif_softc *sc = buf->rb_sc;
440 441
441 442 kmem_cache_free(sc->sc_rxbuf_cache, buf);
442 443 atomic_dec_ulong(&sc->sc_rxloan);
443 444 }
444 445
445 446 static int
446 447 vioif_rx_construct(void *buffer, void *user_arg, int kmflags)
447 448 {
448 449 _NOTE(ARGUNUSED(kmflags));
449 450 struct vioif_softc *sc = user_arg;
450 451 struct vioif_rx_buf *buf = buffer;
451 452 size_t len;
452 453
453 454 if (ddi_dma_alloc_handle(sc->sc_dev, &vioif_mapped_buf_dma_attr,
454 455 DDI_DMA_SLEEP, NULL, &buf->rb_mapping.vbm_dmah)) {
455 456 dev_err(sc->sc_dev, CE_WARN,
456 457 "Can't allocate dma handle for rx buffer");
457 458 goto exit_handle;
458 459 }
459 460
460 461 if (ddi_dma_mem_alloc(buf->rb_mapping.vbm_dmah,
461 462 VIOIF_RX_SIZE + sizeof (struct virtio_net_hdr),
462 463 &vioif_bufattr, DDI_DMA_STREAMING, DDI_DMA_SLEEP,
463 464 NULL, &buf->rb_mapping.vbm_buf, &len, &buf->rb_mapping.vbm_acch)) {
464 465 dev_err(sc->sc_dev, CE_WARN,
465 466 "Can't allocate rx buffer");
466 467 goto exit_alloc;
467 468 }
468 469 ASSERT(len >= VIOIF_RX_SIZE);
469 470
470 471 if (ddi_dma_addr_bind_handle(buf->rb_mapping.vbm_dmah, NULL,
471 472 buf->rb_mapping.vbm_buf, len, DDI_DMA_READ | DDI_DMA_STREAMING,
472 473 DDI_DMA_SLEEP, NULL, &buf->rb_mapping.vbm_dmac,
473 474 &buf->rb_mapping.vbm_ncookies)) {
474 475 dev_err(sc->sc_dev, CE_WARN, "Can't bind rx buffer");
475 476
476 477 goto exit_bind;
477 478 }
478 479
479 480 ASSERT(buf->rb_mapping.vbm_ncookies <= VIOIF_INDIRECT_MAX);
480 481
481 482 buf->rb_sc = sc;
482 483 buf->rb_frtn.free_arg = (void *) buf;
483 484 buf->rb_frtn.free_func = vioif_rx_free;
484 485
485 486 return (0);
486 487 exit_bind:
487 488 ddi_dma_mem_free(&buf->rb_mapping.vbm_acch);
488 489 exit_alloc:
489 490 ddi_dma_free_handle(&buf->rb_mapping.vbm_dmah);
490 491 exit_handle:
491 492
492 493 return (ENOMEM);
493 494 }
494 495
495 496 static void
496 497 vioif_rx_destruct(void *buffer, void *user_arg)
497 498 {
498 499 _NOTE(ARGUNUSED(user_arg));
499 500 struct vioif_rx_buf *buf = buffer;
500 501
501 502 ASSERT(buf->rb_mapping.vbm_acch);
502 503 ASSERT(buf->rb_mapping.vbm_dmah);
503 504
504 505 (void) ddi_dma_unbind_handle(buf->rb_mapping.vbm_dmah);
505 506 ddi_dma_mem_free(&buf->rb_mapping.vbm_acch);
506 507 ddi_dma_free_handle(&buf->rb_mapping.vbm_dmah);
507 508 }
508 509
509 510 static void
510 511 vioif_free_mems(struct vioif_softc *sc)
511 512 {
512 513 int i;
513 514
514 515 for (i = 0; i < sc->sc_tx_vq->vq_num; i++) {
515 516 struct vioif_tx_buf *buf = &sc->sc_txbufs[i];
516 517 int j;
517 518
518 519 /* Tear down the internal mapping. */
519 520
520 521 ASSERT(buf->tb_inline_mapping.vbm_acch);
521 522 ASSERT(buf->tb_inline_mapping.vbm_dmah);
522 523
523 524 (void) ddi_dma_unbind_handle(buf->tb_inline_mapping.vbm_dmah);
524 525 ddi_dma_mem_free(&buf->tb_inline_mapping.vbm_acch);
525 526 ddi_dma_free_handle(&buf->tb_inline_mapping.vbm_dmah);
526 527
527 528 /* We should not see any in-flight buffers at this point. */
528 529 ASSERT(!buf->tb_mp);
529 530
530 531 /* Free all the dma handles we allocated lazily. */
531 532 for (j = 0; buf->tb_external_mapping[j].vbm_dmah; j++)
532 533 ddi_dma_free_handle(
533 534 &buf->tb_external_mapping[j].vbm_dmah);
534 535 /* Free the external mapping array. */
535 536 kmem_free(buf->tb_external_mapping,
536 537 sizeof (struct vioif_tx_buf) * VIOIF_INDIRECT_MAX - 1);
537 538 }
538 539
539 540 kmem_free(sc->sc_txbufs, sizeof (struct vioif_tx_buf) *
540 541 sc->sc_tx_vq->vq_num);
541 542
542 543 for (i = 0; i < sc->sc_rx_vq->vq_num; i++) {
543 544 struct vioif_rx_buf *buf = sc->sc_rxbufs[i];
544 545
545 546 if (buf)
546 547 kmem_cache_free(sc->sc_rxbuf_cache, buf);
547 548 }
548 549 kmem_free(sc->sc_rxbufs, sizeof (struct vioif_rx_buf *) *
549 550 sc->sc_rx_vq->vq_num);
550 551 }
551 552
552 553 static int
553 554 vioif_alloc_mems(struct vioif_softc *sc)
554 555 {
555 556 int i, txqsize, rxqsize;
556 557 size_t len;
557 558 unsigned int nsegments;
558 559
559 560 txqsize = sc->sc_tx_vq->vq_num;
560 561 rxqsize = sc->sc_rx_vq->vq_num;
561 562
562 563 sc->sc_txbufs = kmem_zalloc(sizeof (struct vioif_tx_buf) * txqsize,
563 564 KM_SLEEP);
564 565 if (sc->sc_txbufs == NULL) {
565 566 dev_err(sc->sc_dev, CE_WARN,
566 567 "Failed to allocate the tx buffers array");
567 568 goto exit_txalloc;
568 569 }
569 570
570 571 /*
571 572 * We don't allocate the rx vioif_bufs, just the pointers, as
572 573 * rx vioif_bufs can be loaned upstream, and we don't know the
573 574 * total number we need.
574 575 */
575 576 sc->sc_rxbufs = kmem_zalloc(sizeof (struct vioif_rx_buf *) * rxqsize,
576 577 KM_SLEEP);
577 578 if (sc->sc_rxbufs == NULL) {
578 579 dev_err(sc->sc_dev, CE_WARN,
579 580 "Failed to allocate the rx buffers pointer array");
580 581 goto exit_rxalloc;
581 582 }
582 583
583 584 for (i = 0; i < txqsize; i++) {
584 585 struct vioif_tx_buf *buf = &sc->sc_txbufs[i];
585 586
586 587 /* Allocate and bind an inline mapping. */
587 588
588 589 if (ddi_dma_alloc_handle(sc->sc_dev,
589 590 &vioif_inline_buf_dma_attr,
590 591 DDI_DMA_SLEEP, NULL, &buf->tb_inline_mapping.vbm_dmah)) {
591 592
592 593 dev_err(sc->sc_dev, CE_WARN,
593 594 "Can't allocate dma handle for tx buffer %d", i);
594 595 goto exit_tx;
595 596 }
596 597
597 598 if (ddi_dma_mem_alloc(buf->tb_inline_mapping.vbm_dmah,
598 599 VIOIF_TX_INLINE_SIZE, &vioif_bufattr, DDI_DMA_STREAMING,
599 600 DDI_DMA_SLEEP, NULL, &buf->tb_inline_mapping.vbm_buf,
600 601 &len, &buf->tb_inline_mapping.vbm_acch)) {
601 602
602 603 dev_err(sc->sc_dev, CE_WARN,
603 604 "Can't allocate tx buffer %d", i);
604 605 goto exit_tx;
605 606 }
606 607 ASSERT(len >= VIOIF_TX_INLINE_SIZE);
607 608
608 609 if (ddi_dma_addr_bind_handle(buf->tb_inline_mapping.vbm_dmah,
609 610 NULL, buf->tb_inline_mapping.vbm_buf, len,
610 611 DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
611 612 &buf->tb_inline_mapping.vbm_dmac, &nsegments)) {
612 613
613 614 dev_err(sc->sc_dev, CE_WARN,
614 615 "Can't bind tx buffer %d", i);
615 616 goto exit_tx;
616 617 }
617 618
618 619 /* We asked for a single segment */
619 620 ASSERT(nsegments == 1);
620 621
621 622 /*
622 623 * We allow up to VIOIF_INDIRECT_MAX - 1 external mappings.
623 624 * In reality, I don't expect more than 2-3 used, but who
624 625 * knows.
625 626 */
626 627 buf->tb_external_mapping = kmem_zalloc(
627 628 sizeof (struct vioif_tx_buf) * VIOIF_INDIRECT_MAX - 1,
628 629 KM_SLEEP);
629 630
630 631 /*
631 632 * The external mapping's dma handles are allocated lazily,
632 633 * as we don't expect most of them to be used.
633 634 */
634 635 }
635 636
636 637 return (0);
637 638
638 639 exit_tx:
639 640 for (i = 0; i < txqsize; i++) {
640 641 struct vioif_tx_buf *buf = &sc->sc_txbufs[i];
641 642
642 643 if (buf->tb_inline_mapping.vbm_dmah)
643 644 (void) ddi_dma_unbind_handle(
644 645 buf->tb_inline_mapping.vbm_dmah);
645 646
646 647 if (buf->tb_inline_mapping.vbm_acch)
647 648 ddi_dma_mem_free(
648 649 &buf->tb_inline_mapping.vbm_acch);
649 650
650 651 if (buf->tb_inline_mapping.vbm_dmah)
651 652 ddi_dma_free_handle(
652 653 &buf->tb_inline_mapping.vbm_dmah);
653 654
654 655 if (buf->tb_external_mapping)
655 656 kmem_free(buf->tb_external_mapping,
656 657 sizeof (struct vioif_tx_buf) *
657 658 VIOIF_INDIRECT_MAX - 1);
658 659 }
659 660
660 661 kmem_free(sc->sc_rxbufs, sizeof (struct vioif_rx_buf) * rxqsize);
661 662
662 663 exit_rxalloc:
663 664 kmem_free(sc->sc_txbufs, sizeof (struct vioif_tx_buf) * txqsize);
664 665 exit_txalloc:
665 666 return (ENOMEM);
666 667 }
667 668
668 669 /* ARGSUSED */
669 670 int
670 671 vioif_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
671 672 {
672 673 return (DDI_SUCCESS);
673 674 }
674 675
675 676 /* ARGSUSED */
676 677 int
677 678 vioif_promisc(void *arg, boolean_t on)
678 679 {
679 680 return (DDI_SUCCESS);
680 681 }
681 682
682 683 /* ARGSUSED */
683 684 int
684 685 vioif_unicst(void *arg, const uint8_t *macaddr)
685 686 {
686 687 return (DDI_FAILURE);
687 688 }
688 689
689 690
690 691 static int
691 692 vioif_add_rx(struct vioif_softc *sc, int kmflag)
692 693 {
693 694 struct vq_entry *ve;
694 695 struct vioif_rx_buf *buf;
695 696
696 697 ve = vq_alloc_entry(sc->sc_rx_vq);
697 698 if (!ve) {
698 - /*
699 - * Out of free descriptors - ring already full.
700 - * It would be better to update sc_norxdescavail
701 - * but MAC does not ask for this info, hence we
702 - * update sc_norecvbuf.
699 + /* Out of free descriptors - ring already full.
700 + * It would be better to update sc_norxdescavail,
701 + * but MAC does not ask for this info, hence we
702 + * update sc_norecvbuf.
703 703 */
704 704 sc->sc_norecvbuf++;
705 705 goto exit_vq;
706 706 }
707 707 buf = sc->sc_rxbufs[ve->qe_index];
708 708
709 709 if (!buf) {
710 710 /* First run, allocate the buffer. */
711 711 buf = kmem_cache_alloc(sc->sc_rxbuf_cache, kmflag);
712 712 sc->sc_rxbufs[ve->qe_index] = buf;
713 713 }
714 714
715 715 /* Still nothing? Bye. */
716 716 if (!buf) {
717 717 dev_err(sc->sc_dev, CE_WARN, "Can't allocate rx buffer");
718 718 sc->sc_norecvbuf++;
719 719 goto exit_buf;
720 720 }
721 721
722 722 ASSERT(buf->rb_mapping.vbm_ncookies >= 1);
723 723
724 724 /*
725 725 * For an unknown reason, the virtio_net_hdr must be placed
726 726 * as a separate virtio queue entry.
727 727 */
728 728 virtio_ve_add_indirect_buf(ve, buf->rb_mapping.vbm_dmac.dmac_laddress,
729 729 sizeof (struct virtio_net_hdr), B_FALSE);
730 730
731 731 /* Add the rest of the first cookie. */
732 732 virtio_ve_add_indirect_buf(ve,
733 733 buf->rb_mapping.vbm_dmac.dmac_laddress +
734 734 sizeof (struct virtio_net_hdr),
735 735 buf->rb_mapping.vbm_dmac.dmac_size -
736 736 sizeof (struct virtio_net_hdr), B_FALSE);
737 737
738 738 /*
739 739 * If the buffer consists of a single cookie (unlikely for a
740 740 * 64-k buffer), we are done. Otherwise, add the rest of the cookies
741 741 * using indirect entries.
742 742 */
743 743 if (buf->rb_mapping.vbm_ncookies > 1) {
744 744 ddi_dma_cookie_t *first_extra_dmac;
745 745 ddi_dma_cookie_t dmac;
746 746 first_extra_dmac =
747 747 vioif_dma_curr_cookie(buf->rb_mapping.vbm_dmah);
748 748
749 749 ddi_dma_nextcookie(buf->rb_mapping.vbm_dmah, &dmac);
750 750 virtio_ve_add_cookie(ve, buf->rb_mapping.vbm_dmah,
751 751 dmac, buf->rb_mapping.vbm_ncookies - 1, B_FALSE);
752 752 vioif_dma_reset_cookie(buf->rb_mapping.vbm_dmah,
753 753 first_extra_dmac);
754 754 }
755 755
756 756 virtio_push_chain(ve, B_FALSE);
757 757
758 758 return (DDI_SUCCESS);
759 759
760 760 exit_buf:
761 761 vq_free_entry(sc->sc_rx_vq, ve);
762 762 exit_vq:
763 763 return (DDI_FAILURE);
764 764 }
765 765
766 766 static int
767 767 vioif_populate_rx(struct vioif_softc *sc, int kmflag)
768 768 {
769 769 int i = 0;
770 770 int ret;
771 771
772 772 for (;;) {
773 773 ret = vioif_add_rx(sc, kmflag);
774 774 if (ret)
775 775 /*
776 776 * We could not allocate some memory. Try to work with
777 777 * what we've got.
778 778 */
779 779 break;
780 780 i++;
781 781 }
782 782
783 783 if (i)
784 784 virtio_sync_vq(sc->sc_rx_vq);
785 785
786 786 return (i);
787 787 }
788 788
789 789 static int
790 790 vioif_process_rx(struct vioif_softc *sc)
791 791 {
792 792 struct vq_entry *ve;
793 793 struct vioif_rx_buf *buf;
794 794 mblk_t *mp;
795 795 uint32_t len;
796 796 int i = 0;
797 797
798 798 while ((ve = virtio_pull_chain(sc->sc_rx_vq, &len))) {
799 799
800 800 buf = sc->sc_rxbufs[ve->qe_index];
801 801 ASSERT(buf);
802 802
803 803 if (len < sizeof (struct virtio_net_hdr)) {
804 804 dev_err(sc->sc_dev, CE_WARN, "RX: Chain too small: %u",
805 805 len - (uint32_t)sizeof (struct virtio_net_hdr));
806 806 sc->sc_ierrors++;
807 807 virtio_free_chain(ve);
808 808 continue;
809 809 }
810 810
811 811 len -= sizeof (struct virtio_net_hdr);
812 812 /*
813 813 * We copy small packets that happened to fit into a single
814 814 * cookie and reuse the buffers. For bigger ones, we loan
815 815 * the buffers upstream.
816 816 */
817 817 if (len < sc->sc_rxcopy_thresh) {
818 818 mp = allocb(len, 0);
819 819 if (!mp) {
820 820 sc->sc_norecvbuf++;
821 821 sc->sc_ierrors++;
822 822
823 823 virtio_free_chain(ve);
824 824 break;
825 825 }
826 826
827 827 bcopy((char *)buf->rb_mapping.vbm_buf +
828 828 sizeof (struct virtio_net_hdr), mp->b_rptr, len);
829 829 mp->b_wptr = mp->b_rptr + len;
830 830
831 831 } else {
832 832 mp = desballoc((unsigned char *)
833 833 buf->rb_mapping.vbm_buf +
834 834 sizeof (struct virtio_net_hdr) +
835 835 VIOIF_IP_ALIGN, len, 0, &buf->rb_frtn);
836 836 if (!mp) {
837 837 sc->sc_norecvbuf++;
838 838 sc->sc_ierrors++;
839 839
840 840 virtio_free_chain(ve);
841 841 break;
842 842 }
843 843 mp->b_wptr = mp->b_rptr + len;
844 844
845 845 atomic_inc_ulong(&sc->sc_rxloan);
846 846 /*
847 - * Buffer loaned, we will have to allocte a new one
847 + * Buffer loaned, we will have to allocate a new one
848 848 * for this slot.
849 849 */
850 850 sc->sc_rxbufs[ve->qe_index] = NULL;
851 851 }
852 -
853 - /*
854 - * virtio-net does not tell us if this packet is multicast
855 - * or broadcast, so we have to check it.
852 + /* virtio-net does not tell us whether this packet is
853 + * multicast or broadcast, so we have to check here
856 854 */
857 855 if (mp->b_rptr[0] & 0x1) {
858 856 if (bcmp(mp->b_rptr, vioif_broadcast, ETHERADDRL) != 0)
859 857 sc->sc_multircv++;
860 858 else
861 859 sc->sc_brdcstrcv++;
862 860 }
863 861
864 862 sc->sc_rbytes += len;
865 863 sc->sc_ipackets++;
866 864
867 865 virtio_free_chain(ve);
868 866 mac_rx(sc->sc_mac_handle, NULL, mp);
869 867 i++;
870 868 }
871 869
872 870 return (i);
873 871 }
874 872
875 873 static void
876 874 vioif_reclaim_used_tx(struct vioif_softc *sc)
877 875 {
878 876 struct vq_entry *ve;
879 877 struct vioif_tx_buf *buf;
880 878 uint32_t len;
881 879 mblk_t *mp;
882 880 int i = 0;
883 881
884 882 while ((ve = virtio_pull_chain(sc->sc_tx_vq, &len))) {
885 883 /* We don't chain descriptors for tx, so don't expect any. */
886 884 ASSERT(!ve->qe_next);
887 885
888 886 buf = &sc->sc_txbufs[ve->qe_index];
889 887 mp = buf->tb_mp;
890 888 buf->tb_mp = NULL;
891 889
892 890 if (mp) {
893 891 for (i = 0; i < buf->tb_external_num; i++)
894 892 (void) ddi_dma_unbind_handle(
895 893 buf->tb_external_mapping[i].vbm_dmah);
896 894 }
897 895
898 896 virtio_free_chain(ve);
899 897
900 898 /* External mapping used, mp was not freed in vioif_send() */
901 899 if (mp)
902 900 freemsg(mp);
903 901 i++;
904 902 }
905 903
906 904 if (sc->sc_tx_stopped && i) {
907 905 sc->sc_tx_stopped = 0;
908 906 mac_tx_update(sc->sc_mac_handle);
909 907 }
910 908 }
911 909
912 910 /* sc will be used to update stat counters. */
913 911 /* ARGSUSED */
914 912 static inline void
915 913 vioif_tx_inline(struct vioif_softc *sc, struct vq_entry *ve, mblk_t *mp,
916 914 size_t msg_size)
917 915 {
918 916 struct vioif_tx_buf *buf;
919 917 buf = &sc->sc_txbufs[ve->qe_index];
920 918
921 919 ASSERT(buf);
922 920
923 921 /* Frees mp */
924 922 mcopymsg(mp, buf->tb_inline_mapping.vbm_buf +
925 923 sizeof (struct virtio_net_hdr));
926 924
927 925 virtio_ve_add_indirect_buf(ve,
928 926 buf->tb_inline_mapping.vbm_dmac.dmac_laddress +
929 927 sizeof (struct virtio_net_hdr), msg_size, B_TRUE);
930 928 }
931 929
932 930 static inline int
933 931 vioif_tx_lazy_handle_alloc(struct vioif_softc *sc, struct vioif_tx_buf *buf,
934 932 int i)
935 933 {
936 934 int ret = DDI_SUCCESS;
937 935
938 936 if (!buf->tb_external_mapping[i].vbm_dmah) {
939 937 ret = ddi_dma_alloc_handle(sc->sc_dev,
940 938 &vioif_mapped_buf_dma_attr, DDI_DMA_SLEEP, NULL,
941 939 &buf->tb_external_mapping[i].vbm_dmah);
942 940 if (ret != DDI_SUCCESS) {
943 941 dev_err(sc->sc_dev, CE_WARN,
944 942 "Can't allocate dma handle for external tx buffer");
945 943 }
946 944 }
947 945
948 946 return (ret);
949 947 }
950 948
951 949 static inline int
952 950 vioif_tx_external(struct vioif_softc *sc, struct vq_entry *ve, mblk_t *mp,
953 951 size_t msg_size)
954 952 {
955 953 _NOTE(ARGUNUSED(msg_size));
956 954
957 955 struct vioif_tx_buf *buf;
958 956 mblk_t *nmp;
959 957 int i, j;
960 958 int ret = DDI_SUCCESS;
961 959
962 960 buf = &sc->sc_txbufs[ve->qe_index];
963 961
964 962 ASSERT(buf);
965 963
966 964 buf->tb_external_num = 0;
967 965 i = 0;
968 966 nmp = mp;
969 967
970 968 while (nmp) {
971 969 size_t len;
972 970 ddi_dma_cookie_t dmac;
973 971 unsigned int ncookies;
974 972
975 973 len = MBLKL(nmp);
976 974 /*
977 975 * For some reason, the network stack can
978 976 * actually send us zero-length fragments.
979 977 */
980 978 if (len == 0) {
981 979 nmp = nmp->b_cont;
982 980 continue;
983 981 }
984 982
985 983 ret = vioif_tx_lazy_handle_alloc(sc, buf, i);
986 984 if (ret != DDI_SUCCESS) {
987 985 sc->sc_notxbuf++;
988 986 sc->sc_oerrors++;
989 987 goto exit_lazy_alloc;
990 988 }
991 989 ret = ddi_dma_addr_bind_handle(
992 990 buf->tb_external_mapping[i].vbm_dmah, NULL,
993 991 (caddr_t)nmp->b_rptr, len,
994 992 DDI_DMA_WRITE | DDI_DMA_STREAMING,
995 993 DDI_DMA_SLEEP, NULL, &dmac, &ncookies);
996 994
997 995 if (ret != DDI_SUCCESS) {
998 996 sc->sc_oerrors++;
999 997 dev_err(sc->sc_dev, CE_NOTE,
1000 998 "TX: Failed to bind external handle");
1001 999 goto exit_bind;
1002 1000 }
1003 1001
1004 1002 /* Check if we still fit into the indirect table. */
1005 1003 if (virtio_ve_indirect_available(ve) < ncookies) {
1006 1004 dev_err(sc->sc_dev, CE_NOTE,
1007 1005 "TX: Indirect descriptor table limit reached."
1008 1006 " It took %d fragments.", i);
1009 1007 sc->sc_notxbuf++;
1010 1008 sc->sc_oerrors++;
1011 1009
1012 1010 ret = DDI_FAILURE;
1013 1011 goto exit_limit;
1014 1012 }
1015 1013
1016 1014 virtio_ve_add_cookie(ve, buf->tb_external_mapping[i].vbm_dmah,
1017 1015 dmac, ncookies, B_TRUE);
1018 1016
1019 1017 nmp = nmp->b_cont;
1020 1018 i++;
1021 1019 }
1022 1020
1023 1021 buf->tb_external_num = i;
1024 1022 /* Save the mp to free it when the packet is sent. */
1025 1023 buf->tb_mp = mp;
1026 1024
1027 1025 return (DDI_SUCCESS);
1028 1026
1029 1027 exit_limit:
1030 1028 exit_bind:
1031 1029 exit_lazy_alloc:
1032 1030
1033 1031 for (j = 0; j < i; j++) {
1034 1032 (void) ddi_dma_unbind_handle(
1035 1033 buf->tb_external_mapping[j].vbm_dmah);
1036 1034 }
1037 1035
1038 1036 return (ret);
1039 1037 }
1040 1038
1041 1039 static boolean_t
1042 1040 vioif_send(struct vioif_softc *sc, mblk_t *mp)
1043 1041 {
1044 1042 struct vq_entry *ve;
1045 1043 struct vioif_tx_buf *buf;
1046 1044 struct virtio_net_hdr *net_header = NULL;
1047 1045 size_t msg_size = 0;
1048 1046 uint32_t csum_start;
1049 1047 uint32_t csum_stuff;
1050 1048 uint32_t csum_flags;
1051 1049 uint32_t lso_flags;
1052 1050 uint32_t lso_mss;
1053 1051 mblk_t *nmp;
1054 1052 int ret;
1055 1053 boolean_t lso_required = B_FALSE;
1056 1054
1057 1055 for (nmp = mp; nmp; nmp = nmp->b_cont)
1058 1056 msg_size += MBLKL(nmp);
1059 1057
1060 1058 if (sc->sc_tx_tso4) {
1061 1059 mac_lso_get(mp, &lso_mss, &lso_flags);
1062 1060 lso_required = (lso_flags & HW_LSO);
1063 1061 }
1064 1062
1065 1063 ve = vq_alloc_entry(sc->sc_tx_vq);
1066 1064
1067 1065 if (!ve) {
1068 1066 sc->sc_notxbuf++;
1069 1067 /* Out of free descriptors - try later. */
1070 1068 return (B_FALSE);
1071 1069 }
1072 1070 buf = &sc->sc_txbufs[ve->qe_index];
1073 1071
1074 1072 /* Use the inline buffer of the first entry for the virtio_net_hdr. */
1075 1073 (void) memset(buf->tb_inline_mapping.vbm_buf, 0,
1076 1074 sizeof (struct virtio_net_hdr));
1077 1075
1078 1076 /* LINTED E_BAD_PTR_CAST_ALIGN */
1079 1077 net_header = (struct virtio_net_hdr *)
1080 1078 buf->tb_inline_mapping.vbm_buf;
1081 1079
1082 1080 mac_hcksum_get(mp, &csum_start, &csum_stuff, NULL,
1083 1081 NULL, &csum_flags);
1084 1082
1085 1083 /* They want us to do the TCP/UDP csum calculation. */
1086 1084 if (csum_flags & HCK_PARTIALCKSUM) {
1087 1085 struct ether_header *eth_header;
1088 1086 int eth_hsize;
1089 1087
1090 1088 /* Did we ask for it? */
1091 1089 ASSERT(sc->sc_tx_csum);
1092 1090
1093 1091 /* We only asked for partial csum packets. */
1094 1092 ASSERT(!(csum_flags & HCK_IPV4_HDRCKSUM));
1095 1093 ASSERT(!(csum_flags & HCK_FULLCKSUM));
1096 1094
1097 1095 eth_header = (void *) mp->b_rptr;
1098 1096 if (eth_header->ether_type == htons(ETHERTYPE_VLAN)) {
1099 1097 eth_hsize = sizeof (struct ether_vlan_header);
1100 1098 } else {
1101 1099 eth_hsize = sizeof (struct ether_header);
1102 1100 }
1103 1101 net_header->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1104 1102 net_header->csum_start = eth_hsize + csum_start;
1105 1103 net_header->csum_offset = csum_stuff - csum_start;
1106 1104 }
1107 1105
1108 1106 /* setup LSO fields if required */
1109 1107 if (lso_required) {
1110 1108 net_header->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1111 1109 net_header->gso_size = (uint16_t)lso_mss;
1112 1110 }
1113 1111
1114 1112 virtio_ve_add_indirect_buf(ve,
1115 1113 buf->tb_inline_mapping.vbm_dmac.dmac_laddress,
1116 1114 sizeof (struct virtio_net_hdr), B_TRUE);
1117 1115
1118 1116 /* Meanwhile, update the statistics. */
1119 1117 if (mp->b_rptr[0] & 0x1) {
1120 1118 if (bcmp(mp->b_rptr, vioif_broadcast, ETHERADDRL) != 0)
1121 1119 sc->sc_multixmt++;
1122 1120 else
1123 1121 sc->sc_brdcstxmt++;
1124 1122 }
1125 1123
1126 1124 /*
1127 1125 * We copy small packets into the inline buffer. The bigger ones
1128 1126 * get mapped using the mapped buffer.
1129 1127 */
1130 1128 if (msg_size < sc->sc_txcopy_thresh) {
1131 1129 vioif_tx_inline(sc, ve, mp, msg_size);
1132 1130 } else {
1133 1131 /* statistics get updated by vioif_tx_external on failure */
1134 1132 ret = vioif_tx_external(sc, ve, mp, msg_size);
1135 1133 if (ret != DDI_SUCCESS)
1136 1134 goto exit_tx_external;
1137 1135 }
1138 1136
1139 1137 virtio_push_chain(ve, B_TRUE);
1140 1138
1141 1139 sc->sc_opackets++;
1142 1140 sc->sc_obytes += msg_size;
1143 1141
1144 1142 return (B_TRUE);
1145 1143
1146 1144 exit_tx_external:
1147 1145
1148 1146 vq_free_entry(sc->sc_tx_vq, ve);
1149 1147 /*
1150 1148 * vioif_tx_external can fail when the buffer does not fit into the
1151 1149 * indirect descriptor table. Free the mp. I don't expect this ever
1152 1150 * to happen.
1153 1151 */
1154 1152 freemsg(mp);
1155 1153
1156 1154 return (B_TRUE);
1157 1155 }
1158 1156
1159 1157 mblk_t *
1160 1158 vioif_tx(void *arg, mblk_t *mp)
1161 1159 {
1162 1160 struct vioif_softc *sc = arg;
1163 1161 mblk_t *nmp;
1164 1162
1165 1163 while (mp != NULL) {
1166 1164 nmp = mp->b_next;
1167 1165 mp->b_next = NULL;
1168 1166
1169 1167 if (!vioif_send(sc, mp)) {
1170 1168 sc->sc_tx_stopped = 1;
1171 1169 mp->b_next = nmp;
1172 1170 break;
1173 1171 }
1174 1172 mp = nmp;
1175 1173 }
1176 1174
1177 1175 return (mp);
1178 1176 }
1179 1177
1180 1178 int
1181 1179 vioif_start(void *arg)
1182 1180 {
1183 1181 struct vioif_softc *sc = arg;
1184 1182
1185 1183 mac_link_update(sc->sc_mac_handle,
1186 1184 vioif_link_state(sc));
1187 1185
1188 1186 virtio_start_vq_intr(sc->sc_rx_vq);
1189 1187
1190 1188 return (DDI_SUCCESS);
1191 1189 }
1192 1190
1193 1191 void
1194 1192 vioif_stop(void *arg)
1195 1193 {
1196 1194 struct vioif_softc *sc = arg;
1197 1195
1198 1196 virtio_stop_vq_intr(sc->sc_rx_vq);
1199 1197 }
1200 1198
1201 1199 /* ARGSUSED */
1202 1200 static int
1203 1201 vioif_stat(void *arg, uint_t stat, uint64_t *val)
1204 1202 {
1205 1203 struct vioif_softc *sc = arg;
1206 1204
1207 1205 switch (stat) {
1208 1206 case MAC_STAT_IERRORS:
1209 1207 *val = sc->sc_ierrors;
1210 1208 break;
1211 1209 case MAC_STAT_OERRORS:
1212 1210 *val = sc->sc_oerrors;
1213 1211 break;
1214 1212 case MAC_STAT_MULTIRCV:
1215 1213 *val = sc->sc_multircv;
1216 1214 break;
1217 1215 case MAC_STAT_BRDCSTRCV:
1218 1216 *val = sc->sc_brdcstrcv;
1219 1217 break;
1220 1218 case MAC_STAT_MULTIXMT:
1221 1219 *val = sc->sc_multixmt;
1222 1220 break;
1223 1221 case MAC_STAT_BRDCSTXMT:
1224 1222 *val = sc->sc_brdcstxmt;
1225 1223 break;
1226 1224 case MAC_STAT_IPACKETS:
1227 1225 *val = sc->sc_ipackets;
1228 1226 break;
1229 1227 case MAC_STAT_RBYTES:
1230 1228 *val = sc->sc_rbytes;
1231 1229 break;
1232 1230 case MAC_STAT_OPACKETS:
1233 1231 *val = sc->sc_opackets;
1234 1232 break;
1235 1233 case MAC_STAT_OBYTES:
1236 1234 *val = sc->sc_obytes;
1237 1235 break;
1238 1236 case MAC_STAT_NORCVBUF:
1239 1237 *val = sc->sc_norecvbuf;
1240 1238 break;
1241 1239 case MAC_STAT_NOXMTBUF:
1242 1240 *val = sc->sc_notxbuf;
1243 1241 break;
1244 1242 case MAC_STAT_IFSPEED:
1245 1243 /* always 1 Gbit */
1246 1244 *val = 1000000000ULL;
1247 1245 break;
1248 1246 case ETHER_STAT_LINK_DUPLEX:
1249 1247 /* virtual device, always full-duplex */
1250 1248 *val = LINK_DUPLEX_FULL;
1251 1249 break;
1252 1250
1253 1251 default:
1254 1252 return (ENOTSUP);
1255 1253 }
1256 1254
1257 1255 return (DDI_SUCCESS);
1258 1256 }
1259 1257
1260 1258 static int
1261 1259 vioif_set_prop_private(struct vioif_softc *sc, const char *pr_name,
1262 1260 uint_t pr_valsize, const void *pr_val)
1263 1261 {
1264 1262 _NOTE(ARGUNUSED(pr_valsize));
1265 1263
1266 1264 long result;
1267 1265
1268 1266 if (strcmp(pr_name, vioif_txcopy_thresh) == 0) {
1269 1267
1270 1268 if (pr_val == NULL)
1271 1269 return (EINVAL);
1272 1270
1273 1271 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1274 1272
1275 1273 if (result < 0 || result > VIOIF_TX_THRESH_MAX)
1276 1274 return (EINVAL);
1277 1275 sc->sc_txcopy_thresh = result;
1278 1276 }
1279 1277 if (strcmp(pr_name, vioif_rxcopy_thresh) == 0) {
1280 1278
1281 1279 if (pr_val == NULL)
1282 1280 return (EINVAL);
1283 1281
1284 1282 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1285 1283
1286 1284 if (result < 0 || result > VIOIF_RX_THRESH_MAX)
1287 1285 return (EINVAL);
1288 1286 sc->sc_rxcopy_thresh = result;
1289 1287 }
1290 1288 return (0);
1291 1289 }
1292 1290
1293 1291 static int
1294 1292 vioif_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1295 1293 uint_t pr_valsize, const void *pr_val)
1296 1294 {
1297 1295 struct vioif_softc *sc = arg;
1298 1296 const uint32_t *new_mtu;
1299 1297 int err;
1300 1298
1301 1299 switch (pr_num) {
1302 1300 case MAC_PROP_MTU:
1303 1301 new_mtu = pr_val;
1304 1302
1305 1303 if (*new_mtu > MAX_MTU) {
1306 1304 return (EINVAL);
1307 1305 }
1308 1306
1309 1307 err = mac_maxsdu_update(sc->sc_mac_handle, *new_mtu);
1310 1308 if (err) {
1311 1309 return (err);
1312 1310 }
1313 1311 break;
1314 1312 case MAC_PROP_PRIVATE:
1315 1313 err = vioif_set_prop_private(sc, pr_name,
1316 1314 pr_valsize, pr_val);
1317 1315 if (err)
1318 1316 return (err);
1319 - break;
1320 1317 default:
1321 1318 return (ENOTSUP);
1322 1319 }
1323 1320
1324 1321 return (0);
1325 1322 }
1326 1323
1327 1324 static int
1328 1325 vioif_get_prop_private(struct vioif_softc *sc, const char *pr_name,
1329 1326 uint_t pr_valsize, void *pr_val)
1330 1327 {
1331 1328 int err = ENOTSUP;
1332 1329 int value;
1333 1330
1334 1331 if (strcmp(pr_name, vioif_txcopy_thresh) == 0) {
1335 1332
1336 1333 value = sc->sc_txcopy_thresh;
1337 1334 err = 0;
1338 1335 goto done;
1339 1336 }
1340 1337 if (strcmp(pr_name, vioif_rxcopy_thresh) == 0) {
1341 1338
1342 1339 value = sc->sc_rxcopy_thresh;
1343 1340 err = 0;
1344 1341 goto done;
1345 1342 }
1346 1343 done:
1347 1344 if (err == 0) {
1348 1345 (void) snprintf(pr_val, pr_valsize, "%d", value);
1349 1346 }
1350 1347 return (err);
1351 1348 }
1352 1349
1353 1350 static int
1354 1351 vioif_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1355 1352 uint_t pr_valsize, void *pr_val)
1356 1353 {
1357 1354 struct vioif_softc *sc = arg;
1358 1355 int err = ENOTSUP;
1359 1356
1360 1357 switch (pr_num) {
1361 1358 case MAC_PROP_PRIVATE:
1362 1359 err = vioif_get_prop_private(sc, pr_name,
1363 1360 pr_valsize, pr_val);
1364 1361 break;
1365 1362 default:
1366 1363 break;
1367 1364 }
1368 1365 return (err);
1369 1366 }
1370 1367
1371 1368 static void
1372 1369 vioif_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1373 1370 mac_prop_info_handle_t prh)
1374 1371 {
1375 1372 struct vioif_softc *sc = arg;
1376 - char valstr[64];
1377 - int value;
1378 1373
1379 1374 switch (pr_num) {
1380 1375 case MAC_PROP_MTU:
1381 1376 mac_prop_info_set_range_uint32(prh, ETHERMIN, MAX_MTU);
1382 1377 break;
1383 1378
1384 - case MAC_PROP_PRIVATE:
1379 + case MAC_PROP_PRIVATE: {
1380 + char valstr[64];
1381 + int value;
1382 +
1385 1383 bzero(valstr, sizeof (valstr));
1386 1384 if (strcmp(pr_name, vioif_txcopy_thresh) == 0) {
1387 1385
1388 1386 value = sc->sc_txcopy_thresh;
1389 1387 } else if (strcmp(pr_name,
1390 1388 vioif_rxcopy_thresh) == 0) {
1391 1389 value = sc->sc_rxcopy_thresh;
1392 1390 } else {
1393 1391 return;
1394 1392 }
1395 1393 (void) snprintf(valstr, sizeof (valstr), "%d", value);
1396 - break;
1397 -
1394 + }
1398 1395 default:
1399 1396 break;
1400 1397 }
1401 1398 }
1402 1399
1403 1400 static boolean_t
1404 1401 vioif_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1405 1402 {
1406 1403 struct vioif_softc *sc = arg;
1407 1404
1408 1405 switch (cap) {
1409 1406 case MAC_CAPAB_HCKSUM:
1410 1407 if (sc->sc_tx_csum) {
1411 1408 uint32_t *txflags = cap_data;
1412 1409
1413 1410 *txflags = HCKSUM_INET_PARTIAL;
1414 1411 return (B_TRUE);
1415 1412 }
1416 1413 return (B_FALSE);
1417 1414 case MAC_CAPAB_LSO:
1418 1415 if (sc->sc_tx_tso4) {
1419 1416 mac_capab_lso_t *cap_lso = cap_data;
1420 1417
1421 1418 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
1422 1419 cap_lso->lso_basic_tcp_ipv4.lso_max = MAX_MTU;
1423 1420 return (B_TRUE);
1424 1421 }
1425 1422 return (B_FALSE);
1426 1423 default:
1427 1424 break;
1428 1425 }
1429 1426 return (B_FALSE);
1430 1427 }
1431 1428
1432 1429 static mac_callbacks_t vioif_m_callbacks = {
1433 - .mc_callbacks = (MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO),
1430 + .mc_callbacks = (MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO),
1434 1431 .mc_getstat = vioif_stat,
1435 1432 .mc_start = vioif_start,
1436 1433 .mc_stop = vioif_stop,
1437 1434 .mc_setpromisc = vioif_promisc,
1438 1435 .mc_multicst = vioif_multicst,
1439 1436 .mc_unicst = vioif_unicst,
1440 1437 .mc_tx = vioif_tx,
1441 1438 /* Optional callbacks */
1442 1439 .mc_reserved = NULL, /* reserved */
1443 1440 .mc_ioctl = NULL, /* mc_ioctl */
1444 1441 .mc_getcapab = vioif_getcapab, /* mc_getcapab */
1445 1442 .mc_open = NULL, /* mc_open */
1446 1443 .mc_close = NULL, /* mc_close */
1447 1444 .mc_setprop = vioif_setprop,
1448 1445 .mc_getprop = vioif_getprop,
1449 1446 .mc_propinfo = vioif_propinfo,
1450 1447 };
1451 1448
1452 1449 static void
1453 1450 vioif_show_features(struct vioif_softc *sc, const char *prefix,
1454 1451 uint32_t features)
1455 1452 {
1456 1453 char buf[512];
1457 - char *bufp = buf;
1458 - char *bufend = buf + sizeof (buf);
1459 1454
1460 - /* LINTED E_PTRDIFF_OVERFLOW */
1461 - bufp += snprintf(bufp, bufend - bufp, prefix);
1462 -
1463 - /* LINTED E_PTRDIFF_OVERFLOW */
1464 - bufp += virtio_show_features(features, bufp, bufend - bufp);
1465 -
1466 - /* LINTED E_PTRDIFF_OVERFLOW */
1467 - bufp += snprintf(bufp, bufend - bufp, "Vioif ( ");
1468 -
1469 - if (features & VIRTIO_NET_F_CSUM)
1470 - /* LINTED E_PTRDIFF_OVERFLOW */
1471 - bufp += snprintf(bufp, bufend - bufp, "CSUM ");
1472 - if (features & VIRTIO_NET_F_GUEST_CSUM)
1473 - /* LINTED E_PTRDIFF_OVERFLOW */
1474 - bufp += snprintf(bufp, bufend - bufp, "GUEST_CSUM ");
1475 - if (features & VIRTIO_NET_F_MAC)
1476 - /* LINTED E_PTRDIFF_OVERFLOW */
1477 - bufp += snprintf(bufp, bufend - bufp, "MAC ");
1478 - if (features & VIRTIO_NET_F_GSO)
1479 - /* LINTED E_PTRDIFF_OVERFLOW */
1480 - bufp += snprintf(bufp, bufend - bufp, "GSO ");
1481 - if (features & VIRTIO_NET_F_GUEST_TSO4)
1482 - /* LINTED E_PTRDIFF_OVERFLOW */
1483 - bufp += snprintf(bufp, bufend - bufp, "GUEST_TSO4 ");
1484 - if (features & VIRTIO_NET_F_GUEST_TSO6)
1485 - /* LINTED E_PTRDIFF_OVERFLOW */
1486 - bufp += snprintf(bufp, bufend - bufp, "GUEST_TSO6 ");
1487 - if (features & VIRTIO_NET_F_GUEST_ECN)
1488 - /* LINTED E_PTRDIFF_OVERFLOW */
1489 - bufp += snprintf(bufp, bufend - bufp, "GUEST_ECN ");
1490 - if (features & VIRTIO_NET_F_GUEST_UFO)
1491 - /* LINTED E_PTRDIFF_OVERFLOW */
1492 - bufp += snprintf(bufp, bufend - bufp, "GUEST_UFO ");
1493 - if (features & VIRTIO_NET_F_HOST_TSO4)
1494 - /* LINTED E_PTRDIFF_OVERFLOW */
1495 - bufp += snprintf(bufp, bufend - bufp, "HOST_TSO4 ");
1496 - if (features & VIRTIO_NET_F_HOST_TSO6)
1497 - /* LINTED E_PTRDIFF_OVERFLOW */
1498 - bufp += snprintf(bufp, bufend - bufp, "HOST_TSO6 ");
1499 - if (features & VIRTIO_NET_F_HOST_ECN)
1500 - /* LINTED E_PTRDIFF_OVERFLOW */
1501 - bufp += snprintf(bufp, bufend - bufp, "HOST_ECN ");
1502 - if (features & VIRTIO_NET_F_HOST_UFO)
1503 - /* LINTED E_PTRDIFF_OVERFLOW */
1504 - bufp += snprintf(bufp, bufend - bufp, "HOST_UFO ");
1505 - if (features & VIRTIO_NET_F_MRG_RXBUF)
1506 - /* LINTED E_PTRDIFF_OVERFLOW */
1507 - bufp += snprintf(bufp, bufend - bufp, "MRG_RXBUF ");
1508 - if (features & VIRTIO_NET_F_STATUS)
1509 - /* LINTED E_PTRDIFF_OVERFLOW */
1510 - bufp += snprintf(bufp, bufend - bufp, "STATUS ");
1511 - if (features & VIRTIO_NET_F_CTRL_VQ)
1512 - /* LINTED E_PTRDIFF_OVERFLOW */
1513 - bufp += snprintf(bufp, bufend - bufp, "CTRL_VQ ");
1514 - if (features & VIRTIO_NET_F_CTRL_RX)
1515 - /* LINTED E_PTRDIFF_OVERFLOW */
1516 - bufp += snprintf(bufp, bufend - bufp, "CTRL_RX ");
1517 - if (features & VIRTIO_NET_F_CTRL_VLAN)
1518 - /* LINTED E_PTRDIFF_OVERFLOW */
1519 - bufp += snprintf(bufp, bufend - bufp, "CTRL_VLAN ");
1520 - if (features & VIRTIO_NET_F_CTRL_RX_EXTRA)
1521 - /* LINTED E_PTRDIFF_OVERFLOW */
1522 - bufp += snprintf(bufp, bufend - bufp, "CTRL_RX_EXTRA ");
1523 -
1524 - /* LINTED E_PTRDIFF_OVERFLOW */
1525 - bufp += snprintf(bufp, bufend - bufp, ")");
1526 - *bufp = '\0';
1527 -
1528 - dev_err(sc->sc_dev, CE_NOTE, "%s", buf);
1455 +	dev_err(sc->sc_dev, CE_NOTE, "%s%.*s Vioif (%b)", prefix, (int)virtio_show_features(features, buf, sizeof (buf)), buf, features, "\020\1CSUM\2GUEST_CSUM\6MAC\7GSO\10GUEST_TSO4\11GUEST_TSO6\12GUEST_ECN\13GUEST_UFO\14HOST_TSO4\15HOST_TSO6\16HOST_ECN\17HOST_UFO\20MRG_RXBUF\21STATUS\22CTRL_VQ\23CTRL_RX\24CTRL_VLAN\25CTRL_RX_EXTRA");
1529 1456 }
1530 1457
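The %b conversion used above is the cmn_err()/dev_err() bit-field format: the value is printed in the base given by the leading character of the control string (\020 means hexadecimal), followed by the names of the set bits in angle brackets. As a worked example, a negotiated word of 0x30021 (CSUM, MAC, STATUS and CTRL_VQ set) would render roughly as "30021<CSUM,MAC,STATUS,CTRL_VQ>".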
1531 1458 /*
1532 1459 * Find out which features are supported by the device and
1533 1460 * choose which ones we wish to use.
1534 1461 */
1535 1462 static int
1536 1463 vioif_dev_features(struct vioif_softc *sc)
1537 1464 {
1538 1465 uint32_t host_features;
1539 1466
1540 1467 host_features = virtio_negotiate_features(&sc->sc_virtio,
1541 1468 VIRTIO_NET_F_CSUM |
1542 1469 VIRTIO_NET_F_HOST_TSO4 |
1543 1470 VIRTIO_NET_F_HOST_ECN |
1544 1471 VIRTIO_NET_F_MAC |
1545 1472 VIRTIO_NET_F_STATUS |
1546 1473 VIRTIO_F_RING_INDIRECT_DESC |
1547 1474 VIRTIO_F_NOTIFY_ON_EMPTY);
1548 1475
1549 1476 vioif_show_features(sc, "Host features: ", host_features);
1550 - vioif_show_features(sc, "Negotiated features: ",
1477 +	vioif_show_features(sc, "Negotiated features: ",
1551 1478 sc->sc_virtio.sc_features);
1552 1479
1553 1480 if (!(sc->sc_virtio.sc_features & VIRTIO_F_RING_INDIRECT_DESC)) {
1554 1481 dev_err(sc->sc_dev, CE_NOTE,
1555 1482 "Host does not support RING_INDIRECT_DESC, bye.");
1556 1483 return (DDI_FAILURE);
1557 1484 }
1558 1485
1559 1486 return (DDI_SUCCESS);
1560 1487 }
1561 1488
1562 1489 static int
1563 1490 vioif_has_feature(struct vioif_softc *sc, uint32_t feature)
1564 1491 {
1565 1492 return (virtio_has_feature(&sc->sc_virtio, feature));
1566 1493 }
1567 1494
1568 1495 static void
1569 1496 vioif_set_mac(struct vioif_softc *sc)
1570 1497 {
1571 1498 int i;
1572 1499
1573 1500 for (i = 0; i < ETHERADDRL; i++) {
1574 1501 virtio_write_device_config_1(&sc->sc_virtio,
1575 1502 VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]);
1576 1503 }
1577 1504 }
1578 1505
1579 1506 /* Get the mac address out of the hardware, or make up one. */
1580 1507 static void
1581 1508 vioif_get_mac(struct vioif_softc *sc)
1582 1509 {
1583 1510 int i;
1584 1511 if (sc->sc_virtio.sc_features & VIRTIO_NET_F_MAC) {
1585 1512 for (i = 0; i < ETHERADDRL; i++) {
1586 1513 sc->sc_mac[i] = virtio_read_device_config_1(
1587 1514 &sc->sc_virtio,
1588 1515 VIRTIO_NET_CONFIG_MAC + i);
1589 1516 }
1590 1517 dev_err(sc->sc_dev, CE_NOTE, "Got MAC address from host: %s",
1591 1518 ether_sprintf((struct ether_addr *)sc->sc_mac));
1592 1519 } else {
1593 1520 /* Get a few random bytes */
1594 1521 (void) random_get_pseudo_bytes(sc->sc_mac, ETHERADDRL);
1595 1522 /* Make sure it's a unicast MAC */
1596 1523 sc->sc_mac[0] &= ~1;
1597 1524 /* Set the "locally administered" bit */
1598 1525 sc->sc_mac[1] |= 2;
1599 1526
1600 1527 vioif_set_mac(sc);
1601 1528
1602 1529 dev_err(sc->sc_dev, CE_NOTE,
1603 1530 "Generated a random MAC address: %s",
1604 1531 ether_sprintf((struct ether_addr *)sc->sc_mac));
1605 1532 }
1606 1533 }
1607 1534
1608 1535 /*
1609 1536 * Virtqueue interrupt handlers
1610 1537 */
1611 1538 /* ARGSUSED */
1612 1539 uint_t
1613 1540 vioif_rx_handler(caddr_t arg1, caddr_t arg2)
1614 1541 {
1615 1542 struct virtio_softc *vsc = (void *) arg1;
1616 1543 struct vioif_softc *sc = container_of(vsc,
1617 1544 struct vioif_softc, sc_virtio);
1618 1545
1619 1546 (void) vioif_process_rx(sc);
1620 1547
1621 1548 (void) vioif_populate_rx(sc, KM_NOSLEEP);
1622 1549
1623 1550 return (DDI_INTR_CLAIMED);
1624 1551 }
1625 1552
1626 1553 /* ARGSUSED */
1627 1554 uint_t
1628 1555 vioif_tx_handler(caddr_t arg1, caddr_t arg2)
1629 1556 {
1630 1557 struct virtio_softc *vsc = (void *)arg1;
1631 1558 struct vioif_softc *sc = container_of(vsc,
1632 1559 struct vioif_softc, sc_virtio);
1633 1560
1634 1561 vioif_reclaim_used_tx(sc);
1635 1562 return (DDI_INTR_CLAIMED);
1636 1563 }
1637 1564
1638 1565 static int
1639 1566 vioif_register_ints(struct vioif_softc *sc)
1640 1567 {
1641 1568 int ret;
1642 1569
1643 1570 struct virtio_int_handler vioif_vq_h[] = {
1644 1571 { vioif_rx_handler },
1645 1572 { vioif_tx_handler },
1646 1573 { NULL }
1647 1574 };
1648 1575
1649 1576 ret = virtio_register_ints(&sc->sc_virtio, NULL, vioif_vq_h);
1650 1577
1651 1578 return (ret);
1652 1579 }
1653 1580
1654 1581
1655 1582 static void
1656 1583 vioif_check_features(struct vioif_softc *sc)
1657 1584 {
1658 1585 if (vioif_has_feature(sc, VIRTIO_NET_F_CSUM)) {
1659 1586 /* The GSO/GRO features depend on CSUM, check them here. */
1660 1587 sc->sc_tx_csum = 1;
1661 1588 sc->sc_rx_csum = 1;
1662 1589
1663 1590 if (!vioif_has_feature(sc, VIRTIO_NET_F_GUEST_CSUM)) {
1664 1591 sc->sc_rx_csum = 0;
1665 1592 }
1666 1593 cmn_err(CE_NOTE, "Csum enabled.");
1667 1594
1668 1595 if (vioif_has_feature(sc, VIRTIO_NET_F_HOST_TSO4)) {
1669 1596
1670 1597 sc->sc_tx_tso4 = 1;
1671 1598 /*
1672 1599 * We don't seem to have a way to ask the system
1673 1600 * not to send us LSO packets with Explicit
1674 1601 * Congestion Notification bit set, so we require
1675 1602 * the device to support it in order to do
1676 1603 * LSO.
1677 1604 */
1678 1605 if (!vioif_has_feature(sc, VIRTIO_NET_F_HOST_ECN)) {
1679 1606 dev_err(sc->sc_dev, CE_NOTE,
1680 1607 "TSO4 supported, but not ECN. "
1681 1608 "Not using LSO.");
1682 1609 sc->sc_tx_tso4 = 0;
1683 1610 } else {
1684 1611 cmn_err(CE_NOTE, "LSO enabled");
1685 1612 }
1686 1613 }
1687 1614 }
1688 1615 }
1689 1616
1690 1617 static int
1691 1618 vioif_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
1692 1619 {
1693 1620 int ret, instance;
1694 1621 struct vioif_softc *sc;
1695 1622 struct virtio_softc *vsc;
1696 1623 mac_register_t *macp;
1697 1624 char cache_name[CACHE_NAME_SIZE];
1698 1625
1699 1626 instance = ddi_get_instance(devinfo);
1700 1627
1701 1628 switch (cmd) {
1702 1629 case DDI_ATTACH:
1703 1630 break;
1704 1631
1705 1632 case DDI_RESUME:
1706 1633 case DDI_PM_RESUME:
1707 - /* not supported yet */
1634 + dev_err(devinfo, CE_WARN, "resume not supported yet");
1708 1635 goto exit;
1709 1636
1710 1637 default:
1711 - /* unrecognized command */
1638 + dev_err(devinfo, CE_WARN, "cmd 0x%x unrecognized", cmd);
1712 1639 goto exit;
1713 1640 }
1714 1641
1715 1642 sc = kmem_zalloc(sizeof (struct vioif_softc), KM_SLEEP);
1716 1643 ddi_set_driver_private(devinfo, sc);
1717 1644
1718 1645 vsc = &sc->sc_virtio;
1719 1646
1720 1647 /* Duplicate for less typing */
1721 1648 sc->sc_dev = devinfo;
1722 1649 vsc->sc_dev = devinfo;
1723 1650
1724 1651 /*
1725 1652 * Initialize interrupt kstat.
1726 1653 */
1727 1654 sc->sc_intrstat = kstat_create("vioif", instance, "intr", "controller",
1728 1655 KSTAT_TYPE_INTR, 1, 0);
1729 1656 if (sc->sc_intrstat == NULL) {
1730 1657 dev_err(devinfo, CE_WARN, "kstat_create failed");
1731 1658 goto exit_intrstat;
1732 1659 }
1733 1660 kstat_install(sc->sc_intrstat);
1734 1661
1735 1662 /* map BAR 0 */
1736 1663 ret = ddi_regs_map_setup(devinfo, 1,
1737 1664 (caddr_t *)&sc->sc_virtio.sc_io_addr,
1738 1665 0, 0, &vioif_attr, &sc->sc_virtio.sc_ioh);
1739 1666 if (ret != DDI_SUCCESS) {
1740 1667 dev_err(devinfo, CE_WARN, "unable to map bar 0: %d", ret);
1741 1668 goto exit_map;
1742 1669 }
1743 1670
1744 1671 virtio_device_reset(&sc->sc_virtio);
1745 1672 virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
1746 1673 virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
1747 1674
1748 1675 ret = vioif_dev_features(sc);
1749 1676 if (ret)
1750 1677 goto exit_features;
1751 1678
1752 1679 vsc->sc_nvqs = vioif_has_feature(sc, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
1753 1680
1754 1681 (void) snprintf(cache_name, CACHE_NAME_SIZE, "vioif%d_rx", instance);
1755 1682 sc->sc_rxbuf_cache = kmem_cache_create(cache_name,
1756 1683 sizeof (struct vioif_rx_buf), 0, vioif_rx_construct,
1757 1684 vioif_rx_destruct, NULL, sc, NULL, KM_SLEEP);
1758 1685 if (sc->sc_rxbuf_cache == NULL) {
1759 1686 dev_err(sc->sc_dev, CE_WARN, "Can't allocate the buffer cache");
1760 1687 goto exit_cache;
1761 1688 }
1762 1689
1763 1690 ret = vioif_register_ints(sc);
1764 1691 if (ret) {
1765 1692 dev_err(sc->sc_dev, CE_WARN,
1766 1693 "Failed to allocate interrupt(s)!");
1767 1694 goto exit_ints;
1768 1695 }
1769 1696
1770 1697 /*
1771 1698 	 * The register layout is now determined, so we can access
1772 1699 	 * the device-specific configuration bits.
1773 1700 */
1774 1701 vioif_get_mac(sc);
1775 1702
1776 1703 sc->sc_rx_vq = virtio_alloc_vq(&sc->sc_virtio, 0,
1777 1704 VIOIF_RX_QLEN, VIOIF_INDIRECT_MAX, "rx");
1778 1705 if (!sc->sc_rx_vq)
1779 1706 goto exit_alloc1;
1780 1707 virtio_stop_vq_intr(sc->sc_rx_vq);
1781 1708
1782 1709 sc->sc_tx_vq = virtio_alloc_vq(&sc->sc_virtio, 1,
1783 1710 VIOIF_TX_QLEN, VIOIF_INDIRECT_MAX, "tx");
1784 1711 	if (!sc->sc_tx_vq)
1785 1712 goto exit_alloc2;
1786 1713 virtio_stop_vq_intr(sc->sc_tx_vq);
1787 1714
1788 1715 if (vioif_has_feature(sc, VIRTIO_NET_F_CTRL_VQ)) {
1789 1716 sc->sc_ctrl_vq = virtio_alloc_vq(&sc->sc_virtio, 2,
1790 1717 VIOIF_CTRL_QLEN, 0, "ctrl");
1791 1718 if (!sc->sc_ctrl_vq) {
1792 1719 goto exit_alloc3;
1793 1720 }
1794 1721 virtio_stop_vq_intr(sc->sc_ctrl_vq);
1795 1722 }
1796 1723
1797 1724 virtio_set_status(&sc->sc_virtio,
1798 1725 VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
1799 1726
1800 1727 sc->sc_rxloan = 0;
1801 1728
1802 1729 	/* set some reasonably small default values */
1803 1730 sc->sc_rxcopy_thresh = 300;
1804 1731 sc->sc_txcopy_thresh = 300;
1805 1732 sc->sc_mtu = ETHERMTU;
1806 1733
1807 1734 vioif_check_features(sc);
1808 1735
1809 1736 if (vioif_alloc_mems(sc))
1810 1737 goto exit_alloc_mems;
1811 1738
1812 1739 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
1813 1740 dev_err(devinfo, CE_WARN, "Failed to allocate a mac_register");
1814 1741 goto exit_macalloc;
1815 1742 }
1816 1743
1817 1744 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
1818 1745 macp->m_driver = sc;
1819 1746 macp->m_dip = devinfo;
1820 1747 macp->m_src_addr = sc->sc_mac;
1821 1748 macp->m_callbacks = &vioif_m_callbacks;
1822 1749 macp->m_min_sdu = 0;
1823 1750 macp->m_max_sdu = sc->sc_mtu;
1824 1751 macp->m_margin = VLAN_TAGSZ;
1825 1752 macp->m_priv_props = vioif_priv_props;
1826 1753
1827 1754 sc->sc_macp = macp;
1828 1755
1829 1756 /* Pre-fill the rx ring. */
1830 1757 (void) vioif_populate_rx(sc, KM_SLEEP);
1831 1758
1832 1759 ret = mac_register(macp, &sc->sc_mac_handle);
1833 1760 if (ret != 0) {
1834 1761 dev_err(devinfo, CE_WARN, "vioif_attach: "
1835 1762 "mac_register() failed, ret=%d", ret);
1836 1763 goto exit_register;
1837 1764 }
1838 1765
1839 1766 ret = virtio_enable_ints(&sc->sc_virtio);
1840 1767 if (ret) {
1841 1768 dev_err(devinfo, CE_WARN, "Failed to enable interrupts");
1842 1769 goto exit_enable_ints;
1843 1770 }
1844 1771
1845 1772 mac_link_update(sc->sc_mac_handle, LINK_STATE_UP);
1846 1773 return (DDI_SUCCESS);
1847 1774
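	/*
	 * Failure paths: each label below matches how far attach got;
	 * execution falls through the remaining labels so that every
	 * resource set up before the failure is released.
	 */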
1848 1775 exit_enable_ints:
1849 1776 (void) mac_unregister(sc->sc_mac_handle);
1850 1777 exit_register:
1851 1778 mac_free(macp);
1852 1779 exit_macalloc:
1853 1780 vioif_free_mems(sc);
1854 1781 exit_alloc_mems:
1855 1782 virtio_release_ints(&sc->sc_virtio);
1856 1783 if (sc->sc_ctrl_vq)
1857 1784 virtio_free_vq(sc->sc_ctrl_vq);
1858 1785 exit_alloc3:
1859 1786 virtio_free_vq(sc->sc_tx_vq);
1860 1787 exit_alloc2:
1861 1788 virtio_free_vq(sc->sc_rx_vq);
1862 1789 exit_alloc1:
1863 1790 exit_ints:
1864 1791 kmem_cache_destroy(sc->sc_rxbuf_cache);
1865 1792 exit_cache:
1866 1793 exit_features:
1867 1794 virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
1868 1795 ddi_regs_map_free(&sc->sc_virtio.sc_ioh);
1869 1796 exit_intrstat:
1870 1797 exit_map:
1871 1798 kstat_delete(sc->sc_intrstat);
1872 1799 kmem_free(sc, sizeof (struct vioif_softc));
1873 1800 exit:
1874 1801 return (DDI_FAILURE);
1875 1802 }
1876 1803
1877 1804 static int
1878 1805 vioif_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1879 1806 {
1880 1807 struct vioif_softc *sc;
1881 1808
1882 1809 if ((sc = ddi_get_driver_private(devinfo)) == NULL)
1883 1810 return (DDI_FAILURE);
1884 1811
1885 1812 switch (cmd) {
1886 1813 case DDI_DETACH:
1887 1814 break;
1888 1815
1889 1816 case DDI_PM_SUSPEND:
1890 - /* not supported yet */
1817 + cmn_err(CE_WARN, "suspend not supported yet");
1891 1818 return (DDI_FAILURE);
1892 1819
1893 1820 default:
1894 - /* unrecognized command */
1821 + cmn_err(CE_WARN, "cmd 0x%x unrecognized", cmd);
1895 1822 return (DDI_FAILURE);
1896 1823 }
1897 1824
1898 1825 if (sc->sc_rxloan) {
1899 - cmn_err(CE_NOTE, "Some rx buffers are still upstream, "
1826 + cmn_err(CE_WARN, "Some rx buffers are still upstream, "
1900 1827 		    "not detaching");
1901 1828 return (DDI_FAILURE);
1902 1829 }
1903 1830
1904 1831 virtio_stop_vq_intr(sc->sc_rx_vq);
1905 1832 virtio_stop_vq_intr(sc->sc_tx_vq);
1906 1833
1907 1834 virtio_release_ints(&sc->sc_virtio);
1908 1835
1909 1836 if (mac_unregister(sc->sc_mac_handle)) {
1910 1837 return (DDI_FAILURE);
1911 1838 }
1912 1839
1913 1840 mac_free(sc->sc_macp);
1914 1841
1915 1842 vioif_free_mems(sc);
1916 1843 virtio_free_vq(sc->sc_rx_vq);
1917 1844 virtio_free_vq(sc->sc_tx_vq);
1918 1845
1919 1846 virtio_device_reset(&sc->sc_virtio);
1920 1847
1921 1848 ddi_regs_map_free(&sc->sc_virtio.sc_ioh);
1922 1849
1923 1850 kmem_cache_destroy(sc->sc_rxbuf_cache);
1924 1851 kstat_delete(sc->sc_intrstat);
1925 1852 kmem_free(sc, sizeof (struct vioif_softc));
1926 1853
1927 1854 return (DDI_SUCCESS);
1928 1855 }
1929 1856
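/*
 * quiesce(9E) entry point: invoked when the system needs the device to
 * stop all DMA and interrupt activity (e.g. for fast reboot), in a
 * context where blocking is not allowed, so only silence the queue
 * interrupts and reset the device.
 */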
1930 1857 static int
1931 1858 vioif_quiesce(dev_info_t *devinfo)
1932 1859 {
1933 1860 struct vioif_softc *sc;
1934 1861
1935 1862 if ((sc = ddi_get_driver_private(devinfo)) == NULL)
1936 1863 return (DDI_FAILURE);
1937 1864
1938 1865 virtio_stop_vq_intr(sc->sc_rx_vq);
1939 1866 virtio_stop_vq_intr(sc->sc_tx_vq);
1940 1867 virtio_device_reset(&sc->sc_virtio);
1941 1868
1942 1869 return (DDI_SUCCESS);
1943 1870 }
1944 1871
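/*
 * Loadable module entry points.  mac_init_ops() is called before
 * mod_install() so the MAC framework knows about this driver's ops;
 * if mod_install() fails, the registration is undone with mac_fini_ops().
 */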
1945 1872 int
1946 1873 _init(void)
1947 1874 {
1948 1875 int ret = 0;
1949 1876
1950 1877 mac_init_ops(&vioif_ops, "vioif");
1951 1878
1952 1879 ret = mod_install(&modlinkage);
1953 1880 if (ret != DDI_SUCCESS) {
1954 1881 mac_fini_ops(&vioif_ops);
1882 + cmn_err(CE_WARN, "Unable to install the driver");
1955 1883 return (ret);
1956 1884 }
1957 1885
1958 1886 return (0);
1959 1887 }
1960 1888
1961 1889 int
1962 1890 _fini(void)
1963 1891 {
1964 1892 int ret;
1965 1893
1966 1894 ret = mod_remove(&modlinkage);
1967 1895 if (ret == DDI_SUCCESS) {
1968 1896 mac_fini_ops(&vioif_ops);
1969 1897 }
1970 1898
1971 1899 return (ret);
1972 1900 }
1973 1901
1974 1902 int
1975 1903 _info(struct modinfo *pModinfo)
1976 1904 {
1977 1905 return (mod_info(&modlinkage, pModinfo));
1978 1906 }