Print this page
NEX-16215 xvdi_init_dev() truncates the unit address to 7 characters
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
8019 Some PV devices should not be configured in HVM mode
Reviewed by: Paul Dagnelie <pcd@delphix.com>
Reviewed by: Basil Crow <basil.crow@delphix.com>
Approved by: Dan McDonald <danmcd@omniti.com>
re #13140 rb4270 hvm_sd module missing dependencies on scsi and cmlb
re #13166 rb4270 Check for Xen HVM even if CPUID signature returns Microsoft Hv
re #13187 rb4270 Fix Xen HVM related warnings
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/xen/os/xvdi.c
+++ new/usr/src/uts/common/xen/os/xvdi.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
|
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Copyright (c) 2014 by Delphix. All rights reserved.
29 + * Copyright 2018 Nexenta Systems, Inc.
29 30 */
30 31
31 32 /*
32 33 * Xen virtual device driver interfaces
33 34 */
34 35
35 36 /*
36 37 * todo:
37 38 * + name space clean up:
38 39 * xvdi_* - public xen interfaces, for use by all leaf drivers
39 40 * xd_* - public xen data structures
40 41 * i_xvdi_* - implementation private functions
41 42 * xendev_* - xendev driver interfaces, both internal and in cb_ops/bus_ops
42 43 * + add mdb dcmds to dump ring status
43 44 * + implement xvdi_xxx to wrap xenbus_xxx read/write function
44 45 * + convert (xendev_ring_t *) into xvdi_ring_handle_t
45 46 */
46 47 #include <sys/conf.h>
47 48 #include <sys/param.h>
48 49 #include <sys/kmem.h>
49 50 #include <vm/seg_kmem.h>
50 51 #include <sys/debug.h>
51 52 #include <sys/modctl.h>
52 53 #include <sys/autoconf.h>
53 54 #include <sys/ddi_impldefs.h>
54 55 #include <sys/ddi_subrdefs.h>
55 56 #include <sys/ddi.h>
56 57 #include <sys/sunddi.h>
57 58 #include <sys/sunndi.h>
58 59 #include <sys/sunldi.h>
59 60 #include <sys/fs/dv_node.h>
60 61 #include <sys/avintr.h>
61 62 #include <sys/psm.h>
62 63 #include <sys/spl.h>
63 64 #include <sys/promif.h>
64 65 #include <sys/list.h>
65 66 #include <sys/bootconf.h>
66 67 #include <sys/bootsvcs.h>
67 68 #include <sys/bootinfo.h>
68 69 #include <sys/note.h>
69 70 #include <sys/sysmacros.h>
70 71 #ifdef XPV_HVM_DRIVER
71 72 #include <sys/xpv_support.h>
72 73 #include <sys/hypervisor.h>
73 74 #include <public/grant_table.h>
74 75 #include <public/xen.h>
75 76 #include <public/io/xenbus.h>
76 77 #include <public/io/xs_wire.h>
77 78 #include <public/event_channel.h>
78 79 #include <public/io/xenbus.h>
79 80 #else /* XPV_HVM_DRIVER */
80 81 #include <sys/hypervisor.h>
81 82 #include <sys/xen_mmu.h>
82 83 #include <xen/sys/xenbus_impl.h>
83 84 #include <sys/evtchn_impl.h>
84 85 #endif /* XPV_HVM_DRIVER */
85 86 #include <sys/gnttab.h>
86 87 #include <xen/sys/xendev.h>
87 88 #include <vm/hat_i86.h>
88 89 #include <sys/scsi/generic/inquiry.h>
89 90 #include <util/sscanf.h>
90 91 #include <xen/public/io/xs_wire.h>
91 92
92 93
93 94 #define isdigit(ch) ((ch) >= '0' && (ch) <= '9')
94 95 #define isxdigit(ch) (isdigit(ch) || ((ch) >= 'a' && (ch) <= 'f') || \
95 96 ((ch) >= 'A' && (ch) <= 'F'))
96 97
97 98 static void xvdi_ring_init_sring(xendev_ring_t *);
98 99 static void xvdi_ring_init_front_ring(xendev_ring_t *, size_t, size_t);
99 100 #ifndef XPV_HVM_DRIVER
100 101 static void xvdi_ring_init_back_ring(xendev_ring_t *, size_t, size_t);
101 102 #endif
102 103 static void xvdi_reinit_ring(dev_info_t *, grant_ref_t *, xendev_ring_t *);
103 104
104 105 static int i_xvdi_add_watches(dev_info_t *);
105 106 static void i_xvdi_rem_watches(dev_info_t *);
106 107
107 108 static int i_xvdi_add_watch_oestate(dev_info_t *);
108 109 static void i_xvdi_rem_watch_oestate(dev_info_t *);
109 110 static void i_xvdi_oestate_cb(struct xenbus_device *, XenbusState);
110 111 static void i_xvdi_oestate_handler(void *);
111 112
112 113 static int i_xvdi_add_watch_hpstate(dev_info_t *);
113 114 static void i_xvdi_rem_watch_hpstate(dev_info_t *);
114 115 static void i_xvdi_hpstate_cb(struct xenbus_watch *, const char **,
115 116 unsigned int);
116 117 static void i_xvdi_hpstate_handler(void *);
117 118
118 119 static int i_xvdi_add_watch_bepath(dev_info_t *);
119 120 static void i_xvdi_rem_watch_bepath(dev_info_t *);
120 121 static void i_xvdi_bepath_cb(struct xenbus_watch *, const char **,
121 122 unsigned in);
122 123
123 124 static void xendev_offline_device(void *);
124 125
125 126 static void i_xvdi_probe_path_cb(struct xenbus_watch *, const char **,
126 127 unsigned int);
127 128 static void i_xvdi_probe_path_handler(void *);
128 129
129 130 typedef struct oestate_evt {
130 131 dev_info_t *dip;
131 132 XenbusState state;
132 133 } i_oestate_evt_t;
133 134
134 135 typedef struct xd_cfg {
135 136 xendev_devclass_t devclass;
136 137 char *xsdev;
137 138 char *xs_path_fe;
138 139 char *xs_path_be;
139 140 char *node_fe;
140 141 char *node_be;
141 142 char *device_type;
142 143 int xd_ipl;
143 144 int flags;
144 145 } i_xd_cfg_t;
145 146
146 147 #define XD_DOM_ZERO 0x01 /* dom0 only. */
147 148 #define XD_DOM_GUEST 0x02 /* Guest domains (i.e. non-dom0). */
148 149 #define XD_DOM_IO 0x04 /* IO domains. */
149 150
150 151 #define XD_DOM_ALL (XD_DOM_ZERO | XD_DOM_GUEST)
151 152
152 153 static i_xd_cfg_t xdci[] = {
153 154 #ifndef XPV_HVM_DRIVER
154 155 { XEN_CONSOLE, NULL, NULL, NULL, "xencons", NULL,
155 156 "console", IPL_CONS, XD_DOM_ALL, },
156 157 #endif
157 158
158 159 { XEN_VNET, "vif", "device/vif", "backend/vif", "xnf", "xnb",
159 160 "network", IPL_VIF, XD_DOM_ALL, },
160 161
161 162 { XEN_VBLK, "vbd", "device/vbd", "backend/vbd", "xdf", "xdb",
162 163 "block", IPL_VBD, XD_DOM_ALL, },
163 164
164 165 { XEN_BLKTAP, "tap", NULL, "backend/tap", NULL, "xpvtap",
165 166 "block", IPL_VBD, XD_DOM_ALL, },
166 167
167 168 #ifndef XPV_HVM_DRIVER
168 169 { XEN_XENBUS, NULL, NULL, NULL, "xenbus", NULL,
169 170 NULL, 0, XD_DOM_ALL, },
170 171
171 172 { XEN_DOMCAPS, NULL, NULL, NULL, "domcaps", NULL,
172 173 NULL, 0, XD_DOM_ALL, },
173 174
174 175 { XEN_BALLOON, NULL, NULL, NULL, "balloon", NULL,
175 176 NULL, 0, XD_DOM_ALL, },
176 177 #endif
177 178
178 179 { XEN_EVTCHN, NULL, NULL, NULL, "evtchn", NULL,
179 180 NULL, 0, XD_DOM_ZERO, },
180 181
181 182 { XEN_PRIVCMD, NULL, NULL, NULL, "privcmd", NULL,
182 183 NULL, 0, XD_DOM_ZERO, },
183 184 };
184 185 #define NXDC (sizeof (xdci) / sizeof (xdci[0]))
185 186
186 187 static void i_xvdi_enum_fe(dev_info_t *, i_xd_cfg_t *);
187 188 static void i_xvdi_enum_be(dev_info_t *, i_xd_cfg_t *);
188 189 static void i_xvdi_enum_worker(dev_info_t *, i_xd_cfg_t *, char *);
189 190
190 191 /*
191 192 * Xen device channel device access and DMA attributes
192 193 */
193 194 static ddi_device_acc_attr_t xendev_dc_accattr = {
194 195 DDI_DEVICE_ATTR_V0, DDI_NEVERSWAP_ACC, DDI_STRICTORDER_ACC
195 196 };
196 197
197 198 static ddi_dma_attr_t xendev_dc_dmaattr = {
198 199 DMA_ATTR_V0, /* version of this structure */
199 200 0, /* lowest usable address */
200 201 0xffffffffffffffffULL, /* highest usable address */
201 202 0x7fffffff, /* maximum DMAable byte count */
202 203 MMU_PAGESIZE, /* alignment in bytes */
203 204 0x7ff, /* bitmap of burst sizes */
204 205 1, /* minimum transfer */
205 206 0xffffffffU, /* maximum transfer */
206 207 0xffffffffffffffffULL, /* maximum segment length */
207 208 1, /* maximum number of segments */
208 209 1, /* granularity */
209 210 0, /* flags (reserved) */
210 211 };
211 212
212 213 static dev_info_t *xendev_dip = NULL;
213 214
214 215 #define XVDI_DBG_STATE 0x01
215 216 #define XVDI_DBG_PROBE 0x02
216 217
217 218 #ifdef DEBUG
218 219 int i_xvdi_debug = 0;
219 220
220 221 #define XVDI_DPRINTF(flag, format, ...) \
221 222 { \
222 223 if (i_xvdi_debug & (flag)) \
223 224 prom_printf((format), __VA_ARGS__); \
224 225 }
225 226 #else
226 227 #define XVDI_DPRINTF(flag, format, ...)
227 228 #endif /* DEBUG */
228 229
229 230 static i_xd_cfg_t *
230 231 i_xvdi_devclass2cfg(xendev_devclass_t devclass)
231 232 {
232 233 i_xd_cfg_t *xdcp;
233 234 int i;
234 235
235 236 for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++)
236 237 if (xdcp->devclass == devclass)
237 238 return (xdcp);
238 239
239 240 return (NULL);
240 241 }
241 242
242 243 int
243 244 xvdi_init_dev(dev_info_t *dip)
244 245 {
|
↓ open down ↓ |
206 lines elided |
↑ open up ↑ |
245 246 xendev_devclass_t devcls;
246 247 int vdevnum;
247 248 domid_t domid;
248 249 struct xendev_ppd *pdp;
249 250 i_xd_cfg_t *xdcp;
250 251 boolean_t backend;
251 252 char xsnamebuf[TYPICALMAXPATHLEN];
252 253 char *xsname;
253 254 void *prop_str;
254 255 unsigned int prop_len;
255 - char unitaddr[8];
256 + char unitaddr[16];
256 257
257 258 devcls = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
258 259 DDI_PROP_DONTPASS, "devclass", XEN_INVAL);
259 260 vdevnum = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
260 261 DDI_PROP_DONTPASS, "vdev", VDEV_NOXS);
261 262 domid = (domid_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
262 263 DDI_PROP_DONTPASS, "domain", DOMID_SELF);
263 264
264 265 backend = (domid != DOMID_SELF);
265 266 xdcp = i_xvdi_devclass2cfg(devcls);
266 267 if (xdcp->device_type != NULL)
267 268 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
268 269 "device_type", xdcp->device_type);
269 270
270 271 pdp = kmem_zalloc(sizeof (*pdp), KM_SLEEP);
271 272 pdp->xd_domain = domid;
272 273 pdp->xd_vdevnum = vdevnum;
273 274 pdp->xd_devclass = devcls;
274 275 pdp->xd_evtchn = INVALID_EVTCHN;
275 276 list_create(&pdp->xd_xb_watches, sizeof (xd_xb_watches_t),
276 277 offsetof(xd_xb_watches_t, xxw_list));
277 278 mutex_init(&pdp->xd_evt_lk, NULL, MUTEX_DRIVER, NULL);
278 279 mutex_init(&pdp->xd_ndi_lk, NULL, MUTEX_DRIVER, NULL);
279 280 ddi_set_parent_data(dip, pdp);
280 281
281 282 /*
282 283 * devices that do not need to interact with xenstore
283 284 */
284 285 if (vdevnum == VDEV_NOXS) {
285 286 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
286 287 "unit-address", "0");
287 288 if (devcls == XEN_CONSOLE)
288 289 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
289 290 "pm-hardware-state", "needs-suspend-resume");
290 291 return (DDI_SUCCESS);
291 292 }
292 293
293 294 /*
294 295 * PV devices that need to probe xenstore
295 296 */
296 297
297 298 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
298 299 "pm-hardware-state", "needs-suspend-resume");
299 300
300 301 xsname = xsnamebuf;
301 302 if (!backend)
302 303 (void) snprintf(xsnamebuf, sizeof (xsnamebuf),
303 304 "%s/%d", xdcp->xs_path_fe, vdevnum);
304 305 else
305 306 (void) snprintf(xsnamebuf, sizeof (xsnamebuf),
306 307 "%s/%d/%d", xdcp->xs_path_be, domid, vdevnum);
307 308 if ((xenbus_read_driver_state(xsname) >= XenbusStateClosing)) {
308 309 /* Don't try to init a dev that may be closing */
309 310 mutex_destroy(&pdp->xd_ndi_lk);
310 311 mutex_destroy(&pdp->xd_evt_lk);
311 312 kmem_free(pdp, sizeof (*pdp));
312 313 ddi_set_parent_data(dip, NULL);
313 314 return (DDI_FAILURE);
314 315 }
315 316
316 317 pdp->xd_xsdev.nodename = i_ddi_strdup(xsname, KM_SLEEP);
317 318 pdp->xd_xsdev.devicetype = xdcp->xsdev;
318 319 pdp->xd_xsdev.frontend = (backend ? 0 : 1);
319 320 pdp->xd_xsdev.data = dip;
320 321 pdp->xd_xsdev.otherend_id = (backend ? domid : -1);
321 322 if (i_xvdi_add_watches(dip) != DDI_SUCCESS) {
322 323 cmn_err(CE_WARN, "xvdi_init_dev: "
323 324 "cannot add watches for %s", xsname);
324 325 xvdi_uninit_dev(dip);
325 326 return (DDI_FAILURE);
|
↓ open down ↓ |
60 lines elided |
↑ open up ↑ |
326 327 }
327 328
328 329 if (backend)
329 330 return (DDI_SUCCESS);
330 331
331 332 /*
 332 333 	 * The unit-address for frontend devices is the name
 333 334 	 * of the xenstore node containing the device configuration
334 335 * and is contained in the 'vdev' property.
335 336 * VIF devices are named using an incrementing integer.
336 - * VBD devices are either named using the 16-bit dev_t value
337 + * VBD devices are either named using the 32-bit dev_t value
337 338 * for linux 'hd' and 'xvd' devices, or a simple integer value
338 339 * in the range 0..767. 768 is the base value of the linux
339 340 * dev_t namespace, the dev_t value for 'hda'.
340 341 */
341 342 (void) snprintf(unitaddr, sizeof (unitaddr), "%d", vdevnum);
342 343 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, "unit-address",
343 344 unitaddr);
344 345
345 346 switch (devcls) {
346 347 case XEN_VNET:
347 348 if (xenbus_read(XBT_NULL, xsname, "mac", (void *)&prop_str,
348 349 &prop_len) != 0)
349 350 break;
350 351 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip, "mac",
351 352 prop_str);
352 353 kmem_free(prop_str, prop_len);
353 354 break;
354 355 case XEN_VBLK:
355 356 /*
356 357 * cache a copy of the otherend name
 357 358 		 * for ease of observability
358 359 */
359 360 if (xenbus_read(XBT_NULL, pdp->xd_xsdev.otherend, "dev",
360 361 &prop_str, &prop_len) != 0)
361 362 break;
362 363 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
363 364 "dev-address", prop_str);
364 365 kmem_free(prop_str, prop_len);
365 366 break;
366 367 default:
367 368 break;
368 369 }
369 370
370 371 return (DDI_SUCCESS);
371 372 }
372 373
373 374 void
374 375 xvdi_uninit_dev(dev_info_t *dip)
375 376 {
376 377 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
377 378
378 379 if (pdp != NULL) {
379 380 /* Remove any registered callbacks. */
380 381 xvdi_remove_event_handler(dip, NULL);
381 382
382 383 /* Remove any registered watches. */
383 384 i_xvdi_rem_watches(dip);
384 385
385 386 /* tell other end to close */
386 387 if (pdp->xd_xsdev.otherend_id != (domid_t)-1)
387 388 (void) xvdi_switch_state(dip, XBT_NULL,
388 389 XenbusStateClosed);
389 390
390 391 if (pdp->xd_xsdev.nodename != NULL)
391 392 kmem_free((char *)(pdp->xd_xsdev.nodename),
392 393 strlen(pdp->xd_xsdev.nodename) + 1);
393 394
394 395 ddi_set_parent_data(dip, NULL);
395 396
396 397 mutex_destroy(&pdp->xd_ndi_lk);
397 398 mutex_destroy(&pdp->xd_evt_lk);
398 399 kmem_free(pdp, sizeof (*pdp));
399 400 }
400 401 }
401 402
402 403 /*
403 404 * Bind the event channel for this device instance.
404 405 * Currently we only support one evtchn per device instance.
405 406 */
406 407 int
407 408 xvdi_bind_evtchn(dev_info_t *dip, evtchn_port_t evtchn)
408 409 {
409 410 struct xendev_ppd *pdp;
410 411 domid_t oeid;
411 412 int r;
412 413
413 414 pdp = ddi_get_parent_data(dip);
414 415 ASSERT(pdp != NULL);
415 416 ASSERT(pdp->xd_evtchn == INVALID_EVTCHN);
416 417
417 418 mutex_enter(&pdp->xd_evt_lk);
418 419 if (pdp->xd_devclass == XEN_CONSOLE) {
419 420 if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
420 421 pdp->xd_evtchn = xen_info->console.domU.evtchn;
421 422 } else {
422 423 pdp->xd_evtchn = INVALID_EVTCHN;
423 424 mutex_exit(&pdp->xd_evt_lk);
424 425 return (DDI_SUCCESS);
425 426 }
426 427 } else {
427 428 oeid = pdp->xd_xsdev.otherend_id;
428 429 if (oeid == (domid_t)-1) {
429 430 mutex_exit(&pdp->xd_evt_lk);
430 431 return (DDI_FAILURE);
431 432 }
432 433
433 434 if ((r = xen_bind_interdomain(oeid, evtchn, &pdp->xd_evtchn))) {
434 435 xvdi_dev_error(dip, r, "bind event channel");
435 436 mutex_exit(&pdp->xd_evt_lk);
436 437 return (DDI_FAILURE);
437 438 }
438 439 }
439 440 #ifndef XPV_HVM_DRIVER
440 441 pdp->xd_ispec.intrspec_vec = ec_bind_evtchn_to_irq(pdp->xd_evtchn);
441 442 #endif
442 443 mutex_exit(&pdp->xd_evt_lk);
443 444
444 445 return (DDI_SUCCESS);
445 446 }
446 447
447 448 /*
448 449 * Allocate an event channel for this device instance.
449 450 * Currently we only support one evtchn per device instance.
450 451 */
451 452 int
452 453 xvdi_alloc_evtchn(dev_info_t *dip)
453 454 {
454 455 struct xendev_ppd *pdp;
455 456 domid_t oeid;
456 457 int rv;
457 458
458 459 pdp = ddi_get_parent_data(dip);
459 460 ASSERT(pdp != NULL);
460 461 ASSERT(pdp->xd_evtchn == INVALID_EVTCHN);
461 462
462 463 mutex_enter(&pdp->xd_evt_lk);
463 464 if (pdp->xd_devclass == XEN_CONSOLE) {
464 465 if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
465 466 pdp->xd_evtchn = xen_info->console.domU.evtchn;
466 467 } else {
467 468 pdp->xd_evtchn = INVALID_EVTCHN;
468 469 mutex_exit(&pdp->xd_evt_lk);
469 470 return (DDI_SUCCESS);
470 471 }
471 472 } else {
472 473 oeid = pdp->xd_xsdev.otherend_id;
473 474 if (oeid == (domid_t)-1) {
474 475 mutex_exit(&pdp->xd_evt_lk);
475 476 return (DDI_FAILURE);
476 477 }
477 478
478 479 if ((rv = xen_alloc_unbound_evtchn(oeid, &pdp->xd_evtchn))) {
479 480 xvdi_dev_error(dip, rv, "bind event channel");
480 481 mutex_exit(&pdp->xd_evt_lk);
481 482 return (DDI_FAILURE);
482 483 }
483 484 }
484 485 #ifndef XPV_HVM_DRIVER
485 486 pdp->xd_ispec.intrspec_vec = ec_bind_evtchn_to_irq(pdp->xd_evtchn);
486 487 #endif
487 488 mutex_exit(&pdp->xd_evt_lk);
488 489
489 490 return (DDI_SUCCESS);
490 491 }
491 492
492 493 /*
493 494 * Unbind the event channel for this device instance.
494 495 * Currently we only support one evtchn per device instance.
495 496 */
496 497 void
497 498 xvdi_free_evtchn(dev_info_t *dip)
498 499 {
499 500 struct xendev_ppd *pdp;
500 501
501 502 pdp = ddi_get_parent_data(dip);
502 503 ASSERT(pdp != NULL);
503 504
504 505 mutex_enter(&pdp->xd_evt_lk);
505 506 if (pdp->xd_evtchn != INVALID_EVTCHN) {
506 507 #ifndef XPV_HVM_DRIVER
507 508 ec_unbind_irq(pdp->xd_ispec.intrspec_vec);
508 509 pdp->xd_ispec.intrspec_vec = 0;
509 510 #endif
510 511 pdp->xd_evtchn = INVALID_EVTCHN;
511 512 }
512 513 mutex_exit(&pdp->xd_evt_lk);
513 514 }
514 515
515 516 #ifndef XPV_HVM_DRIVER
516 517 /*
517 518 * Map an inter-domain communication ring for a virtual device.
518 519 * This is used by backend drivers.
519 520 */
520 521 int
521 522 xvdi_map_ring(dev_info_t *dip, size_t nentry, size_t entrysize,
522 523 grant_ref_t gref, xendev_ring_t **ringpp)
523 524 {
524 525 domid_t oeid;
525 526 gnttab_map_grant_ref_t mapop;
526 527 gnttab_unmap_grant_ref_t unmapop;
527 528 caddr_t ringva;
528 529 ddi_acc_hdl_t *ap;
529 530 ddi_acc_impl_t *iap;
530 531 xendev_ring_t *ring;
531 532 int err;
532 533 char errstr[] = "mapping in ring buffer";
533 534
534 535 ring = kmem_zalloc(sizeof (xendev_ring_t), KM_SLEEP);
535 536 oeid = xvdi_get_oeid(dip);
536 537
537 538 /* alloc va in backend dom for ring buffer */
538 539 ringva = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
539 540 0, 0, 0, 0, VM_SLEEP);
540 541
541 542 /* map in ring page */
542 543 hat_prepare_mapping(kas.a_hat, ringva, NULL);
543 544 mapop.host_addr = (uint64_t)(uintptr_t)ringva;
544 545 mapop.flags = GNTMAP_host_map;
545 546 mapop.ref = gref;
546 547 mapop.dom = oeid;
547 548 err = xen_map_gref(GNTTABOP_map_grant_ref, &mapop, 1, B_FALSE);
548 549 if (err) {
549 550 xvdi_fatal_error(dip, err, errstr);
550 551 goto errout1;
551 552 }
552 553
553 554 if (mapop.status != 0) {
554 555 xvdi_fatal_error(dip, err, errstr);
555 556 goto errout2;
556 557 }
557 558 ring->xr_vaddr = ringva;
558 559 ring->xr_grant_hdl = mapop.handle;
559 560 ring->xr_gref = gref;
560 561
561 562 /*
562 563 * init an acc handle and associate it w/ this ring
563 564 * this is only for backend drivers. we get the memory by calling
564 565 * vmem_xalloc(), instead of calling any ddi function, so we have
565 566 * to init an acc handle by ourselves
566 567 */
567 568 ring->xr_acc_hdl = impl_acc_hdl_alloc(KM_SLEEP, NULL);
568 569 ap = impl_acc_hdl_get(ring->xr_acc_hdl);
569 570 ap->ah_vers = VERS_ACCHDL;
570 571 ap->ah_dip = dip;
571 572 ap->ah_xfermodes = DDI_DMA_CONSISTENT;
572 573 ap->ah_acc = xendev_dc_accattr;
573 574 iap = (ddi_acc_impl_t *)ap->ah_platform_private;
574 575 iap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
575 576 impl_acc_hdl_init(ap);
576 577 ap->ah_offset = 0;
577 578 ap->ah_len = (off_t)PAGESIZE;
578 579 ap->ah_addr = ring->xr_vaddr;
579 580
580 581 /* init backend ring */
581 582 xvdi_ring_init_back_ring(ring, nentry, entrysize);
582 583
583 584 *ringpp = ring;
584 585
585 586 return (DDI_SUCCESS);
586 587
587 588 errout2:
588 589 /* unmap ring page */
589 590 unmapop.host_addr = (uint64_t)(uintptr_t)ringva;
590 591 unmapop.handle = ring->xr_grant_hdl;
591 592 unmapop.dev_bus_addr = NULL;
592 593 (void) HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmapop, 1);
593 594 hat_release_mapping(kas.a_hat, ringva);
594 595 errout1:
595 596 vmem_xfree(heap_arena, ringva, PAGESIZE);
596 597 kmem_free(ring, sizeof (xendev_ring_t));
597 598 return (DDI_FAILURE);
598 599 }
599 600
600 601 /*
601 602 * Unmap a ring for a virtual device.
602 603 * This is used by backend drivers.
603 604 */
604 605 void
605 606 xvdi_unmap_ring(xendev_ring_t *ring)
606 607 {
607 608 gnttab_unmap_grant_ref_t unmapop;
608 609
609 610 ASSERT((ring != NULL) && (ring->xr_vaddr != NULL));
610 611
611 612 impl_acc_hdl_free(ring->xr_acc_hdl);
612 613 unmapop.host_addr = (uint64_t)(uintptr_t)ring->xr_vaddr;
613 614 unmapop.handle = ring->xr_grant_hdl;
614 615 unmapop.dev_bus_addr = NULL;
615 616 (void) HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmapop, 1);
616 617 hat_release_mapping(kas.a_hat, ring->xr_vaddr);
617 618 vmem_xfree(heap_arena, ring->xr_vaddr, PAGESIZE);
618 619 kmem_free(ring, sizeof (xendev_ring_t));
619 620 }
620 621 #endif /* XPV_HVM_DRIVER */
621 622
622 623 /*
623 624 * Re-initialise an inter-domain communications ring for the backend domain.
624 625 * ring will be re-initialized after re-grant succeed
625 626 * ring will be freed if fails to re-grant access to backend domain
626 627 * so, don't keep useful data in the ring
627 628 * used only in frontend driver
628 629 */
629 630 static void
630 631 xvdi_reinit_ring(dev_info_t *dip, grant_ref_t *gref, xendev_ring_t *ringp)
631 632 {
632 633 paddr_t rpaddr;
633 634 maddr_t rmaddr;
634 635
635 636 ASSERT((ringp != NULL) && (ringp->xr_paddr != 0));
636 637 rpaddr = ringp->xr_paddr;
637 638
638 639 rmaddr = DOMAIN_IS_INITDOMAIN(xen_info) ? rpaddr : pa_to_ma(rpaddr);
639 640 gnttab_grant_foreign_access_ref(ringp->xr_gref, xvdi_get_oeid(dip),
640 641 rmaddr >> PAGESHIFT, 0);
641 642 *gref = ringp->xr_gref;
642 643
643 644 /* init frontend ring */
644 645 xvdi_ring_init_sring(ringp);
645 646 xvdi_ring_init_front_ring(ringp, ringp->xr_sring.fr.nr_ents,
646 647 ringp->xr_entry_size);
647 648 }
648 649
649 650 /*
650 651 * allocate Xen inter-domain communications ring for Xen virtual devices
651 652 * used only in frontend driver
652 653 * if *ringpp is not NULL, we'll simply re-init it
653 654 */
654 655 int
655 656 xvdi_alloc_ring(dev_info_t *dip, size_t nentry, size_t entrysize,
656 657 grant_ref_t *gref, xendev_ring_t **ringpp)
657 658 {
658 659 size_t len;
659 660 xendev_ring_t *ring;
660 661 ddi_dma_cookie_t dma_cookie;
661 662 uint_t ncookies;
662 663 grant_ref_t ring_gref;
663 664 domid_t oeid;
664 665 maddr_t rmaddr;
665 666
666 667 if (*ringpp) {
667 668 xvdi_reinit_ring(dip, gref, *ringpp);
668 669 return (DDI_SUCCESS);
669 670 }
670 671
671 672 *ringpp = ring = kmem_zalloc(sizeof (xendev_ring_t), KM_SLEEP);
672 673 oeid = xvdi_get_oeid(dip);
673 674
674 675 /*
675 676 * Allocate page for this ring buffer
676 677 */
677 678 if (ddi_dma_alloc_handle(dip, &xendev_dc_dmaattr, DDI_DMA_SLEEP,
678 679 0, &ring->xr_dma_hdl) != DDI_SUCCESS)
679 680 goto err;
680 681
681 682 if (ddi_dma_mem_alloc(ring->xr_dma_hdl, PAGESIZE,
682 683 &xendev_dc_accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
683 684 &ring->xr_vaddr, &len, &ring->xr_acc_hdl) != DDI_SUCCESS) {
684 685 ddi_dma_free_handle(&ring->xr_dma_hdl);
685 686 goto err;
686 687 }
687 688
688 689 if (ddi_dma_addr_bind_handle(ring->xr_dma_hdl, NULL,
689 690 ring->xr_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
690 691 DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_DMA_MAPPED) {
691 692 ddi_dma_mem_free(&ring->xr_acc_hdl);
692 693 ring->xr_vaddr = NULL;
693 694 ddi_dma_free_handle(&ring->xr_dma_hdl);
694 695 goto err;
695 696 }
696 697 ASSERT(ncookies == 1);
697 698 ring->xr_paddr = dma_cookie.dmac_laddress;
698 699 rmaddr = DOMAIN_IS_INITDOMAIN(xen_info) ? ring->xr_paddr :
699 700 pa_to_ma(ring->xr_paddr);
700 701
701 702 if ((ring_gref = gnttab_grant_foreign_access(oeid,
702 703 rmaddr >> PAGESHIFT, 0)) == (grant_ref_t)-1) {
703 704 (void) ddi_dma_unbind_handle(ring->xr_dma_hdl);
704 705 ddi_dma_mem_free(&ring->xr_acc_hdl);
705 706 ring->xr_vaddr = NULL;
706 707 ddi_dma_free_handle(&ring->xr_dma_hdl);
707 708 goto err;
708 709 }
709 710 *gref = ring->xr_gref = ring_gref;
710 711
711 712 /* init frontend ring */
712 713 xvdi_ring_init_sring(ring);
713 714 xvdi_ring_init_front_ring(ring, nentry, entrysize);
714 715
715 716 return (DDI_SUCCESS);
716 717
717 718 err:
718 719 kmem_free(ring, sizeof (xendev_ring_t));
719 720 return (DDI_FAILURE);
720 721 }
721 722
722 723 /*
723 724 * Release ring buffers allocated for Xen devices
724 725 * used for frontend driver
725 726 */
726 727 void
727 728 xvdi_free_ring(xendev_ring_t *ring)
728 729 {
729 730 ASSERT((ring != NULL) && (ring->xr_vaddr != NULL));
730 731
731 732 (void) gnttab_end_foreign_access_ref(ring->xr_gref, 0);
732 733 (void) ddi_dma_unbind_handle(ring->xr_dma_hdl);
733 734 ddi_dma_mem_free(&ring->xr_acc_hdl);
734 735 ddi_dma_free_handle(&ring->xr_dma_hdl);
735 736 kmem_free(ring, sizeof (xendev_ring_t));
736 737 }
737 738
738 739 dev_info_t *
739 740 xvdi_create_dev(dev_info_t *parent, xendev_devclass_t devclass,
740 741 domid_t dom, int vdev)
741 742 {
742 743 dev_info_t *dip;
743 744 boolean_t backend;
744 745 i_xd_cfg_t *xdcp;
745 746 char xsnamebuf[TYPICALMAXPATHLEN];
746 747 char *type, *node = NULL, *xsname = NULL;
747 748 unsigned int tlen;
748 749 int ret;
749 750
750 751 ASSERT(DEVI_BUSY_OWNED(parent));
751 752
752 753 backend = (dom != DOMID_SELF);
753 754 xdcp = i_xvdi_devclass2cfg(devclass);
754 755 ASSERT(xdcp != NULL);
755 756
756 757 if (vdev != VDEV_NOXS) {
757 758 if (!backend) {
758 759 (void) snprintf(xsnamebuf, sizeof (xsnamebuf),
759 760 "%s/%d", xdcp->xs_path_fe, vdev);
760 761 xsname = xsnamebuf;
761 762 node = xdcp->node_fe;
762 763 } else {
763 764 (void) snprintf(xsnamebuf, sizeof (xsnamebuf),
764 765 "%s/%d/%d", xdcp->xs_path_be, dom, vdev);
765 766 xsname = xsnamebuf;
766 767 node = xdcp->node_be;
767 768 }
768 769 } else {
769 770 node = xdcp->node_fe;
770 771 }
771 772
772 773 /* Must have a driver to use. */
773 774 if (node == NULL)
774 775 return (NULL);
775 776
776 777 /*
777 778 * We need to check the state of this device before we go
778 779 * further, otherwise we'll end up with a dead loop if
779 780 * anything goes wrong.
780 781 */
781 782 if ((xsname != NULL) &&
782 783 (xenbus_read_driver_state(xsname) >= XenbusStateClosing))
783 784 return (NULL);
784 785
785 786 ndi_devi_alloc_sleep(parent, node, DEVI_SID_NODEID, &dip);
786 787
787 788 /*
788 789 * Driver binding uses the compatible property _before_ the
789 790 * node name, so we set the node name to the 'model' of the
790 791 * device (i.e. 'xnb' or 'xdb') and, if 'type' is present,
791 792 * encode both the model and the type in a compatible property
792 793 * (i.e. 'xnb,netfront' or 'xnb,SUNW_mac'). This allows a
793 794 * driver binding based on the <model,type> pair _before_ a
794 795 * binding based on the node name.
795 796 */
796 797 if ((xsname != NULL) &&
797 798 (xenbus_read(XBT_NULL, xsname, "type", (void *)&type, &tlen)
798 799 == 0)) {
799 800 size_t clen;
800 801 char *c[1];
801 802
802 803 clen = strlen(node) + strlen(type) + 2;
803 804 c[0] = kmem_alloc(clen, KM_SLEEP);
804 805 (void) snprintf(c[0], clen, "%s,%s", node, type);
805 806
806 807 (void) ndi_prop_update_string_array(DDI_DEV_T_NONE,
807 808 dip, "compatible", (char **)c, 1);
808 809
809 810 kmem_free(c[0], clen);
810 811 kmem_free(type, tlen);
811 812 }
812 813
813 814 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "devclass", devclass);
814 815 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "domain", dom);
815 816 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "vdev", vdev);
816 817
817 818 if (i_ddi_devi_attached(parent))
818 819 ret = ndi_devi_online(dip, 0);
819 820 else
820 821 ret = ndi_devi_bind_driver(dip, 0);
821 822 if (ret != NDI_SUCCESS)
822 823 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
823 824
824 825 return (dip);
825 826 }
826 827
827 828 /*
828 829 * xendev_enum_class()
829 830 */
830 831 void
831 832 xendev_enum_class(dev_info_t *parent, xendev_devclass_t devclass)
832 833 {
833 834 boolean_t dom0 = DOMAIN_IS_INITDOMAIN(xen_info);
834 835 boolean_t domU = !dom0;
835 836 i_xd_cfg_t *xdcp;
836 837
837 838 xdcp = i_xvdi_devclass2cfg(devclass);
838 839 ASSERT(xdcp != NULL);
839 840
840 841 if (dom0 && !(xdcp->flags & XD_DOM_ZERO))
841 842 return;
842 843
843 844 if (domU && !(xdcp->flags & XD_DOM_GUEST))
844 845 return;
845 846
846 847 if (xdcp->xsdev == NULL) {
847 848 int circ;
848 849
849 850 /*
850 851 * Don't need to probe this kind of device from the
851 852 * store, just create one if it doesn't exist.
852 853 */
853 854
854 855 ndi_devi_enter(parent, &circ);
855 856 if (xvdi_find_dev(parent, devclass, DOMID_SELF, VDEV_NOXS)
856 857 == NULL)
857 858 (void) xvdi_create_dev(parent, devclass,
858 859 DOMID_SELF, VDEV_NOXS);
859 860 ndi_devi_exit(parent, circ);
860 861 } else {
861 862 /*
862 863 * Probe this kind of device from the store, both
863 864 * frontend and backend.
864 865 */
865 866 if (xdcp->node_fe != NULL) {
866 867 i_xvdi_enum_fe(parent, xdcp);
867 868 }
868 869 if (xdcp->node_be != NULL) {
869 870 i_xvdi_enum_be(parent, xdcp);
870 871 }
871 872 }
872 873 }
873 874
874 875 /*
875 876 * xendev_enum_all()
876 877 */
877 878 void
878 879 xendev_enum_all(dev_info_t *parent, boolean_t store_unavailable)
879 880 {
880 881 int i;
881 882 i_xd_cfg_t *xdcp;
882 883 boolean_t dom0 = DOMAIN_IS_INITDOMAIN(xen_info);
883 884
884 885 for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++) {
885 886 /*
886 887 * Dom0 relies on watchpoints to create non-soft
887 888 * devices - don't attempt to iterate over the store.
888 889 */
889 890 if (dom0 && (xdcp->xsdev != NULL))
890 891 continue;
891 892
892 893 /*
893 894 * If the store is not yet available, don't attempt to
894 895 * iterate.
895 896 */
896 897 if (store_unavailable && (xdcp->xsdev != NULL))
897 898 continue;
898 899
899 900 xendev_enum_class(parent, xdcp->devclass);
900 901 }
901 902 }
902 903
903 904 xendev_devclass_t
904 905 xendev_nodename_to_devclass(char *nodename)
905 906 {
906 907 int i;
907 908 i_xd_cfg_t *xdcp;
908 909
909 910 /*
910 911 * This relies on the convention that variants of a base
911 912 * driver share the same prefix and that there are no drivers
912 913 * which share a common prefix with the name of any other base
913 914 * drivers.
914 915 *
915 916 * So for a base driver 'xnb' (which is the name listed in
916 917 * xdci) the variants all begin with the string 'xnb' (in fact
917 918 * they are 'xnbe', 'xnbo' and 'xnbu') and there are no other
918 919 * base drivers which have the prefix 'xnb'.
919 920 */
920 921 ASSERT(nodename != NULL);
921 922 for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++) {
922 923 if (((xdcp->node_fe != NULL) &&
923 924 (strncmp(nodename, xdcp->node_fe,
924 925 strlen(xdcp->node_fe)) == 0)) ||
925 926 ((xdcp->node_be != NULL) &&
926 927 (strncmp(nodename, xdcp->node_be,
927 928 strlen(xdcp->node_be)) == 0)))
928 929
929 930 return (xdcp->devclass);
930 931 }
931 932 return (XEN_INVAL);
932 933 }
933 934
934 935 int
935 936 xendev_devclass_ipl(xendev_devclass_t devclass)
936 937 {
937 938 i_xd_cfg_t *xdcp;
938 939
939 940 xdcp = i_xvdi_devclass2cfg(devclass);
940 941 ASSERT(xdcp != NULL);
941 942
942 943 return (xdcp->xd_ipl);
943 944 }
944 945
/*
 * Determine if a devinfo instance exists of a particular device
 * class, domain and xenstore virtual device number.
 *
 * Returns the matching child of 'parent', or NULL if none is found.
 * The caller must hold 'parent' busy (ndi_devi_enter).
 */
dev_info_t *
xvdi_find_dev(dev_info_t *parent, xendev_devclass_t devclass,
    domid_t dom, int vdev)
{
	dev_info_t *dip;

	ASSERT(DEVI_BUSY_OWNED(parent));

	switch (devclass) {
	case XEN_CONSOLE:
	case XEN_XENBUS:
	case XEN_DOMCAPS:
	case XEN_BALLOON:
	case XEN_EVTCHN:
	case XEN_PRIVCMD:
		/* Console and soft devices have no vdev. */
		vdev = VDEV_NOXS;
		break;
	default:
		break;
	}

	for (dip = ddi_get_child(parent); dip != NULL;
	    dip = ddi_get_next_sibling(dip)) {
		int *vdevnump, *domidp, *devclsp, vdevnum;
		uint_t ndomid, nvdevnum, ndevcls;
		xendev_devclass_t devcls;
		domid_t domid;
		struct xendev_ppd *pdp = ddi_get_parent_data(dip);

		if (pdp == NULL) {
			/*
			 * No parent-private data on this child; fall
			 * back to the "domain"/"vdev"/"devclass"
			 * properties (presumably set when the node was
			 * created — confirm against xvdi_create_dev).
			 * A child missing any property is skipped.
			 */
			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "domain", &domidp, &ndomid) !=
			    DDI_PROP_SUCCESS)
				continue;
			ASSERT(ndomid == 1);
			domid = (domid_t)*domidp;
			ddi_prop_free(domidp);

			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "vdev", &vdevnump, &nvdevnum) !=
			    DDI_PROP_SUCCESS)
				continue;
			ASSERT(nvdevnum == 1);
			vdevnum = *vdevnump;
			ddi_prop_free(vdevnump);

			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "devclass", &devclsp,
			    &ndevcls) != DDI_PROP_SUCCESS)
				continue;
			ASSERT(ndevcls == 1);
			devcls = (xendev_devclass_t)*devclsp;
			ddi_prop_free(devclsp);
		} else {
			/* Parent-private data is authoritative. */
			domid = pdp->xd_domain;
			vdevnum = pdp->xd_vdevnum;
			devcls = pdp->xd_devclass;
		}

		if ((domid == dom) && (vdevnum == vdev) && (devcls == devclass))
			return (dip);
	}
	return (NULL);
}
1014 1015
1015 1016 int
1016 1017 xvdi_get_evtchn(dev_info_t *xdip)
1017 1018 {
1018 1019 struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1019 1020
1020 1021 ASSERT(pdp != NULL);
1021 1022 return (pdp->xd_evtchn);
1022 1023 }
1023 1024
1024 1025 int
1025 1026 xvdi_get_vdevnum(dev_info_t *xdip)
1026 1027 {
1027 1028 struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1028 1029
1029 1030 ASSERT(pdp != NULL);
1030 1031 return (pdp->xd_vdevnum);
1031 1032 }
1032 1033
1033 1034 char *
1034 1035 xvdi_get_xsname(dev_info_t *xdip)
1035 1036 {
1036 1037 struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1037 1038
1038 1039 ASSERT(pdp != NULL);
1039 1040 return ((char *)(pdp->xd_xsdev.nodename));
1040 1041 }
1041 1042
1042 1043 char *
1043 1044 xvdi_get_oename(dev_info_t *xdip)
1044 1045 {
1045 1046 struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1046 1047
1047 1048 ASSERT(pdp != NULL);
1048 1049 if (pdp->xd_devclass == XEN_CONSOLE)
1049 1050 return (NULL);
1050 1051 return ((char *)(pdp->xd_xsdev.otherend));
1051 1052 }
1052 1053
1053 1054 struct xenbus_device *
1054 1055 xvdi_get_xsd(dev_info_t *xdip)
1055 1056 {
1056 1057 struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1057 1058
1058 1059 ASSERT(pdp != NULL);
1059 1060 return (&pdp->xd_xsdev);
1060 1061 }
1061 1062
1062 1063 domid_t
1063 1064 xvdi_get_oeid(dev_info_t *xdip)
1064 1065 {
1065 1066 struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1066 1067
1067 1068 ASSERT(pdp != NULL);
1068 1069 if (pdp->xd_devclass == XEN_CONSOLE)
1069 1070 return ((domid_t)-1);
1070 1071 return ((domid_t)(pdp->xd_xsdev.otherend_id));
1071 1072 }
1072 1073
1073 1074 void
1074 1075 xvdi_dev_error(dev_info_t *dip, int errno, char *errstr)
1075 1076 {
1076 1077 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1077 1078
1078 1079 ASSERT(pdp != NULL);
1079 1080 xenbus_dev_error(&pdp->xd_xsdev, errno, errstr);
1080 1081 }
1081 1082
1082 1083 void
1083 1084 xvdi_fatal_error(dev_info_t *dip, int errno, char *errstr)
1084 1085 {
1085 1086 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1086 1087
1087 1088 ASSERT(pdp != NULL);
1088 1089 xenbus_dev_fatal(&pdp->xd_xsdev, errno, errstr);
1089 1090 }
1090 1091
/*
 * Taskq handler that processes an other-end xenbus state change for
 * 'dip'.  If the driver registered an XS_OE_STATE event handler the
 * new state is posted to it; otherwise the default close/teardown
 * action is taken.  Finally, if the peer has closed, the devinfo node
 * is scheduled for offline on the parent's taskq.
 */
static void
i_xvdi_oestate_handler(void *arg)
{
	i_oestate_evt_t *evt = (i_oestate_evt_t *)arg;
	dev_info_t *dip = evt->dip;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	XenbusState oestate = pdp->xd_xsdev.otherend_state;
	XenbusState curr_oestate = evt->state;
	ddi_eventcookie_t evc;

	/* evt is alloc'ed in i_xvdi_oestate_cb */
	kmem_free(evt, sizeof (i_oestate_evt_t));

	/*
	 * If the oestate we're handling is not the latest one,
	 * it does not make any sense to continue handling it.
	 */
	if (curr_oestate != oestate)
		return;

	mutex_enter(&pdp->xd_ndi_lk);

	if (pdp->xd_oe_ehid != NULL) {
		/* send notification to driver */
		if (ddi_get_eventcookie(dip, XS_OE_STATE,
		    &evc) == DDI_SUCCESS) {
			/* drop the lock across the driver's callback */
			mutex_exit(&pdp->xd_ndi_lk);
			(void) ndi_post_event(dip, dip, evc, &oestate);
			mutex_enter(&pdp->xd_ndi_lk);
		}
	} else {
		/*
		 * take default action, if driver hasn't registered its
		 * event handler yet
		 */
		if (oestate == XenbusStateClosing) {
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);
		} else if (oestate == XenbusStateClosed) {
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);
			(void) xvdi_post_event(dip, XEN_HP_REMOVE);
		}
	}

	mutex_exit(&pdp->xd_ndi_lk);

	/*
	 * We'll try to remove the devinfo node of this device if the
	 * other end has closed.
	 */
	if (oestate == XenbusStateClosed)
		(void) ddi_taskq_dispatch(DEVI(ddi_get_parent(dip))->devi_taskq,
		    xendev_offline_device, dip, DDI_SLEEP);
}
1146 1147
/*
 * Taskq handler for a hotplug-status change on a backend device.
 * Reads the current "hotplug-status" value from the store and posts
 * the corresponding xendev_hotplug_state_t to the driver's XS_HP_STATE
 * event handler.  If either the event cookie or the store read is
 * unavailable, the event is dropped silently.
 */
static void
i_xvdi_hpstate_handler(void *arg)
{
	dev_info_t *dip = (dev_info_t *)arg;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	ddi_eventcookie_t evc;
	char *hp_status;
	unsigned int hpl;

	mutex_enter(&pdp->xd_ndi_lk);
	if ((ddi_get_eventcookie(dip, XS_HP_STATE, &evc) == DDI_SUCCESS) &&
	    (xenbus_read(XBT_NULL, pdp->xd_hp_watch.node, "",
	    (void *)&hp_status, &hpl) == 0)) {

		/* Anything other than "connected" maps to Unrecognized. */
		xendev_hotplug_state_t new_state = Unrecognized;

		if (strcmp(hp_status, "connected") == 0)
			new_state = Connected;

		/* Drop the lock before calling back into the driver. */
		mutex_exit(&pdp->xd_ndi_lk);

		(void) ndi_post_event(dip, dip, evc, &new_state);
		kmem_free(hp_status, hpl);
		return;
	}
	mutex_exit(&pdp->xd_ndi_lk);
}
1174 1175
1175 1176 void
1176 1177 xvdi_notify_oe(dev_info_t *dip)
1177 1178 {
1178 1179 struct xendev_ppd *pdp;
1179 1180
1180 1181 pdp = ddi_get_parent_data(dip);
1181 1182 ASSERT(pdp->xd_evtchn != INVALID_EVTCHN);
1182 1183 ec_notify_via_evtchn(pdp->xd_evtchn);
1183 1184 }
1184 1185
1185 1186 static void
1186 1187 i_xvdi_bepath_cb(struct xenbus_watch *w, const char **vec, unsigned int len)
1187 1188 {
1188 1189 dev_info_t *dip = (dev_info_t *)w->dev;
1189 1190 struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1190 1191 char *be = NULL;
1191 1192 unsigned int bel;
1192 1193
1193 1194 ASSERT(len > XS_WATCH_PATH);
1194 1195 ASSERT(vec[XS_WATCH_PATH] != NULL);
1195 1196
1196 1197 /*
1197 1198 * If the backend is not the same as that we already stored,
1198 1199 * re-set our watch for its' state.
1199 1200 */
1200 1201 if ((xenbus_read(XBT_NULL, "", vec[XS_WATCH_PATH], (void *)be, &bel)
1201 1202 == 0) && (strcmp(be, pdp->xd_xsdev.otherend) != 0))
1202 1203 (void) i_xvdi_add_watch_oestate(dip);
1203 1204
1204 1205 if (be != NULL) {
1205 1206 ASSERT(bel > 0);
1206 1207 kmem_free(be, bel);
1207 1208 }
1208 1209 }
1209 1210
1210 1211 static void
1211 1212 i_xvdi_xb_watch_free(xd_xb_watches_t *xxwp)
1212 1213 {
1213 1214 ASSERT(xxwp->xxw_ref == 0);
1214 1215 strfree((char *)xxwp->xxw_watch.node);
1215 1216 kmem_free(xxwp, sizeof (*xxwp));
1216 1217 }
1217 1218
1218 1219 static void
1219 1220 i_xvdi_xb_watch_release(xd_xb_watches_t *xxwp)
1220 1221 {
1221 1222 ASSERT(MUTEX_HELD(&xxwp->xxw_xppd->xd_ndi_lk));
1222 1223 ASSERT(xxwp->xxw_ref > 0);
1223 1224 if (--xxwp->xxw_ref == 0)
1224 1225 i_xvdi_xb_watch_free(xxwp);
1225 1226 }
1226 1227
1227 1228 static void
1228 1229 i_xvdi_xb_watch_hold(xd_xb_watches_t *xxwp)
1229 1230 {
1230 1231 ASSERT(MUTEX_HELD(&xxwp->xxw_xppd->xd_ndi_lk));
1231 1232 ASSERT(xxwp->xxw_ref > 0);
1232 1233 xxwp->xxw_ref++;
1233 1234 }
1234 1235
1235 1236 static void
1236 1237 i_xvdi_xb_watch_cb_tq(void *arg)
1237 1238 {
1238 1239 xd_xb_watches_t *xxwp = (xd_xb_watches_t *)arg;
1239 1240 dev_info_t *dip = (dev_info_t *)xxwp->xxw_watch.dev;
1240 1241 struct xendev_ppd *pdp = xxwp->xxw_xppd;
1241 1242
1242 1243 xxwp->xxw_cb(dip, xxwp->xxw_watch.node, xxwp->xxw_arg);
1243 1244
1244 1245 mutex_enter(&pdp->xd_ndi_lk);
1245 1246 i_xvdi_xb_watch_release(xxwp);
1246 1247 mutex_exit(&pdp->xd_ndi_lk);
1247 1248 }
1248 1249
/*
 * Xenbus-context side of a user-registered watch.  Finds the
 * xd_xb_watches_t that owns 'w', takes a hold on it, and hands the
 * actual callback off to the per-device watch taskq so the driver's
 * handler runs outside xenbus watch context.
 */
static void
i_xvdi_xb_watch_cb(struct xenbus_watch *w, const char **vec, unsigned int len)
{
	dev_info_t *dip = (dev_info_t *)w->dev;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	xd_xb_watches_t *xxwp;

	ASSERT(len > XS_WATCH_PATH);
	ASSERT(vec[XS_WATCH_PATH] != NULL);

	mutex_enter(&pdp->xd_ndi_lk);
	/* Locate the tracking structure that embeds this watch. */
	for (xxwp = list_head(&pdp->xd_xb_watches); xxwp != NULL;
	    xxwp = list_next(&pdp->xd_xb_watches, xxwp)) {
		if (w == &xxwp->xxw_watch)
			break;
	}

	/* The watch may already have been torn down; nothing to do. */
	if (xxwp == NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		return;
	}

	/* Hold is released in i_xvdi_xb_watch_cb_tq(). */
	i_xvdi_xb_watch_hold(xxwp);
	(void) ddi_taskq_dispatch(pdp->xd_xb_watch_taskq,
	    i_xvdi_xb_watch_cb_tq, xxwp, DDI_SLEEP);
	mutex_exit(&pdp->xd_ndi_lk);
}
1276 1277
/*
 * Any watches registered with xvdi_add_xb_watch_handler() get torn down during
 * a suspend operation. So if a frontend driver want's to use these interfaces,
 * that driver is responsible for re-registering any watches it had before
 * the suspend operation.
 *
 * Registers a xenbus watch on "<dir>/<node>" whose events are delivered
 * to 'cb' (with 'arg') via a single-threaded per-device taskq.
 * Returns DDI_SUCCESS, or DDI_FAILURE if the taskq cannot be created,
 * the path is already watched, or registration with xenbus fails.
 */
int
xvdi_add_xb_watch_handler(dev_info_t *dip, const char *dir, const char *node,
    xvdi_xb_watch_cb_t cb, void *arg)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	xd_xb_watches_t *xxw_new, *xxwp;
	char *path;
	int n;

	ASSERT((dip != NULL) && (dir != NULL) && (node != NULL));
	ASSERT(cb != NULL);

	/* Build "<dir>/<node>"; freed via strfree() in watch_free. */
	n = strlen(dir) + 1 + strlen(node) + 1;
	path = kmem_zalloc(n, KM_SLEEP);
	(void) strlcat(path, dir, n);
	(void) strlcat(path, "/", n);
	(void) strlcat(path, node, n);
	ASSERT((strlen(path) + 1) == n);

	/* Initial reference is owned by the xd_xb_watches list. */
	xxw_new = kmem_zalloc(sizeof (*xxw_new), KM_SLEEP);
	xxw_new->xxw_ref = 1;
	xxw_new->xxw_watch.node = path;
	xxw_new->xxw_watch.callback = i_xvdi_xb_watch_cb;
	xxw_new->xxw_watch.dev = (struct xenbus_device *)dip;
	xxw_new->xxw_xppd = pdp;
	xxw_new->xxw_cb = cb;
	xxw_new->xxw_arg = arg;

	mutex_enter(&pdp->xd_ndi_lk);

	/*
	 * If this is the first watch we're setting up, create a taskq
	 * to dispatch watch events and initialize the watch list.
	 */
	if (pdp->xd_xb_watch_taskq == NULL) {
		char tq_name[TASKQ_NAMELEN];

		ASSERT(list_is_empty(&pdp->xd_xb_watches));

		(void) snprintf(tq_name, sizeof (tq_name),
		    "%s_xb_watch_tq", ddi_get_name(dip));

		if ((pdp->xd_xb_watch_taskq = ddi_taskq_create(dip, tq_name,
		    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
			i_xvdi_xb_watch_release(xxw_new);
			mutex_exit(&pdp->xd_ndi_lk);
			return (DDI_FAILURE);
		}
	}

	/* Don't allow duplicate watches to be registered */
	for (xxwp = list_head(&pdp->xd_xb_watches); xxwp != NULL;
	    xxwp = list_next(&pdp->xd_xb_watches, xxwp)) {

		/*
		 * A duplicate trips the ASSERT on DEBUG kernels; on
		 * non-DEBUG kernels it is rejected with DDI_FAILURE.
		 */
		ASSERT(strcmp(xxwp->xxw_watch.node, path) != 0);
		if (strcmp(xxwp->xxw_watch.node, path) != 0)
			continue;
		i_xvdi_xb_watch_release(xxw_new);
		mutex_exit(&pdp->xd_ndi_lk);
		return (DDI_FAILURE);
	}

	if (register_xenbus_watch(&xxw_new->xxw_watch) != 0) {
		/* Undo taskq creation if this would be the only watch. */
		if (list_is_empty(&pdp->xd_xb_watches)) {
			ddi_taskq_destroy(pdp->xd_xb_watch_taskq);
			pdp->xd_xb_watch_taskq = NULL;
		}
		i_xvdi_xb_watch_release(xxw_new);
		mutex_exit(&pdp->xd_ndi_lk);
		return (DDI_FAILURE);
	}

	list_insert_head(&pdp->xd_xb_watches, xxw_new);
	mutex_exit(&pdp->xd_ndi_lk);
	return (DDI_SUCCESS);
}
1359 1360
/*
 * Tear down all xenbus watches registered by the specified dip.
 */
void
xvdi_remove_xb_watch_handlers(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	xd_xb_watches_t *xxwp;
	ddi_taskq_t *tq;

	mutex_enter(&pdp->xd_ndi_lk);

	while ((xxwp = list_remove_head(&pdp->xd_xb_watches)) != NULL) {
		/*
		 * unregister_xenbus_watch() may block, so it cannot be
		 * called with xd_ndi_lk held.
		 */
		mutex_exit(&pdp->xd_ndi_lk);
		unregister_xenbus_watch(&xxwp->xxw_watch);
		mutex_enter(&pdp->xd_ndi_lk);
		i_xvdi_xb_watch_release(xxwp);
	}
	ASSERT(list_is_empty(&pdp->xd_xb_watches));

	/*
	 * We can't hold xd_ndi_lk while we destroy the xd_xb_watch_taskq.
	 * This is because if there are currently any executing taskq threads,
	 * we will block until they are finished, and to finish they need
	 * to aquire xd_ndi_lk in i_xvdi_xb_watch_cb_tq() so they can release
	 * their reference on their corresponding xxwp structure.
	 */
	tq = pdp->xd_xb_watch_taskq;
	pdp->xd_xb_watch_taskq = NULL;
	mutex_exit(&pdp->xd_ndi_lk);
	if (tq != NULL)
		ddi_taskq_destroy(tq);
}
1393 1394
/*
 * Set up tracking of the other end's XenbusState for 'dip': creates the
 * single-threaded oe taskq (if not yet present) and registers the
 * otherend watch via talk_to_otherend().  Caller must hold xd_ndi_lk.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watch_oestate(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.nodename != NULL);
	ASSERT(mutex_owned(&pdp->xd_ndi_lk));

	/*
	 * Create taskq for delivering other end state change event to
	 * this device later.
	 *
	 * Set nthreads to 1 to make sure that events can be delivered
	 * in order.
	 *
	 * Note: It is _not_ guaranteed that driver can see every
	 * xenstore change under the path that it is watching. If two
	 * changes happen consecutively in a very short amount of
	 * time, it is likely that the driver will see only the last
	 * one.
	 */
	if (pdp->xd_oe_taskq == NULL)
		if ((pdp->xd_oe_taskq = ddi_taskq_create(dip,
		    "xendev_oe_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL)
			return (DDI_FAILURE);

	/*
	 * Watch for changes to the XenbusState of otherend.
	 */
	pdp->xd_xsdev.otherend_state = XenbusStateUnknown;
	pdp->xd_xsdev.otherend_changed = i_xvdi_oestate_cb;

	if (talk_to_otherend(&pdp->xd_xsdev) != 0) {
		/* Roll back taskq/watch state set up above. */
		i_xvdi_rem_watch_oestate(dip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1434 1435
/*
 * Undo i_xvdi_add_watch_oestate(): unregister the otherend watch,
 * quiesce the oe taskq, and reset the otherend bookkeeping in the
 * xenbus device.  Caller must hold xd_ndi_lk; the lock is dropped
 * temporarily around the blocking unregister/destroy calls.
 */
static void
i_xvdi_rem_watch_oestate(dev_info_t *dip)
{
	struct xendev_ppd *pdp;
	struct xenbus_device *dev;

	pdp = ddi_get_parent_data(dip);
	ASSERT(pdp != NULL);
	ASSERT(mutex_owned(&pdp->xd_ndi_lk));

	dev = &pdp->xd_xsdev;

	/* Unwatch for changes to XenbusState of otherend */
	if (dev->otherend_watch.node != NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		unregister_xenbus_watch(&dev->otherend_watch);
		mutex_enter(&pdp->xd_ndi_lk);
	}

	/* make sure no event handler is running */
	if (pdp->xd_oe_taskq != NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		ddi_taskq_destroy(pdp->xd_oe_taskq);
		mutex_enter(&pdp->xd_ndi_lk);
		pdp->xd_oe_taskq = NULL;
	}

	/* clean up */
	dev->otherend_state = XenbusStateUnknown;
	dev->otherend_id = (domid_t)-1;
	if (dev->otherend_watch.node != NULL)
		kmem_free((void *)dev->otherend_watch.node,
		    strlen(dev->otherend_watch.node) + 1);
	dev->otherend_watch.node = NULL;
	if (dev->otherend != NULL)
		kmem_free((void *)dev->otherend, strlen(dev->otherend) + 1);
	dev->otherend = NULL;
}
1473 1474
/*
 * Set up the "hotplug-status" watch for a backend device: creates the
 * single-threaded hp taskq (if not yet present) and registers a watch
 * on "<nodename>/hotplug-status".  Caller must hold xd_ndi_lk.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watch_hpstate(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 0);
	ASSERT(mutex_owned(&pdp->xd_ndi_lk));

	/*
	 * Create taskq for delivering hotplug status change event to
	 * this device later.
	 *
	 * Set nthreads to 1 to make sure that events can be delivered
	 * in order.
	 *
	 * Note: It is _not_ guaranteed that driver can see every
	 * hotplug status change under the path that it is
	 * watching. If two changes happen consecutively in a very
	 * short amount of time, it is likely that the driver only
	 * sees the last one.
	 */
	if (pdp->xd_hp_taskq == NULL)
		if ((pdp->xd_hp_taskq = ddi_taskq_create(dip,
		    "xendev_hp_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL)
			return (DDI_FAILURE);

	if (pdp->xd_hp_watch.node == NULL) {
		size_t len;
		char *path;

		ASSERT(pdp->xd_xsdev.nodename != NULL);

		/* Watch path: "<nodename>/hotplug-status". */
		len = strlen(pdp->xd_xsdev.nodename) +
		    strlen("/hotplug-status") + 1;
		path = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(path, len, "%s/hotplug-status",
		    pdp->xd_xsdev.nodename);

		pdp->xd_hp_watch.node = path;
		pdp->xd_hp_watch.callback = i_xvdi_hpstate_cb;
		pdp->xd_hp_watch.dev = (struct xenbus_device *)dip; /* yuck! */
		if (register_xenbus_watch(&pdp->xd_hp_watch) != 0) {
			i_xvdi_rem_watch_hpstate(dip);
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
1524 1525
/*
 * Undo i_xvdi_add_watch_hpstate(): unregister the hotplug-status
 * watch, quiesce the hp taskq, and free the watch path.  Caller must
 * hold xd_ndi_lk; the lock is dropped around the blocking calls.
 */
static void
i_xvdi_rem_watch_hpstate(dev_info_t *dip)
{
	struct xendev_ppd *pdp;
	pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 0);
	ASSERT(mutex_owned(&pdp->xd_ndi_lk));

	/* Unwatch for changes to "hotplug-status" node for backend device. */
	if (pdp->xd_hp_watch.node != NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		unregister_xenbus_watch(&pdp->xd_hp_watch);
		mutex_enter(&pdp->xd_ndi_lk);
	}

	/* Make sure no event handler is running. */
	if (pdp->xd_hp_taskq != NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		ddi_taskq_destroy(pdp->xd_hp_taskq);
		mutex_enter(&pdp->xd_ndi_lk);
		pdp->xd_hp_taskq = NULL;
	}

	/* Clean up. */
	if (pdp->xd_hp_watch.node != NULL) {
		kmem_free((void *)pdp->xd_hp_watch.node,
		    strlen(pdp->xd_hp_watch.node) + 1);
		pdp->xd_hp_watch.node = NULL;
	}
}
1557 1558
/*
 * Establish all watches needed by 'dip': the otherend state watch for
 * everyone, plus the backend-path watch (frontends) or the
 * hotplug-status watch (backends).  On failure the oestate watch is
 * rolled back.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watches(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);

	mutex_enter(&pdp->xd_ndi_lk);

	if (i_xvdi_add_watch_oestate(dip) != DDI_SUCCESS) {
		mutex_exit(&pdp->xd_ndi_lk);
		return (DDI_FAILURE);
	}

	if (pdp->xd_xsdev.frontend == 1) {
		/*
		 * Frontend devices must watch for the backend path
		 * changing.
		 */
		if (i_xvdi_add_watch_bepath(dip) != DDI_SUCCESS)
			goto unwatch_and_fail;
	} else {
		/*
		 * Backend devices must watch for hotplug events.
		 */
		if (i_xvdi_add_watch_hpstate(dip) != DDI_SUCCESS)
			goto unwatch_and_fail;
	}

	mutex_exit(&pdp->xd_ndi_lk);

	return (DDI_SUCCESS);

unwatch_and_fail:
	/* Undo the oestate watch set up above. */
	i_xvdi_rem_watch_oestate(dip);
	mutex_exit(&pdp->xd_ndi_lk);

	return (DDI_FAILURE);
}
1597 1598
/*
 * Tear down every watch held by 'dip': the otherend state watch, the
 * per-role watch (backend path or hotplug status), and any watches
 * registered through xvdi_add_xb_watch_handler().
 */
static void
i_xvdi_rem_watches(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);

	mutex_enter(&pdp->xd_ndi_lk);

	i_xvdi_rem_watch_oestate(dip);

	if (pdp->xd_xsdev.frontend == 1)
		i_xvdi_rem_watch_bepath(dip);
	else
		i_xvdi_rem_watch_hpstate(dip);

	mutex_exit(&pdp->xd_ndi_lk);

	/* Takes and drops xd_ndi_lk itself, so called unlocked. */
	xvdi_remove_xb_watch_handlers(dip);
}
1618 1619
/*
 * Register a watch on "<nodename>/backend" so a frontend notices when
 * its backend path changes.  Idempotent: does nothing if the watch is
 * already set.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watch_bepath(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 1);

	/*
	 * Frontend devices need to watch for the backend path changing.
	 */
	if (pdp->xd_bepath_watch.node == NULL) {
		size_t len;
		char *path;

		ASSERT(pdp->xd_xsdev.nodename != NULL);

		len = strlen(pdp->xd_xsdev.nodename) + strlen("/backend") + 1;
		path = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(path, len, "%s/backend",
		    pdp->xd_xsdev.nodename);

		pdp->xd_bepath_watch.node = path;
		pdp->xd_bepath_watch.callback = i_xvdi_bepath_cb;
		pdp->xd_bepath_watch.dev = (struct xenbus_device *)dip;
		if (register_xenbus_watch(&pdp->xd_bepath_watch) != 0) {
			/* Registration failed; release the path. */
			kmem_free(path, len);
			pdp->xd_bepath_watch.node = NULL;
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
1653 1654
/*
 * Unregister and free the frontend's backend-path watch, if set.
 * Caller must hold xd_ndi_lk; it is dropped around the blocking
 * unregister call.
 */
static void
i_xvdi_rem_watch_bepath(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 1);
	ASSERT(mutex_owned(&pdp->xd_ndi_lk));

	if (pdp->xd_bepath_watch.node != NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		unregister_xenbus_watch(&pdp->xd_bepath_watch);
		mutex_enter(&pdp->xd_ndi_lk);

		kmem_free((void *)(pdp->xd_bepath_watch.node),
		    strlen(pdp->xd_bepath_watch.node) + 1);
		pdp->xd_bepath_watch.node = NULL;
	}
}
1673 1674
1674 1675 int
1675 1676 xvdi_switch_state(dev_info_t *dip, xenbus_transaction_t xbt,
1676 1677 XenbusState newState)
1677 1678 {
1678 1679 int rv;
1679 1680 struct xendev_ppd *pdp;
1680 1681
1681 1682 pdp = ddi_get_parent_data(dip);
1682 1683 ASSERT(pdp != NULL);
1683 1684
1684 1685 XVDI_DPRINTF(XVDI_DBG_STATE,
1685 1686 "xvdi_switch_state: %s@%s's xenbus state moves to %d\n",
1686 1687 ddi_binding_name(dip) == NULL ? "null" : ddi_binding_name(dip),
1687 1688 ddi_get_name_addr(dip) == NULL ? "null" : ddi_get_name_addr(dip),
1688 1689 newState);
1689 1690
1690 1691 rv = xenbus_switch_state(&pdp->xd_xsdev, xbt, newState);
1691 1692 if (rv > 0)
1692 1693 cmn_err(CE_WARN, "xvdi_switch_state: change state failed");
1693 1694
1694 1695 return (rv);
1695 1696 }
1696 1697
/*
 * Notify hotplug script running in userland
 *
 * Builds an nvlist describing the device (domain, vdev, devclass,
 * device name, frontend/backend) and logs an EC_xendev "add" or
 * "remove" sysevent according to 'hpc'.  Returns the nvlist/sysevent
 * error code, or DDI_FAILURE for an unrecognized hpc value.
 */
int
xvdi_post_event(dev_info_t *dip, xendev_hotplug_cmd_t hpc)
{
	struct xendev_ppd *pdp;
	nvlist_t *attr_list = NULL;
	i_xd_cfg_t *xdcp;
	sysevent_id_t eid;
	int err;
	char devname[256]; /* XXPV dme: ? */

	pdp = ddi_get_parent_data(dip);
	ASSERT(pdp != NULL);

	xdcp = i_xvdi_devclass2cfg(pdp->xd_devclass);
	ASSERT(xdcp != NULL);

	/* e.g. "xdb3": driver name plus instance number. */
	(void) snprintf(devname, sizeof (devname) - 1, "%s%d",
	    ddi_driver_name(dip), ddi_get_instance(dip));

	err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME, KM_NOSLEEP);
	if (err != DDI_SUCCESS)
		goto failure;

	err = nvlist_add_int32(attr_list, "domain", pdp->xd_domain);
	if (err != DDI_SUCCESS)
		goto failure;
	err = nvlist_add_int32(attr_list, "vdev", pdp->xd_vdevnum);
	if (err != DDI_SUCCESS)
		goto failure;
	err = nvlist_add_string(attr_list, "devclass", xdcp->xsdev);
	if (err != DDI_SUCCESS)
		goto failure;
	err = nvlist_add_string(attr_list, "device", devname);
	if (err != DDI_SUCCESS)
		goto failure;
	err = nvlist_add_string(attr_list, "fob",
	    ((pdp->xd_xsdev.frontend == 1) ? "frontend" : "backend"));
	if (err != DDI_SUCCESS)
		goto failure;

	switch (hpc) {
	case XEN_HP_ADD:
		err = ddi_log_sysevent(dip, DDI_VENDOR_SUNW, "EC_xendev",
		    "add", attr_list, &eid, DDI_NOSLEEP);
		break;
	case XEN_HP_REMOVE:
		err = ddi_log_sysevent(dip, DDI_VENDOR_SUNW, "EC_xendev",
		    "remove", attr_list, &eid, DDI_NOSLEEP);
		break;
	default:
		err = DDI_FAILURE;
		goto failure;
	}

failure:
	/* nvlist_free(NULL) is a no-op, so this covers all paths. */
	nvlist_free(attr_list);

	return (err);
}
1759 1760
1760 1761 /* ARGSUSED */
1761 1762 static void
1762 1763 i_xvdi_probe_path_cb(struct xenbus_watch *w, const char **vec,
1763 1764 unsigned int len)
1764 1765 {
1765 1766 char *path;
1766 1767
1767 1768 if (xendev_dip == NULL)
1768 1769 xendev_dip = ddi_find_devinfo("xpvd", -1, 0);
1769 1770
1770 1771 path = i_ddi_strdup((char *)vec[XS_WATCH_PATH], KM_SLEEP);
1771 1772
1772 1773 (void) ddi_taskq_dispatch(DEVI(xendev_dip)->devi_taskq,
1773 1774 i_xvdi_probe_path_handler, (void *)path, DDI_SLEEP);
1774 1775 }
1775 1776
1776 1777 static void
1777 1778 i_xvdi_watch_device(char *path)
1778 1779 {
1779 1780 struct xenbus_watch *w;
1780 1781
1781 1782 ASSERT(path != NULL);
1782 1783
1783 1784 w = kmem_zalloc(sizeof (*w), KM_SLEEP);
1784 1785 w->node = path;
1785 1786 w->callback = &i_xvdi_probe_path_cb;
1786 1787 w->dev = NULL;
1787 1788
1788 1789 if (register_xenbus_watch(w) != 0) {
1789 1790 cmn_err(CE_WARN, "i_xvdi_watch_device: "
1790 1791 "cannot set watch on %s", path);
1791 1792 kmem_free(w, sizeof (*w));
1792 1793 return;
1793 1794 }
1794 1795 }
1795 1796
1796 1797 void
1797 1798 xvdi_watch_devices(int newstate)
1798 1799 {
1799 1800 int devclass;
1800 1801
1801 1802 /*
1802 1803 * Watch for devices being created in the store.
1803 1804 */
1804 1805 if (newstate == XENSTORE_DOWN)
1805 1806 return;
1806 1807 for (devclass = 0; devclass < NXDC; devclass++) {
1807 1808 if (xdci[devclass].xs_path_fe != NULL)
1808 1809 i_xvdi_watch_device(xdci[devclass].xs_path_fe);
1809 1810 if (xdci[devclass].xs_path_be != NULL)
1810 1811 i_xvdi_watch_device(xdci[devclass].xs_path_be);
1811 1812 }
1812 1813 }
1813 1814
1814 1815 /*
1815 1816 * Iterate over the store looking for backend devices to create.
1816 1817 */
1817 1818 static void
1818 1819 i_xvdi_enum_be(dev_info_t *parent, i_xd_cfg_t *xdcp)
1819 1820 {
1820 1821 char **domains;
1821 1822 unsigned int ndomains;
1822 1823 int ldomains, i;
1823 1824
1824 1825 if ((domains = xenbus_directory(XBT_NULL, xdcp->xs_path_be, "",
1825 1826 &ndomains)) == NULL)
1826 1827 return;
1827 1828
1828 1829 for (i = 0, ldomains = 0; i < ndomains; i++) {
1829 1830 ldomains += strlen(domains[i]) + 1 + sizeof (char *);
1830 1831
1831 1832 i_xvdi_enum_worker(parent, xdcp, domains[i]);
1832 1833 }
1833 1834 kmem_free(domains, ldomains);
1834 1835 }
1835 1836
/*
 * Iterate over the store looking for frontend devices to create.
 */
static void
i_xvdi_enum_fe(dev_info_t *parent, i_xd_cfg_t *xdcp)
{
	/* A NULL domain selects the frontend path (DOMID_SELF). */
	i_xvdi_enum_worker(parent, xdcp, NULL);
}
1844 1845
/*
 * Common enumeration engine for i_xvdi_enum_fe/be.  Lists the device
 * entries under the class's front- or backend store path (for
 * 'domain' when non-NULL) and creates a devinfo node for each
 * device that does not already exist under 'parent'.
 */
static void
i_xvdi_enum_worker(dev_info_t *parent, i_xd_cfg_t *xdcp,
    char *domain)
{
	char *path, *domain_path, *ep;
	char **devices;
	unsigned int ndevices;
	int ldevices, j, circ;
	domid_t dom;
	long tmplong;

	if (domain == NULL) {
		/* Frontend: our own devices. */
		dom = DOMID_SELF;
		path = xdcp->xs_path_fe;
		domain_path = "";
	} else {
		/* Backend: 'domain' is the peer domid in decimal. */
		(void) ddi_strtol(domain, &ep, 0, &tmplong);
		dom = tmplong;
		path = xdcp->xs_path_be;
		domain_path = domain;
	}

	if ((devices = xenbus_directory(XBT_NULL, path, domain_path,
	    &ndevices)) == NULL)
		return;

	for (j = 0, ldevices = 0; j < ndevices; j++) {
		int vdev;

		/* Accumulate the directory allocation size for the free. */
		ldevices += strlen(devices[j]) + 1 + sizeof (char *);
		(void) ddi_strtol(devices[j], &ep, 0, &tmplong);
		vdev = tmplong;

		/* Hold the parent busy across the lookup/create pair. */
		ndi_devi_enter(parent, &circ);

		if (xvdi_find_dev(parent, xdcp->devclass, dom, vdev) == NULL)
			(void) xvdi_create_dev(parent, xdcp->devclass,
			    dom, vdev);

		ndi_devi_exit(parent, circ);
	}
	kmem_free(devices, ldevices);
}
1888 1889
/*
 * Leaf drivers should call this in their detach() routine during suspend.
 *
 * Tears down all watches for the device; the driver must re-register
 * them on resume (see xvdi_resume()).
 */
void
xvdi_suspend(dev_info_t *dip)
{
	i_xvdi_rem_watches(dip);
}
1897 1898
/*
 * Leaf drivers should call this in their attach() routine during resume.
 *
 * Re-establishes the watches removed by xvdi_suspend().  Returns
 * DDI_SUCCESS or DDI_FAILURE.
 */
int
xvdi_resume(dev_info_t *dip)
{
	return (i_xvdi_add_watches(dip));
}
1906 1907
/*
 * Add event handler for the leaf driver
 * to handle event triggered by the change in xenstore
 *
 * 'name' selects the watch: XS_OE_STATE (otherend state changes) or
 * XS_HP_STATE (hotplug state, backend devices only).  'evthandler' is
 * registered with the DDI event framework against 'dip'; 'arg' is passed
 * through to it.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
xvdi_add_event_handler(dev_info_t *dip, char *name,
    void (*evthandler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg)
{
	ddi_eventcookie_t ecv;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	ddi_callback_id_t *cbid;
	boolean_t call_handler;
	i_oestate_evt_t *evt = NULL;
	XenbusState oestate;

	ASSERT(pdp != NULL);

	mutex_enter(&pdp->xd_ndi_lk);

	if (strcmp(name, XS_OE_STATE) == 0) {
		ASSERT(pdp->xd_xsdev.otherend != NULL);

		cbid = &pdp->xd_oe_ehid;
	} else if (strcmp(name, XS_HP_STATE) == 0) {
		/* Hotplug state is a backend-only concept. */
		if (pdp->xd_xsdev.frontend == 1) {
			mutex_exit(&pdp->xd_ndi_lk);
			return (DDI_FAILURE);
		}

		ASSERT(pdp->xd_hp_watch.node != NULL);

		cbid = &pdp->xd_hp_ehid;
	} else {
		/* Unsupported watch. */
		mutex_exit(&pdp->xd_ndi_lk);
		return (DDI_FAILURE);
	}

	/*
	 * No event handler provided, take default action to handle
	 * event.
	 */
	if (evthandler == NULL) {
		mutex_exit(&pdp->xd_ndi_lk);
		return (DDI_SUCCESS);
	}

	/* Only one handler per watch may be registered at a time. */
	ASSERT(*cbid == NULL);

	if (ddi_get_eventcookie(dip, name, &ecv) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "failed to find %s cookie for %s@%s",
		    name, ddi_get_name(dip), ddi_get_name_addr(dip));
		mutex_exit(&pdp->xd_ndi_lk);
		return (DDI_FAILURE);
	}
	if (ddi_add_event_handler(dip, ecv, evthandler, arg, cbid)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "failed to add %s event handler for %s@%s",
		    name, ddi_get_name(dip), ddi_get_name_addr(dip));
		*cbid = NULL;
		mutex_exit(&pdp->xd_ndi_lk);
		return (DDI_FAILURE);
	}

	/*
	 * if we're adding an oe state callback, and the ring has already
	 * transitioned out of Unknown, call the handler after we release
	 * the mutex.
	 */
	call_handler = B_FALSE;
	if ((strcmp(name, XS_OE_STATE) == 0) &&
	    (pdp->xd_xsdev.otherend_state != XenbusStateUnknown)) {
		/* Snapshot the state while still holding xd_ndi_lk. */
		oestate = pdp->xd_xsdev.otherend_state;
		call_handler = B_TRUE;
	}

	mutex_exit(&pdp->xd_ndi_lk);

	if (call_handler) {
		/*
		 * Dispatch via the per-device taskq so the handler runs
		 * without xd_ndi_lk held; the taskq handler frees 'evt'.
		 */
		evt = kmem_alloc(sizeof (i_oestate_evt_t), KM_SLEEP);
		evt->dip = dip;
		evt->state = oestate;
		(void) ddi_taskq_dispatch(pdp->xd_oe_taskq,
		    i_xvdi_oestate_handler, (void *)evt, DDI_SLEEP);
	}

	return (DDI_SUCCESS);
}
1996 1997
1997 1998 /*
1998 1999 * Remove event handler for the leaf driver and unwatch xenstore
1999 2000 * so, driver will not be notified when xenstore entry changed later
2000 2001 */
2001 2002 void
2002 2003 xvdi_remove_event_handler(dev_info_t *dip, char *name)
2003 2004 {
2004 2005 struct xendev_ppd *pdp;
2005 2006 boolean_t rem_oe = B_FALSE, rem_hp = B_FALSE;
2006 2007 ddi_callback_id_t oeid = NULL, hpid = NULL;
2007 2008
2008 2009 pdp = ddi_get_parent_data(dip);
2009 2010 ASSERT(pdp != NULL);
2010 2011
2011 2012 if (name == NULL) {
2012 2013 rem_oe = B_TRUE;
2013 2014 rem_hp = B_TRUE;
2014 2015 } else if (strcmp(name, XS_OE_STATE) == 0) {
2015 2016 rem_oe = B_TRUE;
2016 2017 } else if (strcmp(name, XS_HP_STATE) == 0) {
2017 2018 rem_hp = B_TRUE;
2018 2019 } else {
2019 2020 cmn_err(CE_WARN, "event %s not supported, cannot remove", name);
2020 2021 return;
2021 2022 }
2022 2023
2023 2024 mutex_enter(&pdp->xd_ndi_lk);
2024 2025
2025 2026 if (rem_oe && (pdp->xd_oe_ehid != NULL)) {
2026 2027 oeid = pdp->xd_oe_ehid;
2027 2028 pdp->xd_oe_ehid = NULL;
2028 2029 }
2029 2030
2030 2031 if (rem_hp && (pdp->xd_hp_ehid != NULL)) {
2031 2032 hpid = pdp->xd_hp_ehid;
2032 2033 pdp->xd_hp_ehid = NULL;
2033 2034 }
2034 2035
2035 2036 mutex_exit(&pdp->xd_ndi_lk);
2036 2037
2037 2038 if (oeid != NULL)
2038 2039 (void) ddi_remove_event_handler(oeid);
2039 2040 if (hpid != NULL)
2040 2041 (void) ddi_remove_event_handler(hpid);
2041 2042 }
2042 2043
2043 2044
/*
 * common ring interfaces
 */

/*
 * Accessors for the front/back views of a shared comif ring, plus
 * helpers to locate a ring entry.  Ring sizes are powers of two, so
 * producer/consumer indices are mapped into the ring with a mask.
 */
#define	FRONT_RING(_ringp)	(&(_ringp)->xr_sring.fr)
#define	BACK_RING(_ringp)	(&(_ringp)->xr_sring.br)
/*
 * Fix: expand the macro parameter (_ringp) here.  The old definition
 * expanded to the literal identifier "ringp" and only compiled because
 * every call site happened to use a variable of that exact name.
 */
#define	GET_RING_SIZE(_ringp)	RING_SIZE(FRONT_RING(_ringp))
#define	GET_RING_ENTRY_FE(_ringp, _idx)		\
	(FRONT_RING(_ringp)->sring->ring +	\
	(_ringp)->xr_entry_size * ((_idx) & (GET_RING_SIZE(_ringp) - 1)))
#define	GET_RING_ENTRY_BE(_ringp, _idx)		\
	(BACK_RING(_ringp)->sring->ring +	\
	(_ringp)->xr_entry_size * ((_idx) & (GET_RING_SIZE(_ringp) - 1)))
2057 2058
2058 2059 unsigned int
2059 2060 xvdi_ring_avail_slots(xendev_ring_t *ringp)
2060 2061 {
2061 2062 comif_ring_fe_t *frp;
2062 2063 comif_ring_be_t *brp;
2063 2064
2064 2065 if (ringp->xr_frontend) {
2065 2066 frp = FRONT_RING(ringp);
2066 2067 return (GET_RING_SIZE(ringp) -
2067 2068 (frp->req_prod_pvt - frp->rsp_cons));
2068 2069 } else {
2069 2070 brp = BACK_RING(ringp);
2070 2071 return (GET_RING_SIZE(ringp) -
2071 2072 (brp->rsp_prod_pvt - brp->req_cons));
2072 2073 }
2073 2074 }
2074 2075
/*
 * Backend only: return nonzero if the frontend has published requests
 * we have not yet consumed.
 */
int
xvdi_ring_has_unconsumed_requests(xendev_ring_t *ringp)
{
	comif_ring_be_t *brp;

	ASSERT(!ringp->xr_frontend);
	brp = BACK_RING(ringp);
	/*
	 * Requests are pending iff the shared request producer has moved
	 * past our consumer index; the second clause bounds the count to
	 * the ring size, guarding against a corrupt/runaway producer.
	 */
	return ((brp->req_cons !=
	    ddi_get32(ringp->xr_acc_hdl, &brp->sring->req_prod)) &&
	    ((brp->req_cons - brp->rsp_prod_pvt) != RING_SIZE(brp)));
}
2086 2087
/*
 * Frontend only: return nonzero if requests we pushed are still
 * awaiting a response from the backend (private request producer is
 * ahead of the shared response producer).
 */
int
xvdi_ring_has_incomp_request(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;

	ASSERT(ringp->xr_frontend);
	frp = FRONT_RING(ringp);
	return (frp->req_prod_pvt !=
	    ddi_get32(ringp->xr_acc_hdl, &frp->sring->rsp_prod));
}
2097 2098
/*
 * Frontend only: return nonzero if the backend has published responses
 * we have not yet consumed.
 */
int
xvdi_ring_has_unconsumed_responses(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;

	ASSERT(ringp->xr_frontend);
	frp = FRONT_RING(ringp);
	return (frp->rsp_cons !=
	    ddi_get32(ringp->xr_acc_hdl, &frp->sring->rsp_prod));
}
2108 2109
/* NOTE: req_event will be increased as needed */
/*
 * Return a pointer to the next request slot and advance the relevant
 * index, or NULL if nothing is available.  Frontend: a free slot to
 * fill in.  Backend: the next unconsumed request from the peer.
 */
void *
xvdi_ring_get_request(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (ringp->xr_frontend) {
		/* for frontend ring */
		frp = FRONT_RING(ringp);
		if (!RING_FULL(frp))
			return (GET_RING_ENTRY_FE(ringp, frp->req_prod_pvt++));
		else
			return (NULL);
	} else {
		/* for backend ring */
		brp = BACK_RING(ringp);
		/* RING_FINAL_CHECK_FOR_REQUESTS() */
		if (xvdi_ring_has_unconsumed_requests(ringp))
			return (GET_RING_ENTRY_BE(ringp, brp->req_cons++));
		else {
			/*
			 * Arm req_event so the frontend notifies us of
			 * the next request, then re-check after the
			 * barrier to close the race where a request
			 * arrived between the first check and arming.
			 */
			ddi_put32(ringp->xr_acc_hdl, &brp->sring->req_event,
			    brp->req_cons + 1);
			membar_enter();
			if (xvdi_ring_has_unconsumed_requests(ringp))
				return (GET_RING_ENTRY_BE(ringp,
				    brp->req_cons++));
			else
				return (NULL);
		}
	}
}
2141 2142
/*
 * Publish privately queued requests to the shared ring.  Returns
 * nonzero if the backend must be notified (via event channel).
 */
int
xvdi_ring_push_request(xendev_ring_t *ringp)
{
	RING_IDX old, new, reqevt;
	comif_ring_fe_t *frp;

	/* only frontend should be able to push request */
	ASSERT(ringp->xr_frontend);

	/* RING_PUSH_REQUEST_AND_CHECK_NOTIFY() */
	frp = FRONT_RING(ringp);
	old = ddi_get32(ringp->xr_acc_hdl, &frp->sring->req_prod);
	new = frp->req_prod_pvt;
	ddi_put32(ringp->xr_acc_hdl, &frp->sring->req_prod, new);
	/* Make the new requests visible before sampling req_event. */
	membar_enter();
	reqevt = ddi_get32(ringp->xr_acc_hdl, &frp->sring->req_event);
	/*
	 * Notify only if req_event lies within the window of requests
	 * just published (old, new] — wrap-safe unsigned comparison.
	 */
	return ((RING_IDX)(new - reqevt) < (RING_IDX)(new - old));
}
2160 2161
/* NOTE: rsp_event will be increased as needed */
/*
 * Return a pointer to the next response slot and advance the relevant
 * index.  Backend: the next slot to produce a response into (assumed
 * available).  Frontend: the next unconsumed response, or NULL.
 */
void *
xvdi_ring_get_response(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (!ringp->xr_frontend) {
		/* for backend ring */
		brp = BACK_RING(ringp);
		return (GET_RING_ENTRY_BE(ringp, brp->rsp_prod_pvt++));
	} else {
		/* for frontend ring */
		frp = FRONT_RING(ringp);
		/* RING_FINAL_CHECK_FOR_RESPONSES() */
		if (xvdi_ring_has_unconsumed_responses(ringp))
			return (GET_RING_ENTRY_FE(ringp, frp->rsp_cons++));
		else {
			/*
			 * Arm rsp_event for the next response, then
			 * re-check after the barrier to close the race
			 * where a response landed between the first
			 * check and arming the event.
			 */
			ddi_put32(ringp->xr_acc_hdl, &frp->sring->rsp_event,
			    frp->rsp_cons + 1);
			membar_enter();
			if (xvdi_ring_has_unconsumed_responses(ringp))
				return (GET_RING_ENTRY_FE(ringp,
				    frp->rsp_cons++));
			else
				return (NULL);
		}
	}
}
2190 2191
/*
 * Publish privately queued responses to the shared ring.  Returns
 * nonzero if the frontend must be notified (via event channel).
 */
int
xvdi_ring_push_response(xendev_ring_t *ringp)
{
	RING_IDX old, new, rspevt;
	comif_ring_be_t *brp;

	/* only backend should be able to push response */
	ASSERT(!ringp->xr_frontend);

	/* RING_PUSH_RESPONSE_AND_CHECK_NOTIFY() */
	brp = BACK_RING(ringp);
	old = ddi_get32(ringp->xr_acc_hdl, &brp->sring->rsp_prod);
	new = brp->rsp_prod_pvt;
	ddi_put32(ringp->xr_acc_hdl, &brp->sring->rsp_prod, new);
	/* Make the new responses visible before sampling rsp_event. */
	membar_enter();
	rspevt = ddi_get32(ringp->xr_acc_hdl, &brp->sring->rsp_event);
	/*
	 * Notify only if rsp_event lies within the window of responses
	 * just published (old, new] — wrap-safe unsigned comparison.
	 */
	return ((RING_IDX)(new - rspevt) < (RING_IDX)(new - old));
}
2209 2210
2210 2211 static void
2211 2212 xvdi_ring_init_sring(xendev_ring_t *ringp)
2212 2213 {
2213 2214 ddi_acc_handle_t acchdl;
2214 2215 comif_sring_t *xsrp;
2215 2216 int i;
2216 2217
2217 2218 xsrp = (comif_sring_t *)ringp->xr_vaddr;
2218 2219 acchdl = ringp->xr_acc_hdl;
2219 2220
2220 2221 /* shared ring initialization */
2221 2222 ddi_put32(acchdl, &xsrp->req_prod, 0);
2222 2223 ddi_put32(acchdl, &xsrp->rsp_prod, 0);
2223 2224 ddi_put32(acchdl, &xsrp->req_event, 1);
2224 2225 ddi_put32(acchdl, &xsrp->rsp_event, 1);
2225 2226 for (i = 0; i < sizeof (xsrp->pad); i++)
2226 2227 ddi_put8(acchdl, xsrp->pad + i, 0);
2227 2228 }
2228 2229
2229 2230 static void
2230 2231 xvdi_ring_init_front_ring(xendev_ring_t *ringp, size_t nentry, size_t entrysize)
2231 2232 {
2232 2233 comif_ring_fe_t *xfrp;
2233 2234
2234 2235 xfrp = &ringp->xr_sring.fr;
2235 2236 xfrp->req_prod_pvt = 0;
2236 2237 xfrp->rsp_cons = 0;
2237 2238 xfrp->nr_ents = nentry;
2238 2239 xfrp->sring = (comif_sring_t *)ringp->xr_vaddr;
2239 2240
2240 2241 ringp->xr_frontend = 1;
2241 2242 ringp->xr_entry_size = entrysize;
2242 2243 }
2243 2244
2244 2245 #ifndef XPV_HVM_DRIVER
2245 2246 static void
2246 2247 xvdi_ring_init_back_ring(xendev_ring_t *ringp, size_t nentry, size_t entrysize)
2247 2248 {
2248 2249 comif_ring_be_t *xbrp;
2249 2250
2250 2251 xbrp = &ringp->xr_sring.br;
2251 2252 xbrp->rsp_prod_pvt = 0;
2252 2253 xbrp->req_cons = 0;
2253 2254 xbrp->nr_ents = nentry;
2254 2255 xbrp->sring = (comif_sring_t *)ringp->xr_vaddr;
2255 2256
2256 2257 ringp->xr_frontend = 0;
2257 2258 ringp->xr_entry_size = entrysize;
2258 2259 }
2259 2260 #endif /* XPV_HVM_DRIVER */
2260 2261
2261 2262 static void
2262 2263 xendev_offline_device(void *arg)
2263 2264 {
2264 2265 dev_info_t *dip = (dev_info_t *)arg;
2265 2266 char devname[MAXNAMELEN] = {0};
2266 2267
2267 2268 /*
2268 2269 * This is currently the only chance to delete a devinfo node, which
2269 2270 * is _not_ always successful.
2270 2271 */
2271 2272 (void) ddi_deviname(dip, devname);
2272 2273 (void) devfs_clean(ddi_get_parent(dip), devname + 1, DV_CLEAN_FORCE);
2273 2274 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
2274 2275 }
2275 2276
/*
 * xenbus callback: the otherend's XenbusState changed.  Record the new
 * state and, if it actually differs from what we last saw, hand the
 * event to the per-device taskq for delivery to the driver.
 */
static void
i_xvdi_oestate_cb(struct xenbus_device *dev, XenbusState oestate)
{
	dev_info_t *dip = (dev_info_t *)dev->data;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	i_oestate_evt_t *evt = NULL;
	boolean_t call_handler;

	XVDI_DPRINTF(XVDI_DBG_STATE,
	    "i_xvdi_oestate_cb: %s@%s sees oestate change to %d\n",
	    ddi_binding_name(dip) == NULL ? "null" : ddi_binding_name(dip),
	    ddi_get_name_addr(dip) == NULL ? "null" : ddi_get_name_addr(dip),
	    oestate);

	/* only call the handler if our state has changed */
	call_handler = B_FALSE;
	mutex_enter(&pdp->xd_ndi_lk);
	if (dev->otherend_state != oestate) {
		dev->otherend_state = oestate;
		call_handler = B_TRUE;
	}
	mutex_exit(&pdp->xd_ndi_lk);

	if (call_handler) {
		/*
		 * Try to deliver the oestate change event to the dip
		 */
		/* The taskq handler is responsible for freeing 'evt'. */
		evt = kmem_alloc(sizeof (i_oestate_evt_t), KM_SLEEP);
		evt->dip = dip;
		evt->state = oestate;
		(void) ddi_taskq_dispatch(pdp->xd_oe_taskq,
		    i_xvdi_oestate_handler, (void *)evt, DDI_SLEEP);
	}
}
2310 2311
/*
 * xenbus watch callback: the hotplug-status node for a backend device
 * changed.  Defer the actual handling to the per-device taskq.
 */
/*ARGSUSED*/
static void
i_xvdi_hpstate_cb(struct xenbus_watch *w, const char **vec,
    unsigned int len)
{
	dev_info_t *dip = (dev_info_t *)w->dev;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

#ifdef DEBUG
	/* Read the new hotplug status back purely for tracing. */
	char *hp_status = NULL;
	unsigned int hpl = 0;

	(void) xenbus_read(XBT_NULL, pdp->xd_hp_watch.node, "",
	    (void *)&hp_status, &hpl);
	XVDI_DPRINTF(XVDI_DBG_STATE,
	    "i_xvdi_hpstate_cb: %s@%s sees hpstate change to %s\n",
	    ddi_binding_name(dip) == NULL ? "null" : ddi_binding_name(dip),
	    ddi_get_name_addr(dip) == NULL ? "null" : ddi_get_name_addr(dip),
	    hp_status == NULL ? "null" : hp_status);
	if (hp_status != NULL)
		kmem_free(hp_status, hpl);
#endif /* DEBUG */

	(void) ddi_taskq_dispatch(pdp->xd_hp_taskq,
	    i_xvdi_hpstate_handler, (void *)dip, DDI_SLEEP);
}
2337 2338
/*
 * Taskq handler for a newly appeared xenstore path.  Determine which
 * device class the path belongs to (frontend or backend), parse the
 * domain id and virtual device number out of it, and create the
 * corresponding devinfo node if one does not already exist.
 *
 * Ownership: 'arg' is a kmem-allocated NUL-terminated path string that
 * this function frees before returning.
 */
static void
i_xvdi_probe_path_handler(void *arg)
{
	dev_info_t *parent;
	char *path = arg, *p = NULL;
	int i, vdev, circ;
	i_xd_cfg_t *xdcp;
	boolean_t frontend;
	domid_t dom;

	/*
	 * Match the path against each configured device class; 'p' is
	 * left pointing just past the matched prefix.
	 */
	for (i = 0, xdcp = &xdci[0]; i < NXDC; i++, xdcp++) {

		if ((xdcp->xs_path_fe != NULL) &&
		    (strncmp(path, xdcp->xs_path_fe, strlen(xdcp->xs_path_fe))
		    == 0)) {

			frontend = B_TRUE;
			p = path + strlen(xdcp->xs_path_fe);
			break;
		}

		if ((xdcp->xs_path_be != NULL) &&
		    (strncmp(path, xdcp->xs_path_be, strlen(xdcp->xs_path_be))
		    == 0)) {

			frontend = B_FALSE;
			p = path + strlen(xdcp->xs_path_be);
			break;
		}

	}

	if (p == NULL) {
		cmn_err(CE_WARN, "i_xvdi_probe_path_handler: "
		    "unexpected path prefix in %s", path);
		goto done;
	}

	if (frontend) {
		/* Frontend paths carry only the vdev: ".../<vdev>/..." */
		dom = DOMID_SELF;
		if (sscanf(p, "/%d/", &vdev) != 1) {
			XVDI_DPRINTF(XVDI_DBG_PROBE,
			    "i_xvdi_probe_path_handler: "
			    "cannot parse frontend path %s",
			    path);
			goto done;
		}
	} else {
		/* Backend paths carry both: ".../<domid>/<vdev>/..." */
		if (sscanf(p, "/%hu/%d/", &dom, &vdev) != 2) {
			XVDI_DPRINTF(XVDI_DBG_PROBE,
			    "i_xvdi_probe_path_handler: "
			    "cannot parse backend path %s",
			    path);
			goto done;
		}
	}

	/*
	 * This is an oxymoron, so indicates a bogus configuration we
	 * must check for.
	 */
	if (vdev == VDEV_NOXS) {
		cmn_err(CE_WARN, "i_xvdi_probe_path_handler: "
		    "invalid path %s", path);
		goto done;
	}

	parent = xendev_dip;
	ASSERT(parent != NULL);

	/* Serialize child creation under the xendev nexus. */
	ndi_devi_enter(parent, &circ);

	if (xvdi_find_dev(parent, xdcp->devclass, dom, vdev) == NULL) {
		XVDI_DPRINTF(XVDI_DBG_PROBE,
		    "i_xvdi_probe_path_handler: create for %s", path);
		(void) xvdi_create_dev(parent, xdcp->devclass, dom, vdev);
	} else {
		XVDI_DPRINTF(XVDI_DBG_PROBE,
		    "i_xvdi_probe_path_handler: %s already exists", path);
	}

	ndi_devi_exit(parent, circ);

done:
	kmem_free(path, strlen(path) + 1);
}
|
↓ open down ↓ |
2077 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX