NEX-17521 Unable to install Nexentastor on Lenovo SR650 platform
8702 PCI addresses with physaddr > 0xffffffff can't be mapped in
8703 pci/npe DDI_CTLOPS_REGSIZE should be 64-bit aware
8704 want OFF_MAX in the kernel
8705 unsupported 64-bit prefetch memory on pci-pci bridge
Contributed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Albert Lee <trisk@forkgnu.org>
9550 Create hub symlinks for xhci devices
Contributed by: Alexander Pyhalov <apyhalov@gmail.com>
Reviewed by: Andy Stormont <astormont@racktopsystems.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
NEX-15925 pseudonex, rootnex, and friends don't need to log useless device announcements
Reviewed by: Dan Fields <dan.fields@nexenta.com>
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
NEX-5164 backport illumos 6514 AS_* lock macros simplification
Reviewed by: Kevin Crowe <kevin.crowe@nexenta.com>
6514 AS_* lock macros simplification
Reviewed by: Piotr Jasiukajtis <estibi@me.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Albert Lee <trisk@omniti.com>
Approved by: Dan McDonald <danmcd@omniti.com>
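Note on the change (not part of the webrev itself): the heart of bugs 8702/8703 is that rootnex_map() now normalizes every incoming register specification to the 64-bit struct regspec64 before doing any mapping work, so PCI physical addresses above 0xffffffff are no longer truncated to 32 bits. The sketch below is a minimal illustration of that conversion; normalize_regspec() is a hypothetical helper name, the RNUMBER lookup path is omitted, and the real logic lives inline in rootnex_map() in the diff that follows.

/*
 * Illustrative sketch only -- simplified from the inline logic in
 * rootnex_map(); not part of the actual change.
 */
static void
normalize_regspec(ddi_map_req_t *mp, struct regspec64 *rp)
{
	if (!(mp->map_flags & DDI_MF_EXT_REGSPEC)) {
		/* Widen a legacy 32-bit regspec into 64-bit fields. */
		struct regspec *orp = mp->map_obj.rp;

		rp->regspec_bustype = orp->regspec_bustype;
		rp->regspec_addr = orp->regspec_addr;
		rp->regspec_size = orp->regspec_size;
	} else {
		/* The child already handed us a 64-bit extended regspec. */
		*rp = *(struct regspec64 *)mp->map_obj.rp;
	}

	/* From here on, everything operates on the 64-bit form. */
	mp->map_type = DDI_MT_REGSPEC;
	mp->map_flags |= DDI_MF_EXT_REGSPEC;
	mp->map_obj.rp = (struct regspec *)rp;
}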
--- old/usr/src/uts/i86pc/io/rootnex.c
+++ new/usr/src/uts/i86pc/io/rootnex.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 +
21 22 /*
22 23 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23 24 */
25 +
24 26 /*
25 - * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
27 + * Copyright 2018 Nexenta Systems, Inc.
26 28 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
27 29 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
28 30 * Copyright 2017 Joyent, Inc.
29 31 */
30 32
31 33 /*
32 34 * x86 root nexus driver
33 35 */
34 36
35 37 #include <sys/sysmacros.h>
36 38 #include <sys/conf.h>
37 39 #include <sys/autoconf.h>
38 40 #include <sys/sysmacros.h>
39 41 #include <sys/debug.h>
40 42 #include <sys/psw.h>
41 43 #include <sys/ddidmareq.h>
42 44 #include <sys/promif.h>
43 45 #include <sys/devops.h>
44 46 #include <sys/kmem.h>
45 47 #include <sys/cmn_err.h>
46 48 #include <vm/seg.h>
47 49 #include <vm/seg_kmem.h>
48 50 #include <vm/seg_dev.h>
49 51 #include <sys/vmem.h>
50 52 #include <sys/mman.h>
51 53 #include <vm/hat.h>
52 54 #include <vm/as.h>
53 55 #include <vm/page.h>
54 56 #include <sys/avintr.h>
55 57 #include <sys/errno.h>
56 58 #include <sys/modctl.h>
57 59 #include <sys/ddi_impldefs.h>
58 60 #include <sys/sunddi.h>
59 61 #include <sys/sunndi.h>
60 62 #include <sys/mach_intr.h>
61 63 #include <sys/psm.h>
62 64 #include <sys/ontrap.h>
63 65 #include <sys/atomic.h>
64 66 #include <sys/sdt.h>
65 67 #include <sys/rootnex.h>
66 68 #include <vm/hat_i86.h>
67 69 #include <sys/ddifm.h>
68 70 #include <sys/ddi_isa.h>
69 71 #include <sys/apic.h>
70 72
71 73 #ifdef __xpv
72 74 #include <sys/bootinfo.h>
73 75 #include <sys/hypervisor.h>
74 76 #include <sys/bootconf.h>
75 77 #include <vm/kboot_mmu.h>
76 78 #endif
77 79
78 80 #if defined(__amd64) && !defined(__xpv)
79 81 #include <sys/immu.h>
80 82 #endif
81 83
82 84
83 85 /*
84 86 * enable/disable extra checking of function parameters. Useful for debugging
85 87 * drivers.
86 88 */
87 89 #ifdef DEBUG
88 90 int rootnex_alloc_check_parms = 1;
89 91 int rootnex_bind_check_parms = 1;
90 92 int rootnex_bind_check_inuse = 1;
91 93 int rootnex_unbind_verify_buffer = 0;
92 94 int rootnex_sync_check_parms = 1;
93 95 #else
94 96 int rootnex_alloc_check_parms = 0;
95 97 int rootnex_bind_check_parms = 0;
96 98 int rootnex_bind_check_inuse = 0;
97 99 int rootnex_unbind_verify_buffer = 0;
98 100 int rootnex_sync_check_parms = 0;
99 101 #endif
100 102
101 103 boolean_t rootnex_dmar_not_setup;
102 104
103 105 /* Master Abort and Target Abort panic flag */
104 106 int rootnex_fm_ma_ta_panic_flag = 0;
105 107
106 108 /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
107 109 int rootnex_bind_fail = 1;
108 110 int rootnex_bind_warn = 1;
109 111 uint8_t *rootnex_warn_list;
110 112 /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
111 113 #define ROOTNEX_BIND_WARNING (0x1 << 0)
112 114
113 115 /*
114 116 * revert back to old broken behavior of always sync'ing entire copy buffer.
115 117 * This is useful if we have a buggy driver which doesn't correctly pass in
116 118 * the offset and size into ddi_dma_sync().
117 119 */
118 120 int rootnex_sync_ignore_params = 0;
119 121
120 122 /*
121 123 * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
122 124 * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
123 125 * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
124 126 * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65
125 127 * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages
126 128 * (< 8K). We will still need to allocate the copy buffer during bind though
127 129 * (if we need one). These can only be modified in /etc/system before rootnex
128 130 * attach.
129 131 */
130 132 #if defined(__amd64)
131 133 int rootnex_prealloc_cookies = 65;
132 134 int rootnex_prealloc_windows = 4;
133 135 int rootnex_prealloc_copybuf = 2;
134 136 #else
135 137 int rootnex_prealloc_cookies = 33;
136 138 int rootnex_prealloc_windows = 4;
137 139 int rootnex_prealloc_copybuf = 2;
138 140 #endif
139 141
140 142 /* driver global state */
141 143 static rootnex_state_t *rootnex_state;
142 144
143 145 #ifdef DEBUG
144 146 /* shortcut to rootnex counters */
145 147 static uint64_t *rootnex_cnt;
146 148 #endif
147 149
148 150 /*
149 151 * XXX - does x86 even need these or are they left over from the SPARC days?
150 152 */
151 153 /* statically defined integer/boolean properties for the root node */
152 154 static rootnex_intprop_t rootnex_intprp[] = {
153 155 { "PAGESIZE", PAGESIZE },
154 156 { "MMU_PAGESIZE", MMU_PAGESIZE },
155 157 { "MMU_PAGEOFFSET", MMU_PAGEOFFSET },
156 158 { DDI_RELATIVE_ADDRESSING, 1 },
157 159 };
158 160 #define NROOT_INTPROPS (sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
159 161
160 162 /*
161 163 * If we're dom0, we're using a real device so we need to load
162 164 * the cookies with MFNs instead of PFNs.
163 165 */
164 166 #ifdef __xpv
165 167 typedef maddr_t rootnex_addr_t;
166 168 #define ROOTNEX_PADDR_TO_RBASE(pa) \
167 169 (DOMAIN_IS_INITDOMAIN(xen_info) ? pa_to_ma(pa) : (pa))
168 170 #else
169 171 typedef paddr_t rootnex_addr_t;
170 172 #define ROOTNEX_PADDR_TO_RBASE(pa) (pa)
171 173 #endif
172 174
173 175 static struct cb_ops rootnex_cb_ops = {
174 176 nodev, /* open */
175 177 nodev, /* close */
176 178 nodev, /* strategy */
177 179 nodev, /* print */
178 180 nodev, /* dump */
179 181 nodev, /* read */
180 182 nodev, /* write */
181 183 nodev, /* ioctl */
182 184 nodev, /* devmap */
183 185 nodev, /* mmap */
184 186 nodev, /* segmap */
185 187 nochpoll, /* chpoll */
186 188 ddi_prop_op, /* cb_prop_op */
187 189 NULL, /* struct streamtab */
188 190 D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */
189 191 CB_REV, /* Rev */
190 192 nodev, /* cb_aread */
191 193 nodev /* cb_awrite */
192 194 };
193 195
194 196 static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
195 197 off_t offset, off_t len, caddr_t *vaddrp);
196 198 static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
197 199 struct hat *hat, struct seg *seg, caddr_t addr,
198 200 struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
199 201 static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
200 202 ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
201 203 ddi_dma_handle_t *handlep);
202 204 static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
203 205 ddi_dma_handle_t handle);
204 206 static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
205 207 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
206 208 ddi_dma_cookie_t *cookiep, uint_t *ccountp);
207 209 static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
208 210 ddi_dma_handle_t handle);
209 211 static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip,
210 212 ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
211 213 static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
212 214 ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
213 215 ddi_dma_cookie_t *cookiep, uint_t *ccountp);
214 216 static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
215 217 ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
216 218 off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
217 219 static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
218 220 ddi_ctl_enum_t ctlop, void *arg, void *result);
219 221 static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
220 222 ddi_iblock_cookie_t *ibc);
221 223 static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip,
222 224 ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
223 225 static int rootnex_alloc_intr_fixed(dev_info_t *, ddi_intr_handle_impl_t *,
224 226 void *);
225 227 static int rootnex_free_intr_fixed(dev_info_t *, ddi_intr_handle_impl_t *);
226 228
227 229 static int rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
228 230 ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
229 231 ddi_dma_handle_t *handlep);
230 232 static int rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
231 233 ddi_dma_handle_t handle);
232 234 static int rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
233 235 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
234 236 ddi_dma_cookie_t *cookiep, uint_t *ccountp);
235 237 static int rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
236 238 ddi_dma_handle_t handle);
237 239 #if defined(__amd64) && !defined(__xpv)
238 240 static void rootnex_coredma_reset_cookies(dev_info_t *dip,
239 241 ddi_dma_handle_t handle);
240 242 static int rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
241 243 ddi_dma_cookie_t **cookiepp, uint_t *ccountp);
242 244 static int rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
243 245 ddi_dma_cookie_t *cookiep, uint_t ccount);
244 246 static int rootnex_coredma_clear_cookies(dev_info_t *dip,
245 247 ddi_dma_handle_t handle);
246 248 static int rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle);
247 249 #endif
248 250 static int rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip,
249 251 ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
250 252 static int rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip,
251 253 ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
252 254 ddi_dma_cookie_t *cookiep, uint_t *ccountp);
253 255
254 256 #if defined(__amd64) && !defined(__xpv)
255 257 static int rootnex_coredma_hdl_setprivate(dev_info_t *dip, dev_info_t *rdip,
256 258 ddi_dma_handle_t handle, void *v);
257 259 static void *rootnex_coredma_hdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
258 260 ddi_dma_handle_t handle);
259 261 #endif
260 262
261 263
262 264 static struct bus_ops rootnex_bus_ops = {
263 265 BUSO_REV,
264 266 rootnex_map,
265 267 NULL,
266 268 NULL,
267 269 NULL,
268 270 rootnex_map_fault,
269 271 0,
270 272 rootnex_dma_allochdl,
271 273 rootnex_dma_freehdl,
272 274 rootnex_dma_bindhdl,
273 275 rootnex_dma_unbindhdl,
274 276 rootnex_dma_sync,
275 277 rootnex_dma_win,
276 278 rootnex_dma_mctl,
277 279 rootnex_ctlops,
278 280 ddi_bus_prop_op,
279 281 i_ddi_rootnex_get_eventcookie,
280 282 i_ddi_rootnex_add_eventcall,
281 283 i_ddi_rootnex_remove_eventcall,
282 284 i_ddi_rootnex_post_event,
283 285 0, /* bus_intr_ctl */
284 286 0, /* bus_config */
285 287 0, /* bus_unconfig */
286 288 rootnex_fm_init, /* bus_fm_init */
287 289 NULL, /* bus_fm_fini */
288 290 NULL, /* bus_fm_access_enter */
289 291 NULL, /* bus_fm_access_exit */
290 292 NULL, /* bus_power */
291 293 rootnex_intr_ops /* bus_intr_op */
292 294 };
293 295
294 296 static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
295 297 static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
296 298 static int rootnex_quiesce(dev_info_t *dip);
297 299
298 300 static struct dev_ops rootnex_ops = {
299 301 DEVO_REV,
300 302 0,
301 303 ddi_no_info,
302 304 nulldev,
303 305 nulldev,
304 306 rootnex_attach,
305 307 rootnex_detach,
306 308 nulldev,
307 309 &rootnex_cb_ops,
308 310 &rootnex_bus_ops,
309 311 NULL,
310 312 rootnex_quiesce, /* quiesce */
311 313 };
312 314
313 315 static struct modldrv rootnex_modldrv = {
314 316 &mod_driverops,
315 317 "i86pc root nexus",
316 318 &rootnex_ops
317 319 };
318 320
319 321 static struct modlinkage rootnex_modlinkage = {
320 322 MODREV_1,
321 323 (void *)&rootnex_modldrv,
322 324 NULL
323 325 };
324 326
325 327 #if defined(__amd64) && !defined(__xpv)
326 328 static iommulib_nexops_t iommulib_nexops = {
327 329 IOMMU_NEXOPS_VERSION,
328 330 "Rootnex IOMMU ops Vers 1.1",
329 331 NULL,
330 332 rootnex_coredma_allochdl,
331 333 rootnex_coredma_freehdl,
332 334 rootnex_coredma_bindhdl,
333 335 rootnex_coredma_unbindhdl,
334 336 rootnex_coredma_reset_cookies,
335 337 rootnex_coredma_get_cookies,
336 338 rootnex_coredma_set_cookies,
337 339 rootnex_coredma_clear_cookies,
338 340 rootnex_coredma_get_sleep_flags,
339 341 rootnex_coredma_sync,
340 342 rootnex_coredma_win,
341 343 rootnex_coredma_hdl_setprivate,
342 344 rootnex_coredma_hdl_getprivate
343 345 };
344 346 #endif
345 347
346 348 /*
347 349 * extern hacks
348 350 */
349 351 extern struct seg_ops segdev_ops;
350 352 extern int ignore_hardware_nodes; /* force flag from ddi_impl.c */
351 353 #ifdef DDI_MAP_DEBUG
352 354 extern int ddi_map_debug_flag;
353 355 #define ddi_map_debug if (ddi_map_debug_flag) prom_printf
354 356 #endif
355 357 extern void i86_pp_map(page_t *pp, caddr_t kaddr);
356 358 extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
357 359 extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
358 360 psm_intr_op_t, int *);
359 361 extern int impl_ddi_sunbus_initchild(dev_info_t *dip);
360 362 extern void impl_ddi_sunbus_removechild(dev_info_t *dip);
361 363
362 364 /*
363 365 * Use the device arena for device control register mappings.
364 366 * Various kernel memory walkers (debugger, dtrace) need to know
365 367 * to avoid this address range to prevent undesired device activity.
366 368 */
367 369 extern void *device_arena_alloc(size_t size, int vm_flag);
368 370 extern void device_arena_free(void * vaddr, size_t size);
369 371
370 372
371 373 /*
372 374 * Internal functions
373 375 */
374 376 static int rootnex_dma_init();
375 377 static void rootnex_add_props(dev_info_t *);
376 378 static int rootnex_ctl_reportdev(dev_info_t *dip);
377 379 static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum);
378 380 static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
379 381 static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
380 382 static int rootnex_map_handle(ddi_map_req_t *mp);
381 383 static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp);
382 384 static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize);
383 385 static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq,
384 386 ddi_dma_attr_t *attr);
385 387 static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
386 388 rootnex_sglinfo_t *sglinfo);
387 389 static void rootnex_dvma_get_sgl(ddi_dma_obj_t *dmar_object,
388 390 ddi_dma_cookie_t *sgl, rootnex_sglinfo_t *sglinfo);
389 391 static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
390 392 rootnex_dma_t *dma, ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag);
391 393 static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
392 394 rootnex_dma_t *dma, ddi_dma_attr_t *attr);
393 395 static void rootnex_teardown_copybuf(rootnex_dma_t *dma);
394 396 static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
395 397 ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag);
396 398 static void rootnex_teardown_windows(rootnex_dma_t *dma);
397 399 static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
398 400 rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset);
399 401 static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object,
400 402 rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset,
401 403 size_t *copybuf_used, page_t **cur_pp);
402 404 static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp,
403 405 rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie,
404 406 ddi_dma_attr_t *attr, off_t cur_offset);
405 407 static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp,
406 408 rootnex_dma_t *dma, rootnex_window_t **windowp,
407 409 ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used);
408 410 static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp,
409 411 rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie);
410 412 static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
411 413 off_t offset, size_t size, uint_t cache_flags);
412 414 static int rootnex_verify_buffer(rootnex_dma_t *dma);
413 415 static int rootnex_dma_check(dev_info_t *dip, const void *handle,
414 416 const void *comp_addr, const void *not_used);
415 417 static boolean_t rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object,
416 418 rootnex_sglinfo_t *sglinfo);
417 419 static struct as *rootnex_get_as(ddi_dma_obj_t *dmar_object);
418 420
419 421 /*
420 422 * _init()
421 423 *
422 424 */
423 425 int
424 426 _init(void)
425 427 {
426 428
427 429 rootnex_state = NULL;
428 430 return (mod_install(&rootnex_modlinkage));
429 431 }
430 432
431 433
432 434 /*
433 435 * _info()
434 436 *
435 437 */
436 438 int
437 439 _info(struct modinfo *modinfop)
438 440 {
439 441 return (mod_info(&rootnex_modlinkage, modinfop));
440 442 }
441 443
442 444
443 445 /*
444 446 * _fini()
445 447 *
446 448 */
447 449 int
448 450 _fini(void)
449 451 {
450 452 return (EBUSY);
451 453 }
452 454
453 455
454 456 /*
455 457 * rootnex_attach()
456 458 *
457 459 */
458 460 static int
459 461 rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
460 462 {
461 463 int fmcap;
462 464 int e;
463 465
464 466 switch (cmd) {
465 467 case DDI_ATTACH:
466 468 break;
467 469 case DDI_RESUME:
468 470 #if defined(__amd64) && !defined(__xpv)
469 471 return (immu_unquiesce());
470 472 #else
471 473 return (DDI_SUCCESS);
472 474 #endif
473 475 default:
474 476 return (DDI_FAILURE);
475 477 }
476 478
477 479 /*
478 480 * We should only have one instance of rootnex. Save it away since we
479 481 * don't have an easy way to get it back later.
480 482 */
481 483 ASSERT(rootnex_state == NULL);
482 484 rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP);
483 485
484 486 rootnex_state->r_dip = dip;
485 487 rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15);
486 488 rootnex_state->r_reserved_msg_printed = B_FALSE;
487 489 #ifdef DEBUG
488 490 rootnex_cnt = &rootnex_state->r_counters[0];
489 491 #endif
490 492
491 493 /*
492 494 * Set minimum fm capability level for i86pc platforms and then
493 495 * initialize error handling. Since we're the rootnex, we don't
494 496 * care what's returned in the fmcap field.
495 497 */
496 498 ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
497 499 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
498 500 fmcap = ddi_system_fmcap;
499 501 ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc);
500 502
501 503 /* initialize DMA related state */
502 504 e = rootnex_dma_init();
503 505 if (e != DDI_SUCCESS) {
504 506 kmem_free(rootnex_state, sizeof (rootnex_state_t));
505 507 return (DDI_FAILURE);
506 508 }
507 509
508 510 /* Add static root node properties */
509 511 rootnex_add_props(dip);
510 512
511 513 /* since we can't call ddi_report_dev() */
512 514 cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip));
513 515
514 516 /* Initialize rootnex event handle */
515 517 i_ddi_rootnex_init_events(dip);
516 518
517 519 #if defined(__amd64) && !defined(__xpv)
518 520 e = iommulib_nexus_register(dip, &iommulib_nexops,
519 521 &rootnex_state->r_iommulib_handle);
520 522
521 523 ASSERT(e == DDI_SUCCESS);
522 524 #endif
523 525
524 526 return (DDI_SUCCESS);
525 527 }
526 528
527 529
528 530 /*
529 531 * rootnex_detach()
530 532 *
531 533 */
532 534 /*ARGSUSED*/
533 535 static int
534 536 rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
535 537 {
536 538 switch (cmd) {
537 539 case DDI_SUSPEND:
538 540 #if defined(__amd64) && !defined(__xpv)
539 541 return (immu_quiesce());
540 542 #else
541 543 return (DDI_SUCCESS);
542 544 #endif
543 545 default:
544 546 return (DDI_FAILURE);
545 547 }
546 548 /*NOTREACHED*/
547 549
548 550 }
549 551
550 552
551 553 /*
552 554 * rootnex_dma_init()
553 555 *
554 556 */
555 557 /*ARGSUSED*/
556 558 static int
557 559 rootnex_dma_init()
558 560 {
559 561 size_t bufsize;
560 562
561 563
562 564 /*
563 565 * size of our cookie/window/copybuf state needed in dma bind that we
564 566 * pre-alloc in dma_alloc_handle
565 567 */
566 568 rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies;
567 569 rootnex_state->r_prealloc_size =
568 570 (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) +
569 571 (rootnex_prealloc_windows * sizeof (rootnex_window_t)) +
570 572 (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t));
571 573
572 574 /*
573 575 * setup DDI DMA handle kmem cache, align each handle on 64 bytes,
574 576 * allocate 16 extra bytes for struct pointer alignment
575 577 * (p->dmai_private & dma->dp_prealloc_buffer)
576 578 */
577 579 bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) +
578 580 rootnex_state->r_prealloc_size + 0x10;
579 581 rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl",
580 582 bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0);
581 583 if (rootnex_state->r_dmahdl_cache == NULL) {
582 584 return (DDI_FAILURE);
583 585 }
584 586
585 587 /*
586 588 * allocate array to track which major numbers we have printed warnings
587 589 * for.
588 590 */
589 591 rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
590 592 KM_SLEEP);
591 593
592 594 return (DDI_SUCCESS);
593 595 }
594 596
595 597
596 598 /*
597 599 * rootnex_add_props()
598 600 *
599 601 */
600 602 static void
601 603 rootnex_add_props(dev_info_t *dip)
602 604 {
603 605 rootnex_intprop_t *rpp;
604 606 int i;
605 607
606 608 /* Add static integer/boolean properties to the root node */
607 609 rpp = rootnex_intprp;
608 610 for (i = 0; i < NROOT_INTPROPS; i++) {
609 611 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
610 612 rpp[i].prop_name, rpp[i].prop_value);
611 613 }
612 614 }
613 615
614 616
615 617
616 618 /*
617 619 * *************************
618 620 * ctlops related routines
619 621 * *************************
620 622 */
621 623
622 624 /*
623 625 * rootnex_ctlops()
624 626 *
625 627 */
626 628 /*ARGSUSED*/
627 629 static int
628 630 rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
629 631 void *arg, void *result)
630 632 {
631 633 int n, *ptr;
632 634 struct ddi_parent_private_data *pdp;
633 635
634 636 switch (ctlop) {
635 637 case DDI_CTLOPS_DMAPMAPC:
636 638 /*
637 639 * Return 'partial' to indicate that dma mapping
638 640 * has to be done in the main MMU.
639 641 */
640 642 return (DDI_DMA_PARTIAL);
641 643
642 644 case DDI_CTLOPS_BTOP:
643 645 /*
644 646 * Convert byte count input to physical page units.
645 647 * (byte counts that are not a page-size multiple
646 648 * are rounded down)
647 649 */
648 650 *(ulong_t *)result = btop(*(ulong_t *)arg);
649 651 return (DDI_SUCCESS);
650 652
651 653 case DDI_CTLOPS_PTOB:
652 654 /*
653 655 * Convert size in physical pages to bytes
654 656 */
655 657 *(ulong_t *)result = ptob(*(ulong_t *)arg);
656 658 return (DDI_SUCCESS);
657 659
658 660 case DDI_CTLOPS_BTOPR:
659 661 /*
660 662 * Convert byte count input to physical page units
661 663 * (byte counts that are not a page-size multiple
662 664 * are rounded up)
663 665 */
664 666 *(ulong_t *)result = btopr(*(ulong_t *)arg);
665 667 return (DDI_SUCCESS);
666 668
667 669 case DDI_CTLOPS_INITCHILD:
668 670 return (impl_ddi_sunbus_initchild(arg));
669 671
670 672 case DDI_CTLOPS_UNINITCHILD:
671 673 impl_ddi_sunbus_removechild(arg);
672 674 return (DDI_SUCCESS);
673 675
674 676 case DDI_CTLOPS_REPORTDEV:
675 677 return (rootnex_ctl_reportdev(rdip));
676 678
677 679 case DDI_CTLOPS_IOMIN:
678 680 /*
679 681 * Nothing to do here but reflect back..
680 682 */
681 683 return (DDI_SUCCESS);
682 684
683 685 case DDI_CTLOPS_REGSIZE:
684 686 case DDI_CTLOPS_NREGS:
685 687 break;
686 688
687 689 case DDI_CTLOPS_SIDDEV:
688 690 if (ndi_dev_is_prom_node(rdip))
689 691 return (DDI_SUCCESS);
690 692 if (ndi_dev_is_persistent_node(rdip))
691 693 return (DDI_SUCCESS);
692 694 return (DDI_FAILURE);
693 695
694 696 case DDI_CTLOPS_POWER:
695 697 return ((*pm_platform_power)((power_req_t *)arg));
696 698
697 699 case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */
698 700 case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
699 701 case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
700 702 case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
701 703 case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
702 704 case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
703 705 if (!rootnex_state->r_reserved_msg_printed) {
704 706 rootnex_state->r_reserved_msg_printed = B_TRUE;
705 707 cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
706 708 "1 or more reserved/obsolete operations.");
707 709 }
708 710 return (DDI_FAILURE);
709 711
710 712 default:
711 713 return (DDI_FAILURE);
712 714 }
713 715 /*
714 716 * The rest are for "hardware" properties
715 717 */
716 718 if ((pdp = ddi_get_parent_data(rdip)) == NULL)
717 719 return (DDI_FAILURE);
718 720
719 721 if (ctlop == DDI_CTLOPS_NREGS) {
720 722 ptr = (int *)result;
721 723 *ptr = pdp->par_nreg;
722 724 } else {
723 725 off_t *size = (off_t *)result;
724 726
725 727 ptr = (int *)arg;
726 728 n = *ptr;
727 729 if (n >= pdp->par_nreg) {
728 730 return (DDI_FAILURE);
729 731 }
730 732 *size = (off_t)pdp->par_reg[n].regspec_size;
731 733 }
732 734 return (DDI_SUCCESS);
733 735 }
734 736
735 -
736 -/*
737 - * rootnex_ctl_reportdev()
738 - *
739 - */
737 +/*ARGSUSED*/
740 738 static int
741 739 rootnex_ctl_reportdev(dev_info_t *dev)
742 740 {
743 - int i, n, len, f_len = 0;
744 - char *buf;
745 -
746 - buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
747 - f_len += snprintf(buf, REPORTDEV_BUFSIZE,
748 - "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
749 - len = strlen(buf);
750 -
751 - for (i = 0; i < sparc_pd_getnreg(dev); i++) {
752 -
753 - struct regspec *rp = sparc_pd_getreg(dev, i);
754 -
755 - if (i == 0)
756 - f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
757 - ": ");
758 - else
759 - f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
760 - " and ");
761 - len = strlen(buf);
762 -
763 - switch (rp->regspec_bustype) {
764 -
765 - case BTEISA:
766 - f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
767 - "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
768 - break;
769 -
770 - case BTISA:
771 - f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
772 - "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
773 - break;
774 -
775 - default:
776 - f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
777 - "space %x offset %x",
778 - rp->regspec_bustype, rp->regspec_addr);
779 - break;
780 - }
781 - len = strlen(buf);
782 - }
783 - for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
784 - int pri;
785 -
786 - if (i != 0) {
787 - f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
788 - ",");
789 - len = strlen(buf);
790 - }
791 - pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
792 - f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
793 - " sparc ipl %d", pri);
794 - len = strlen(buf);
795 - }
796 -#ifdef DEBUG
797 - if (f_len + 1 >= REPORTDEV_BUFSIZE) {
798 - cmn_err(CE_NOTE, "next message is truncated: "
799 - "printed length 1024, real length %d", f_len);
800 - }
801 -#endif /* DEBUG */
802 - cmn_err(CE_CONT, "?%s\n", buf);
803 - kmem_free(buf, REPORTDEV_BUFSIZE);
804 741 return (DDI_SUCCESS);
805 742 }
806 743
807 744
808 745 /*
809 746 * ******************
810 747 * map related code
811 748 * ******************
812 749 */
813 750
814 751 /*
815 752 * rootnex_map()
816 753 *
817 754 */
818 755 static int
819 756 rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset,
820 757 off_t len, caddr_t *vaddrp)
821 758 {
822 759 struct regspec *orp = NULL;
823 760 struct regspec64 rp = { 0 };
824 761 ddi_map_req_t mr = *mp; /* Get private copy of request */
825 762
826 763 mp = &mr;
827 764
828 765 switch (mp->map_op) {
829 766 case DDI_MO_MAP_LOCKED:
830 767 case DDI_MO_UNMAP:
831 768 case DDI_MO_MAP_HANDLE:
832 769 break;
833 770 default:
834 771 #ifdef DDI_MAP_DEBUG
835 772 cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
836 773 mp->map_op);
837 774 #endif /* DDI_MAP_DEBUG */
838 775 return (DDI_ME_UNIMPLEMENTED);
839 776 }
840 777
841 778 if (mp->map_flags & DDI_MF_USER_MAPPING) {
842 779 #ifdef DDI_MAP_DEBUG
843 780 cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
844 781 #endif /* DDI_MAP_DEBUG */
845 782 return (DDI_ME_UNIMPLEMENTED);
846 783 }
847 784
848 785 /*
849 786 * First, we need to get the original regspec out before we convert it
850 787 * to the extended format. If we have a register number, then we need to
851 788 * convert that to a regspec.
852 789 */
853 790 if (mp->map_type == DDI_MT_RNUMBER) {
854 791
855 792 int rnumber = mp->map_obj.rnumber;
856 793 #ifdef DDI_MAP_DEBUG
857 794 static char *out_of_range =
858 795 "rootnex_map: Out of range rnumber <%d>, device <%s>";
859 796 #endif /* DDI_MAP_DEBUG */
860 797
861 798 orp = i_ddi_rnumber_to_regspec(rdip, rnumber);
862 799 if (orp == NULL) {
863 800 #ifdef DDI_MAP_DEBUG
864 801 cmn_err(CE_WARN, out_of_range, rnumber,
865 802 ddi_get_name(rdip));
866 803 #endif /* DDI_MAP_DEBUG */
867 804 return (DDI_ME_RNUMBER_RANGE);
868 805 }
869 806 } else if (!(mp->map_flags & DDI_MF_EXT_REGSPEC)) {
870 807 orp = mp->map_obj.rp;
871 808 }
872 809
873 810 /*
874 811 * Ensure that we are always using a 64-bit extended regspec regardless
875 812 * of what was passed into us. If the child driver is using a 64-bit
876 813 * regspec, then we need to make sure that we copy this to the local
877 814 * regspec64, rp.
878 815 */
879 816 if (orp != NULL) {
880 817 rp.regspec_bustype = orp->regspec_bustype;
881 818 rp.regspec_addr = orp->regspec_addr;
882 819 rp.regspec_size = orp->regspec_size;
883 820 } else {
884 821 struct regspec64 *rp64;
885 822 rp64 = (struct regspec64 *)mp->map_obj.rp;
886 823 rp = *rp64;
887 824 }
888 825
889 826 mp->map_type = DDI_MT_REGSPEC;
890 827 mp->map_flags |= DDI_MF_EXT_REGSPEC;
891 828 mp->map_obj.rp = (struct regspec *)&rp;
892 829
893 830 /*
894 831 * Adjust offset and length corresponding to the caller's values...
895 832 * XXX: A non-zero length means override the one in the regspec
896 833 * XXX: (regardless of what's in the parent's range?)
897 834 */
898 835
899 836 #ifdef DDI_MAP_DEBUG
900 837 cmn_err(CE_CONT, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d "
901 838 "handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
902 839 rp.regspec_bustype, rp.regspec_addr, rp.regspec_size, offset,
903 840 len, mp->map_handlep);
904 841 #endif /* DDI_MAP_DEBUG */
905 842
906 843 /*
907 844 * I/O or memory mapping:
908 845 *
909 846 * <bustype=0, addr=x, len=x>: memory
910 847 * <bustype=1, addr=x, len=x>: i/o
911 848 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
912 849 */
913 850
914 851 if (rp.regspec_bustype > 1 && rp.regspec_addr != 0) {
915 852 cmn_err(CE_WARN, "<%s,%s> invalid register spec"
916 853 " <0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64 ">",
917 854 ddi_get_name(dip), ddi_get_name(rdip), rp.regspec_bustype,
918 855 rp.regspec_addr, rp.regspec_size);
919 856 return (DDI_ME_INVAL);
920 857 }
921 858
922 859 if (rp.regspec_bustype > 1 && rp.regspec_addr == 0) {
923 860 /*
924 861 * compatibility i/o mapping
925 862 */
926 863 rp.regspec_bustype += offset;
927 864 } else {
928 865 /*
929 866 * Normal memory or i/o mapping
930 867 */
931 868 rp.regspec_addr += offset;
932 869 }
933 870
934 871 if (len != 0)
935 872 rp.regspec_size = len;
936 873
937 874 #ifdef DDI_MAP_DEBUG
938 875 cmn_err(CE_CONT, " <%s,%s> <0x%" PRIx64 ", 0x%" PRIx64
939 876 ", 0x%" PRId64 "> offset %d len %d handle 0x%x\n",
940 877 ddi_get_name(dip), ddi_get_name(rdip), rp.regspec_bustype,
941 878 rp.regspec_addr, rp.regspec_size, offset, len, mp->map_handlep);
942 879 #endif /* DDI_MAP_DEBUG */
943 880
944 881
945 882 /*
946 883 * The x86 root nexus does not have any notion of valid ranges of
947 884 * addresses. Its children have valid ranges, but because there are none
948 885 * for the nexus, we don't need to call i_ddi_apply_range(). Verify
949 886 * that is the case.
950 887 */
951 888 ASSERT0(sparc_pd_getnrng(dip));
952 889
953 890 switch (mp->map_op) {
954 891 case DDI_MO_MAP_LOCKED:
955 892
956 893 /*
957 894 * Set up the locked down kernel mapping to the regspec...
958 895 */
959 896
960 897 return (rootnex_map_regspec(mp, vaddrp));
961 898
962 899 case DDI_MO_UNMAP:
963 900
964 901 /*
965 902 * Release mapping...
966 903 */
967 904
968 905 return (rootnex_unmap_regspec(mp, vaddrp));
969 906
970 907 case DDI_MO_MAP_HANDLE:
971 908
972 909 return (rootnex_map_handle(mp));
973 910
974 911 default:
975 912 return (DDI_ME_UNIMPLEMENTED);
976 913 }
977 914 }
978 915
979 916
980 917 /*
981 918 * rootnex_map_fault()
982 919 *
983 920 * fault in mappings for requestors
984 921 */
985 922 /*ARGSUSED*/
986 923 static int
987 924 rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat,
988 925 struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot,
989 926 uint_t lock)
990 927 {
991 928
992 929 #ifdef DDI_MAP_DEBUG
993 930 ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
994 931 ddi_map_debug(" Seg <%s>\n",
995 932 seg->s_ops == &segdev_ops ? "segdev" :
996 933 seg == &kvseg ? "segkmem" : "NONE!");
997 934 #endif /* DDI_MAP_DEBUG */
998 935
999 936 /*
1000 937 * This is all terribly broken, but it is a start
1001 938 *
1002 939 * XXX Note that this test means that segdev_ops
1003 940 * must be exported from seg_dev.c.
1004 941 * XXX What about devices with their own segment drivers?
1005 942 */
1006 943 if (seg->s_ops == &segdev_ops) {
1007 944 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1008 945
1009 946 if (hat == NULL) {
1010 947 /*
1011 948 * This is one plausible interpretation of
1012 949 * a null hat i.e. use the first hat on the
1013 950 * address space hat list which by convention is
1014 951 * the hat of the system MMU. At alternative
1015 952 * the hat of the system MMU. An alternative
1016 953 */
1017 954 ASSERT(AS_READ_HELD(seg->s_as));
1018 955 hat = seg->s_as->a_hat;
1019 956 cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
1020 957 }
1021 958 hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
1022 959 (lock ? HAT_LOAD_LOCK : HAT_LOAD));
1023 960 } else if (seg == &kvseg && dp == NULL) {
1024 961 hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
1025 962 HAT_LOAD_LOCK);
1026 963 } else
1027 964 return (DDI_FAILURE);
1028 965 return (DDI_SUCCESS);
1029 966 }
1030 967
1031 968
1032 969 static int
1033 970 rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1034 971 {
1035 972 rootnex_addr_t rbase;
1036 973 void *cvaddr;
1037 974 uint64_t npages, pgoffset;
1038 975 struct regspec64 *rp;
1039 976 ddi_acc_hdl_t *hp;
1040 977 ddi_acc_impl_t *ap;
1041 978 uint_t hat_acc_flags;
1042 979 paddr_t pbase;
1043 980
1044 981 ASSERT(mp->map_flags & DDI_MF_EXT_REGSPEC);
1045 982 rp = (struct regspec64 *)mp->map_obj.rp;
1046 983 hp = mp->map_handlep;
1047 984
1048 985 #ifdef DDI_MAP_DEBUG
1049 986 ddi_map_debug(
1050 987 "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
1051 988 rp->regspec_bustype, rp->regspec_addr,
1052 989 rp->regspec_size, mp->map_handlep);
1053 990 #endif /* DDI_MAP_DEBUG */
1054 991
1055 992 /*
1056 993 * I/O or memory mapping
1057 994 *
1058 995 * <bustype=0, addr=x, len=x>: memory
1059 996 * <bustype=1, addr=x, len=x>: i/o
1060 997 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1061 998 */
1062 999
1063 1000 if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
1064 1001 cmn_err(CE_WARN, "rootnex: invalid register spec"
1065 1002 " <0x%" PRIx64 ", 0x%" PRIx64", 0x%" PRIx64">",
1066 1003 rp->regspec_bustype, rp->regspec_addr, rp->regspec_size);
1067 1004 return (DDI_FAILURE);
1068 1005 }
1069 1006
1070 1007 if (rp->regspec_bustype != 0) {
1071 1008 /*
1072 1009 * I/O space - needs a handle.
1073 1010 */
1074 1011 if (hp == NULL) {
1075 1012 return (DDI_FAILURE);
1076 1013 }
1077 1014 ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1078 1015 ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE;
1079 1016 impl_acc_hdl_init(hp);
1080 1017
1081 1018 if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
1082 1019 #ifdef DDI_MAP_DEBUG
1083 1020 ddi_map_debug("rootnex_map_regspec: mmap() "
1084 1021 "to I/O space is not supported.\n");
1085 1022 #endif /* DDI_MAP_DEBUG */
1086 1023 return (DDI_ME_INVAL);
1087 1024 } else {
1088 1025 /*
1089 1026 * 1275-compliant vs. compatibility i/o mapping
1090 1027 */
1091 1028 *vaddrp =
1092 1029 (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ?
1093 1030 ((caddr_t)(uintptr_t)rp->regspec_bustype) :
1094 1031 ((caddr_t)(uintptr_t)rp->regspec_addr);
1095 1032 #ifdef __xpv
1096 1033 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1097 1034 hp->ah_pfn = xen_assign_pfn(
1098 1035 mmu_btop((ulong_t)rp->regspec_addr &
1099 1036 MMU_PAGEMASK));
1100 1037 } else {
1101 1038 hp->ah_pfn = mmu_btop(
1102 1039 (ulong_t)rp->regspec_addr & MMU_PAGEMASK);
1103 1040 }
1104 1041 #else
1105 1042 hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr &
1106 1043 MMU_PAGEMASK);
1107 1044 #endif
1108 1045 hp->ah_pnum = mmu_btopr(rp->regspec_size +
1109 1046 (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET);
1110 1047 }
1111 1048
1112 1049 #ifdef DDI_MAP_DEBUG
1113 1050 ddi_map_debug(
1114 1051 "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
1115 1052 rp->regspec_size, *vaddrp);
1116 1053 #endif /* DDI_MAP_DEBUG */
1117 1054 return (DDI_SUCCESS);
1118 1055 }
1119 1056
1120 1057 /*
1121 1058 * Memory space
1122 1059 */
1123 1060
1124 1061 if (hp != NULL) {
1125 1062 /*
1126 1063 * hat layer ignores
1127 1064 * hp->ah_acc.devacc_attr_endian_flags.
1128 1065 */
1129 1066 switch (hp->ah_acc.devacc_attr_dataorder) {
1130 1067 case DDI_STRICTORDER_ACC:
1131 1068 hat_acc_flags = HAT_STRICTORDER;
1132 1069 break;
1133 1070 case DDI_UNORDERED_OK_ACC:
1134 1071 hat_acc_flags = HAT_UNORDERED_OK;
1135 1072 break;
1136 1073 case DDI_MERGING_OK_ACC:
1137 1074 hat_acc_flags = HAT_MERGING_OK;
1138 1075 break;
1139 1076 case DDI_LOADCACHING_OK_ACC:
1140 1077 hat_acc_flags = HAT_LOADCACHING_OK;
1141 1078 break;
1142 1079 case DDI_STORECACHING_OK_ACC:
1143 1080 hat_acc_flags = HAT_STORECACHING_OK;
1144 1081 break;
1145 1082 }
1146 1083 ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1147 1084 ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
1148 1085 impl_acc_hdl_init(hp);
1149 1086 hp->ah_hat_flags = hat_acc_flags;
1150 1087 } else {
1151 1088 hat_acc_flags = HAT_STRICTORDER;
1152 1089 }
1153 1090
1154 1091 rbase = (rootnex_addr_t)(rp->regspec_addr & MMU_PAGEMASK);
1155 1092 #ifdef __xpv
1156 1093 /*
1157 1094 * If we're dom0, we're using a real device so we need to translate
1158 1095 * the MA to a PA.
1159 1096 */
1160 1097 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1161 1098 pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase)));
1162 1099 } else {
1163 1100 pbase = rbase;
1164 1101 }
1165 1102 #else
1166 1103 pbase = rbase;
1167 1104 #endif
1168 1105 pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;
1169 1106
1170 1107 if (rp->regspec_size == 0) {
1171 1108 #ifdef DDI_MAP_DEBUG
1172 1109 ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
1173 1110 #endif /* DDI_MAP_DEBUG */
1174 1111 return (DDI_ME_INVAL);
1175 1112 }
1176 1113
1177 1114 if (mp->map_flags & DDI_MF_DEVICE_MAPPING) {
1178 1115 /* extra cast to make gcc happy */
1179 1116 *vaddrp = (caddr_t)((uintptr_t)mmu_btop(pbase));
1180 1117 } else {
1181 1118 npages = mmu_btopr(rp->regspec_size + pgoffset);
1182 1119
1183 1120 #ifdef DDI_MAP_DEBUG
1184 1121 ddi_map_debug("rootnex_map_regspec: Mapping %d pages "
1185 1122 "physical %llx", npages, pbase);
1186 1123 #endif /* DDI_MAP_DEBUG */
1187 1124
1188 1125 cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
1189 1126 if (cvaddr == NULL)
1190 1127 return (DDI_ME_NORESOURCES);
1191 1128
1192 1129 /*
1193 1130 * Now map in the pages we've allocated...
1194 1131 */
1195 1132 hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages),
1196 1133 mmu_btop(pbase), mp->map_prot | hat_acc_flags,
1197 1134 HAT_LOAD_LOCK);
1198 1135 *vaddrp = (caddr_t)cvaddr + pgoffset;
1199 1136
1200 1137 /* save away pfn and npages for FMA */
1201 1138 hp = mp->map_handlep;
1202 1139 if (hp) {
1203 1140 hp->ah_pfn = mmu_btop(pbase);
1204 1141 hp->ah_pnum = npages;
1205 1142 }
1206 1143 }
1207 1144
1208 1145 #ifdef DDI_MAP_DEBUG
1209 1146 ddi_map_debug("at virtual 0x%x\n", *vaddrp);
1210 1147 #endif /* DDI_MAP_DEBUG */
1211 1148 return (DDI_SUCCESS);
1212 1149 }
1213 1150
1214 1151
1215 1152 static int
1216 1153 rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp)
1217 1154 {
1218 1155 caddr_t addr = (caddr_t)*vaddrp;
1219 1156 uint64_t npages, pgoffset;
1220 1157 struct regspec64 *rp;
1221 1158
1222 1159 if (mp->map_flags & DDI_MF_DEVICE_MAPPING)
1223 1160 return (0);
1224 1161
1225 1162 ASSERT(mp->map_flags & DDI_MF_EXT_REGSPEC);
1226 1163 rp = (struct regspec64 *)mp->map_obj.rp;
1227 1164
1228 1165 if (rp->regspec_size == 0) {
1229 1166 #ifdef DDI_MAP_DEBUG
1230 1167 ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
1231 1168 #endif /* DDI_MAP_DEBUG */
1232 1169 return (DDI_ME_INVAL);
1233 1170 }
1234 1171
1235 1172 /*
1236 1173 * I/O or memory mapping:
1237 1174 *
1238 1175 * <bustype=0, addr=x, len=x>: memory
1239 1176 * <bustype=1, addr=x, len=x>: i/o
1240 1177 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1241 1178 */
1242 1179 if (rp->regspec_bustype != 0) {
1243 1180 /*
1244 1181 * This is I/O space, which requires no particular
1245 1182 * processing on unmap since it isn't mapped in the
1246 1183 * first place.
1247 1184 */
1248 1185 return (DDI_SUCCESS);
1249 1186 }
1250 1187
1251 1188 /*
1252 1189 * Memory space
1253 1190 */
1254 1191 pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
1255 1192 npages = mmu_btopr(rp->regspec_size + pgoffset);
1256 1193 hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK);
1257 1194 device_arena_free(addr - pgoffset, ptob(npages));
1258 1195
1259 1196 /*
1260 1197 * Destroy the pointer - the mapping has logically gone
1261 1198 */
1262 1199 *vaddrp = NULL;
1263 1200
1264 1201 return (DDI_SUCCESS);
1265 1202 }
1266 1203
1267 1204 static int
1268 1205 rootnex_map_handle(ddi_map_req_t *mp)
1269 1206 {
1270 1207 rootnex_addr_t rbase;
1271 1208 ddi_acc_hdl_t *hp;
1272 1209 uint64_t pgoffset;
1273 1210 struct regspec64 *rp;
1274 1211 paddr_t pbase;
1275 1212
1276 1213 rp = (struct regspec64 *)mp->map_obj.rp;
1277 1214
1278 1215 #ifdef DDI_MAP_DEBUG
1279 1216 ddi_map_debug(
1280 1217 "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
1281 1218 rp->regspec_bustype, rp->regspec_addr,
1282 1219 rp->regspec_size, mp->map_handlep);
1283 1220 #endif /* DDI_MAP_DEBUG */
1284 1221
1285 1222 /*
1286 1223 * I/O or memory mapping:
1287 1224 *
1288 1225 * <bustype=0, addr=x, len=x>: memory
1289 1226 * <bustype=1, addr=x, len=x>: i/o
1290 1227 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1291 1228 */
1292 1229 if (rp->regspec_bustype != 0) {
1293 1230 /*
1294 1231 * This refers to I/O space, and we don't support "mapping"
1295 1232 * I/O space to a user.
1296 1233 */
1297 1234 return (DDI_FAILURE);
1298 1235 }
1299 1236
1300 1237 /*
1301 1238 * Set up the hat_flags for the mapping.
1302 1239 */
1303 1240 hp = mp->map_handlep;
1304 1241
1305 1242 switch (hp->ah_acc.devacc_attr_endian_flags) {
1306 1243 case DDI_NEVERSWAP_ACC:
1307 1244 hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER;
1308 1245 break;
1309 1246 case DDI_STRUCTURE_LE_ACC:
1310 1247 hp->ah_hat_flags = HAT_STRUCTURE_LE;
1311 1248 break;
1312 1249 case DDI_STRUCTURE_BE_ACC:
1313 1250 return (DDI_FAILURE);
1314 1251 default:
1315 1252 return (DDI_REGS_ACC_CONFLICT);
1316 1253 }
1317 1254
1318 1255 switch (hp->ah_acc.devacc_attr_dataorder) {
1319 1256 case DDI_STRICTORDER_ACC:
1320 1257 break;
1321 1258 case DDI_UNORDERED_OK_ACC:
1322 1259 hp->ah_hat_flags |= HAT_UNORDERED_OK;
1323 1260 break;
1324 1261 case DDI_MERGING_OK_ACC:
1325 1262 hp->ah_hat_flags |= HAT_MERGING_OK;
1326 1263 break;
1327 1264 case DDI_LOADCACHING_OK_ACC:
1328 1265 hp->ah_hat_flags |= HAT_LOADCACHING_OK;
1329 1266 break;
1330 1267 case DDI_STORECACHING_OK_ACC:
1331 1268 hp->ah_hat_flags |= HAT_STORECACHING_OK;
1332 1269 break;
1333 1270 default:
1334 1271 return (DDI_FAILURE);
1335 1272 }
1336 1273
1337 1274 rbase = (rootnex_addr_t)rp->regspec_addr &
1338 1275 (~(rootnex_addr_t)MMU_PAGEOFFSET);
1339 1276 pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET;
1340 1277
1341 1278 if (rp->regspec_size == 0)
1342 1279 return (DDI_ME_INVAL);
1343 1280
1344 1281 #ifdef __xpv
1345 1282 /*
1346 1283 * If we're dom0, we're using a real device so we need to translate
1347 1284 * the MA to a PA.
1348 1285 */
1349 1286 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1350 1287 pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))) |
1351 1288 (rbase & MMU_PAGEOFFSET);
1352 1289 } else {
1353 1290 pbase = rbase;
1354 1291 }
1355 1292 #else
1356 1293 pbase = rbase;
1357 1294 #endif
1358 1295
1359 1296 hp->ah_pfn = mmu_btop(pbase);
1360 1297 hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset);
1361 1298
1362 1299 return (DDI_SUCCESS);
1363 1300 }
1364 1301
1365 1302
1366 1303
1367 1304 /*
1368 1305 * ************************
1369 1306 * interrupt related code
1370 1307 * ************************
1371 1308 */
1372 1309
1373 1310 /*
1374 1311 * rootnex_intr_ops()
1375 1312 * bus_intr_op() function for interrupt support
1376 1313 */
1377 1314 /* ARGSUSED */
1378 1315 static int
1379 1316 rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
1380 1317 ddi_intr_handle_impl_t *hdlp, void *result)
1381 1318 {
1382 1319 struct intrspec *ispec;
1383 1320
1384 1321 DDI_INTR_NEXDBG((CE_CONT,
1385 1322 "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
1386 1323 (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));
1387 1324
1388 1325 /* Process the interrupt operation */
1389 1326 switch (intr_op) {
1390 1327 case DDI_INTROP_GETCAP:
1391 1328 /* First check with pcplusmp */
1392 1329 if (psm_intr_ops == NULL)
1393 1330 return (DDI_FAILURE);
1394 1331
1395 1332 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) {
1396 1333 *(int *)result = 0;
1397 1334 return (DDI_FAILURE);
1398 1335 }
1399 1336 break;
1400 1337 case DDI_INTROP_SETCAP:
1401 1338 if (psm_intr_ops == NULL)
1402 1339 return (DDI_FAILURE);
1403 1340
1404 1341 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result))
1405 1342 return (DDI_FAILURE);
1406 1343 break;
1407 1344 case DDI_INTROP_ALLOC:
1408 1345 ASSERT(hdlp->ih_type == DDI_INTR_TYPE_FIXED);
1409 1346 return (rootnex_alloc_intr_fixed(rdip, hdlp, result));
1410 1347 case DDI_INTROP_FREE:
1411 1348 ASSERT(hdlp->ih_type == DDI_INTR_TYPE_FIXED);
1412 1349 return (rootnex_free_intr_fixed(rdip, hdlp));
1413 1350 case DDI_INTROP_GETPRI:
1414 1351 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1415 1352 return (DDI_FAILURE);
1416 1353 *(int *)result = ispec->intrspec_pri;
1417 1354 break;
1418 1355 case DDI_INTROP_SETPRI:
1419 1356 /* Validate the interrupt priority passed to us */
1420 1357 if (*(int *)result > LOCK_LEVEL)
1421 1358 return (DDI_FAILURE);
1422 1359
1423 1360 /* Ensure that PSM is all initialized and ispec is ok */
1424 1361 if ((psm_intr_ops == NULL) ||
1425 1362 ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL))
1426 1363 return (DDI_FAILURE);
1427 1364
1428 1365 /* Change the priority */
1429 1366 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
1430 1367 PSM_FAILURE)
1431 1368 return (DDI_FAILURE);
1432 1369
1433 1370 /* update the ispec with the new priority */
1434 1371 ispec->intrspec_pri = *(int *)result;
1435 1372 break;
1436 1373 case DDI_INTROP_ADDISR:
1437 1374 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1438 1375 return (DDI_FAILURE);
1439 1376 ispec->intrspec_func = hdlp->ih_cb_func;
1440 1377 break;
1441 1378 case DDI_INTROP_REMISR:
1442 1379 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1443 1380 return (DDI_FAILURE);
1444 1381 ispec->intrspec_func = (uint_t (*)()) 0;
1445 1382 break;
1446 1383 case DDI_INTROP_ENABLE:
1447 1384 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1448 1385 return (DDI_FAILURE);
1449 1386
1450 1387 /* Call psmi to translate irq with the dip */
1451 1388 if (psm_intr_ops == NULL)
1452 1389 return (DDI_FAILURE);
1453 1390
1454 1391 ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
1455 1392 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR,
1456 1393 (int *)&hdlp->ih_vector) == PSM_FAILURE)
1457 1394 return (DDI_FAILURE);
1458 1395
1459 1396 /* Add the interrupt handler */
1460 1397 if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
1461 1398 hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
1462 1399 hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip))
1463 1400 return (DDI_FAILURE);
1464 1401 break;
1465 1402 case DDI_INTROP_DISABLE:
1466 1403 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1467 1404 return (DDI_FAILURE);
1468 1405
1469 1406 /* Call psm_ops() to translate irq with the dip */
1470 1407 if (psm_intr_ops == NULL)
1471 1408 return (DDI_FAILURE);
1472 1409
1473 1410 ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
1474 1411 (void) (*psm_intr_ops)(rdip, hdlp,
1475 1412 PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);
1476 1413
1477 1414 /* Remove the interrupt handler */
1478 1415 rem_avintr((void *)hdlp, ispec->intrspec_pri,
1479 1416 hdlp->ih_cb_func, hdlp->ih_vector);
1480 1417 break;
1481 1418 case DDI_INTROP_SETMASK:
1482 1419 if (psm_intr_ops == NULL)
1483 1420 return (DDI_FAILURE);
1484 1421
1485 1422 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
1486 1423 return (DDI_FAILURE);
1487 1424 break;
1488 1425 case DDI_INTROP_CLRMASK:
1489 1426 if (psm_intr_ops == NULL)
1490 1427 return (DDI_FAILURE);
1491 1428
1492 1429 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
1493 1430 return (DDI_FAILURE);
1494 1431 break;
1495 1432 case DDI_INTROP_GETPENDING:
1496 1433 if (psm_intr_ops == NULL)
1497 1434 return (DDI_FAILURE);
1498 1435
1499 1436 if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
1500 1437 result)) {
1501 1438 *(int *)result = 0;
1502 1439 return (DDI_FAILURE);
1503 1440 }
1504 1441 break;
1505 1442 case DDI_INTROP_NAVAIL:
1506 1443 case DDI_INTROP_NINTRS:
1507 1444 *(int *)result = i_ddi_get_intx_nintrs(rdip);
1508 1445 if (*(int *)result == 0) {
1509 1446 /*
1510 1447 * Special case for the 'pcic' driver only. This driver
1511 1448 * is a child of the 'isa' and 'rootnex' drivers.
1512 1449 *
1513 1450 * See detailed comments on this in the function
1514 1451 * rootnex_get_ispec().
1515 1452 *
1516 1453 * Children of 'pcic' send the 'NINTRS' request all the
1517 1454 * way to the rootnex driver. But the 'pdp->par_nintr'
1518 1455 * field may not be initialized. So, we fake it here
1519 1456 * to return 1 (a la what PCMCIA nexus does).
1520 1457 */
1521 1458 if (strcmp(ddi_get_name(rdip), "pcic") == 0)
1522 1459 *(int *)result = 1;
1523 1460 else
1524 1461 return (DDI_FAILURE);
1525 1462 }
1526 1463 break;
1527 1464 case DDI_INTROP_SUPPORTED_TYPES:
1528 1465 *(int *)result = DDI_INTR_TYPE_FIXED; /* Always ... */
1529 1466 break;
1530 1467 default:
1531 1468 return (DDI_FAILURE);
1532 1469 }
1533 1470
1534 1471 return (DDI_SUCCESS);
1535 1472 }
1536 1473
1537 1474
1538 1475 /*
1539 1476 * rootnex_get_ispec()
1540 1477 * convert an interrupt number to an interrupt specification.
1541 1478 * The interrupt number determines which interrupt spec will be
1542 1479 * returned if more than one exists.
1543 1480 *
1544 1481 * Look into the parent private data area of the 'rdip' to find out
1545 1482 * the interrupt specification. First check to make sure there is
1546 1483 * one that matches "inumber" and then return a pointer to it.
1547 1484 *
1548 1485 * Return NULL if one could not be found.
1549 1486 *
1550 1487 * NOTE: This is needed for rootnex_intr_ops()
1551 1488 */
1552 1489 static struct intrspec *
1553 1490 rootnex_get_ispec(dev_info_t *rdip, int inum)
1554 1491 {
1555 1492 struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);
1556 1493
1557 1494 /*
1558 1495 * Special case handling for drivers that provide their own
1559 1496 * intrspec structures instead of relying on the DDI framework.
1560 1497 *
1561 1498 * A broken hardware driver in ON could potentially provide its
1562 1499 * own intrspec structure, instead of relying on the hardware.
1563 1500 * If these drivers are children of 'rootnex' then we need to
1564 1501 * continue to provide backward compatibility to them here.
1565 1502 *
1566 1503 * Following check is a special case for 'pcic' driver which
1567 1504 * was found to have broken hardware and provides its own intrspec.
1568 1505 *
1569 1506 * Verbatim comments from this driver are shown here:
1570 1507 * "Don't use the ddi_add_intr since we don't have a
1571 1508 * default intrspec in all cases."
1572 1509 *
1573 1510 * Since an 'ispec' may not always be created for it,
1574 1511 * check for that and create one if needed.
1575 1512 *
1576 1513 * NOTE: Currently 'pcic' is the only driver found to do this.
1577 1514 */
1578 1515 if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
1579 1516 pdp->par_nintr = 1;
1580 1517 pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) *
1581 1518 pdp->par_nintr, KM_SLEEP);
1582 1519 }
1583 1520
1584 1521 /* Validate the interrupt number */
1585 1522 if (inum >= pdp->par_nintr)
1586 1523 return (NULL);
1587 1524
1588 1525 /* Get the interrupt structure pointer and return that */
1589 1526 return ((struct intrspec *)&pdp->par_intr[inum]);
1590 1527 }
1591 1528
1592 1529 /*
1593 1530 * Allocate interrupt vector for FIXED (legacy) type.
1594 1531 */
1595 1532 static int
1596 1533 rootnex_alloc_intr_fixed(dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp,
1597 1534 void *result)
1598 1535 {
1599 1536 struct intrspec *ispec;
1600 1537 ddi_intr_handle_impl_t info_hdl;
1601 1538 int ret;
1602 1539 int free_phdl = 0;
1603 1540 apic_get_type_t type_info;
1604 1541
1605 1542 if (psm_intr_ops == NULL)
1606 1543 return (DDI_FAILURE);
1607 1544
1608 1545 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1609 1546 return (DDI_FAILURE);
1610 1547
1611 1548 /*
1612 1549 * If the PSM module is "APIX" then pass the request for it
1613 1550 * to allocate the vector now.
1614 1551 */
1615 1552 bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t));
1616 1553 info_hdl.ih_private = &type_info;
1617 1554 if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE, NULL) ==
1618 1555 PSM_SUCCESS && strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0) {
1619 1556 if (hdlp->ih_private == NULL) { /* allocate phdl structure */
1620 1557 free_phdl = 1;
1621 1558 i_ddi_alloc_intr_phdl(hdlp);
1622 1559 }
1623 1560 ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
1624 1561 ret = (*psm_intr_ops)(rdip, hdlp,
1625 1562 PSM_INTR_OP_ALLOC_VECTORS, result);
1626 1563 if (free_phdl) { /* free up the phdl structure */
1627 1564 free_phdl = 0;
1628 1565 i_ddi_free_intr_phdl(hdlp);
1629 1566 hdlp->ih_private = NULL;
1630 1567 }
1631 1568 } else {
1632 1569 /*
1633 1570 * No APIX module; fall back to the old scheme where the
1634 1571 * interrupt vector is allocated during ddi_enable_intr() call.
1635 1572 */
1636 1573 hdlp->ih_pri = ispec->intrspec_pri;
1637 1574 *(int *)result = hdlp->ih_scratch1;
1638 1575 ret = DDI_SUCCESS;
1639 1576 }
1640 1577
1641 1578 return (ret);
1642 1579 }
1643 1580
1644 1581 /*
1645 1582 * Free up interrupt vector for FIXED (legacy) type.
1646 1583 */
1647 1584 static int
1648 1585 rootnex_free_intr_fixed(dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
1649 1586 {
1650 1587 struct intrspec *ispec;
1651 1588 struct ddi_parent_private_data *pdp;
1652 1589 ddi_intr_handle_impl_t info_hdl;
1653 1590 int ret;
1654 1591 apic_get_type_t type_info;
1655 1592
1656 1593 if (psm_intr_ops == NULL)
1657 1594 return (DDI_FAILURE);
1658 1595
1659 1596 /*
1660 1597  * If the PSM module is "APIX" then pass the request to it
1661 1598  * to free up the vector now.
1662 1599 */
1663 1600 bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t));
1664 1601 info_hdl.ih_private = &type_info;
1665 1602 if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE, NULL) ==
1666 1603 PSM_SUCCESS && strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0) {
1667 1604 if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
1668 1605 return (DDI_FAILURE);
1669 1606 ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
1670 1607 ret = (*psm_intr_ops)(rdip, hdlp,
1671 1608 PSM_INTR_OP_FREE_VECTORS, NULL);
1672 1609 } else {
1673 1610 /*
1674 1611 * No APIX module; fall back to the old scheme where
1675 1612 * the interrupt vector was already freed during
1676 1613 * ddi_disable_intr() call.
1677 1614 */
1678 1615 ret = DDI_SUCCESS;
1679 1616 }
1680 1617
1681 1618 pdp = ddi_get_parent_data(rdip);
1682 1619
1683 1620 /*
1684 1621  * Special case for the 'pcic' driver only.
1685 1622 * If an intrspec was created for it, clean it up here
1686 1623 * See detailed comments on this in the function
1687 1624 * rootnex_get_ispec().
1688 1625 */
1689 1626 if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) {
1690 1627 kmem_free(pdp->par_intr, sizeof (struct intrspec) *
1691 1628 pdp->par_nintr);
1692 1629 /*
1693 1630  * Clear the fields so that the
1694 1631  * DDI framework doesn't free them again.
1695 1632 */
1696 1633 pdp->par_intr = NULL;
1697 1634 pdp->par_nintr = 0;
1698 1635 }
1699 1636
1700 1637 return (ret);
1701 1638 }
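
Both rootnex_alloc_intr_fixed() and rootnex_free_intr_fixed() open with the same
PSM_INTR_OP_APIC_TYPE query to decide whether the APIX PSM module is active. Below
is a minimal sketch of that check as a standalone predicate; the helper name is
hypothetical and the sketch only reuses calls that already appear in the two
functions above.

static boolean_t
rootnex_psm_is_apix(void)
{
	ddi_intr_handle_impl_t info_hdl;
	apic_get_type_t type_info;

	if (psm_intr_ops == NULL)
		return (B_FALSE);

	/* ask the PSM module what type it is */
	bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t));
	info_hdl.ih_private = &type_info;
	if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE,
	    NULL) != PSM_SUCCESS)
		return (B_FALSE);

	return (strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0);
}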
1702 1639
1703 1640
1704 1641 /*
1705 1642 * ******************
1706 1643 * dma related code
1707 1644 * ******************
1708 1645 */
1709 1646
1710 1647 /*ARGSUSED*/
1711 1648 static int
1712 1649 rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
1713 1650 ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
1714 1651 ddi_dma_handle_t *handlep)
1715 1652 {
1716 1653 uint64_t maxsegmentsize_ll;
1717 1654 uint_t maxsegmentsize;
1718 1655 ddi_dma_impl_t *hp;
1719 1656 rootnex_dma_t *dma;
1720 1657 uint64_t count_max;
1721 1658 uint64_t seg;
1722 1659 int kmflag;
1723 1660 int e;
1724 1661
1725 1662
1726 1663 /* convert our sleep flags */
1727 1664 if (waitfp == DDI_DMA_SLEEP) {
1728 1665 kmflag = KM_SLEEP;
1729 1666 } else {
1730 1667 kmflag = KM_NOSLEEP;
1731 1668 }
1732 1669
1733 1670 /*
1734 1671 * We try to do only one memory allocation here. We'll do a little
1735 1672 * pointer manipulation later. If the bind ends up taking more than
1736 1673 * our prealloc's space, we'll have to allocate more memory in the
1737 1674 * bind operation. Not great, but much better than before and the
1738 1675 * best we can do with the current bind interfaces.
1739 1676 */
1740 1677 hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag);
1741 1678 if (hp == NULL)
1742 1679 return (DDI_DMA_NORESOURCES);
1743 1680
1744 1681 /* Do our pointer manipulation now, align the structures */
1745 1682 hp->dmai_private = (void *)(((uintptr_t)hp +
1746 1683 (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7);
1747 1684 dma = (rootnex_dma_t *)hp->dmai_private;
1748 1685 dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma +
1749 1686 sizeof (rootnex_dma_t) + 0x7) & ~0x7);
1750 1687
1751 1688 /* setup the handle */
1752 1689 rootnex_clean_dmahdl(hp);
1753 1690 hp->dmai_error.err_fep = NULL;
1754 1691 hp->dmai_error.err_cf = NULL;
1755 1692 dma->dp_dip = rdip;
1756 1693 dma->dp_sglinfo.si_flags = attr->dma_attr_flags;
1757 1694 dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo;
1758 1695
1759 1696 /*
1760 1697 * The BOUNCE_ON_SEG workaround is not needed when an IOMMU
1761 1698 * is being used. Set the upper limit to the seg value.
1762 1699 * There will be enough DVMA space to always get addresses
1763 1700 * that will match the constraints.
1764 1701 */
1765 1702 if (IOMMU_USED(rdip) &&
1766 1703 (attr->dma_attr_flags & _DDI_DMA_BOUNCE_ON_SEG)) {
1767 1704 dma->dp_sglinfo.si_max_addr = attr->dma_attr_seg;
1768 1705 dma->dp_sglinfo.si_flags &= ~_DDI_DMA_BOUNCE_ON_SEG;
1769 1706 } else
1770 1707 dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi;
1771 1708
1772 1709 hp->dmai_minxfer = attr->dma_attr_minxfer;
1773 1710 hp->dmai_burstsizes = attr->dma_attr_burstsizes;
1774 1711 hp->dmai_rdip = rdip;
1775 1712 hp->dmai_attr = *attr;
1776 1713
1777 1714 if (attr->dma_attr_seg >= dma->dp_sglinfo.si_max_addr)
1778 1715 dma->dp_sglinfo.si_cancross = B_FALSE;
1779 1716 else
1780 1717 dma->dp_sglinfo.si_cancross = B_TRUE;
1781 1718
1782 1719 /* we don't need to worry about the SPL since we do a tryenter */
1783 1720 mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL);
1784 1721
1785 1722 /*
1786 1723 * Figure out our maximum segment size. If the segment size is greater
1787 1724 * than 4G, we will limit it to (4G - 1) since the max size of a dma
1788 1725 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
1789 1726 * dma_attr_count_max are size-1 type values.
1790 1727 *
1791 1728 * Maximum segment size is the largest physically contiguous chunk of
1792 1729 * memory that we can return from a bind (i.e. the maximum size of a
1793 1730 * single cookie).
1794 1731 */
1795 1732
1796 1733 /* handle the rollover cases */
1797 1734 seg = attr->dma_attr_seg + 1;
1798 1735 if (seg < attr->dma_attr_seg) {
1799 1736 seg = attr->dma_attr_seg;
1800 1737 }
1801 1738 count_max = attr->dma_attr_count_max + 1;
1802 1739 if (count_max < attr->dma_attr_count_max) {
1803 1740 count_max = attr->dma_attr_count_max;
1804 1741 }
1805 1742
1806 1743 /*
1807 1744 * granularity may or may not be a power of two. If it isn't, we can't
1808 1745 * use a simple mask.
1809 1746 */
1810 1747 if (!ISP2(attr->dma_attr_granular)) {
1811 1748 dma->dp_granularity_power_2 = B_FALSE;
1812 1749 } else {
1813 1750 dma->dp_granularity_power_2 = B_TRUE;
1814 1751 }
1815 1752
1816 1753 /*
1817 1754 * maxxfer should be a whole multiple of granularity. If we're going to
1818 1755 * break up a window because we're greater than maxxfer, we might as
1819 1756  * well make sure maxxfer is a whole multiple so we don't have to
1820 1757  * worry about trimming the window later on for this case.
1821 1758 */
1822 1759 if (attr->dma_attr_granular > 1) {
1823 1760 if (dma->dp_granularity_power_2) {
1824 1761 dma->dp_maxxfer = attr->dma_attr_maxxfer -
1825 1762 (attr->dma_attr_maxxfer &
1826 1763 (attr->dma_attr_granular - 1));
1827 1764 } else {
1828 1765 dma->dp_maxxfer = attr->dma_attr_maxxfer -
1829 1766 (attr->dma_attr_maxxfer % attr->dma_attr_granular);
1830 1767 }
1831 1768 } else {
1832 1769 dma->dp_maxxfer = attr->dma_attr_maxxfer;
1833 1770 }
1834 1771
1835 1772 maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer);
1836 1773 maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max);
1837 1774 if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) {
1838 1775 maxsegmentsize = 0xFFFFFFFF;
1839 1776 } else {
1840 1777 maxsegmentsize = maxsegmentsize_ll;
1841 1778 }
1842 1779 dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize;
1843 1780 dma->dp_sglinfo.si_segmask = attr->dma_attr_seg;
1844 1781
1845 1782 /* check the ddi_dma_attr arg to make sure it makes a little sense */
1846 1783 if (rootnex_alloc_check_parms) {
1847 1784 e = rootnex_valid_alloc_parms(attr, maxsegmentsize);
1848 1785 if (e != DDI_SUCCESS) {
1849 1786 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]);
1850 1787 (void) rootnex_dma_freehdl(dip, rdip,
1851 1788 (ddi_dma_handle_t)hp);
1852 1789 return (e);
1853 1790 }
1854 1791 }
1855 1792
1856 1793 *handlep = (ddi_dma_handle_t)hp;
1857 1794
1858 1795 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1859 1796 ROOTNEX_DPROBE1(rootnex__alloc__handle, uint64_t,
1860 1797 rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1861 1798
1862 1799 return (DDI_SUCCESS);
1863 1800 }
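
The size-1 rollover handling and the granularity trimming above are easiest to
follow with concrete numbers. The sketch below is illustrative only; the attribute
values and the function name are made up, and the arithmetic simply mirrors what
rootnex_coredma_allochdl() does.

static uint64_t
example_attr_arithmetic(void)
{
	ddi_dma_attr_t ex = { 0 };
	uint64_t seg, count_max, maxxfer;

	ex.dma_attr_seg = 0xFFFFFFFFull;		/* size-1 style value */
	ex.dma_attr_count_max = 0xFFFFFFFFFFFFFFFFull;	/* would wrap on + 1 */
	ex.dma_attr_granular = 0x600;			/* not a power of two */
	ex.dma_attr_maxxfer = 0x10000;

	seg = ex.dma_attr_seg + 1;		/* 0x100000000, no rollover */
	if (seg < ex.dma_attr_seg)
		seg = ex.dma_attr_seg;

	count_max = ex.dma_attr_count_max + 1;	/* wraps to 0 ... */
	if (count_max < ex.dma_attr_count_max)
		count_max = ex.dma_attr_count_max; /* ... keep the all-ones value */

	/* 0x600 isn't a power of two, so trim with modulo, not a mask */
	maxxfer = ex.dma_attr_maxxfer -
	    (ex.dma_attr_maxxfer % ex.dma_attr_granular);

	return (MIN(seg, MIN(maxxfer, count_max)));	/* 0xFC00 here */
}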
1864 1801
1865 1802
1866 1803 /*
1867 1804 * rootnex_dma_allochdl()
1868 1805 * called from ddi_dma_alloc_handle().
1869 1806 */
1870 1807 static int
1871 1808 rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1872 1809 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
1873 1810 {
1874 1811 int retval = DDI_SUCCESS;
1875 1812 #if defined(__amd64) && !defined(__xpv)
1876 1813
1877 1814 if (IOMMU_UNITIALIZED(rdip)) {
1878 1815 retval = iommulib_nex_open(dip, rdip);
1879 1816
1880 1817 if (retval != DDI_SUCCESS && retval != DDI_ENOTSUP)
1881 1818 return (retval);
1882 1819 }
1883 1820
1884 1821 if (IOMMU_UNUSED(rdip)) {
1885 1822 retval = rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg,
1886 1823 handlep);
1887 1824 } else {
1888 1825 retval = iommulib_nexdma_allochdl(dip, rdip, attr,
1889 1826 waitfp, arg, handlep);
1890 1827 }
1891 1828 #else
1892 1829 retval = rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg,
1893 1830 handlep);
1894 1831 #endif
1895 1832 switch (retval) {
1896 1833 case DDI_DMA_NORESOURCES:
1897 1834 if (waitfp != DDI_DMA_DONTWAIT) {
1898 1835 ddi_set_callback(waitfp, arg,
1899 1836 &rootnex_state->r_dvma_call_list_id);
1900 1837 }
1901 1838 break;
1902 1839 case DDI_SUCCESS:
1903 1840 ndi_fmc_insert(rdip, DMA_HANDLE, *handlep, NULL);
1904 1841 break;
1905 1842 default:
1906 1843 break;
1907 1844 }
1908 1845 return (retval);
1909 1846 }
1910 1847
1911 1848 /*ARGSUSED*/
1912 1849 static int
1913 1850 rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
1914 1851 ddi_dma_handle_t handle)
1915 1852 {
1916 1853 ddi_dma_impl_t *hp;
1917 1854 rootnex_dma_t *dma;
1918 1855
1919 1856
1920 1857 hp = (ddi_dma_impl_t *)handle;
1921 1858 dma = (rootnex_dma_t *)hp->dmai_private;
1922 1859
1923 1860 /* unbind should have been called first */
1924 1861 ASSERT(!dma->dp_inuse);
1925 1862
1926 1863 mutex_destroy(&dma->dp_mutex);
1927 1864 kmem_cache_free(rootnex_state->r_dmahdl_cache, hp);
1928 1865
1929 1866 ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1930 1867 ROOTNEX_DPROBE1(rootnex__free__handle, uint64_t,
1931 1868 rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]);
1932 1869
1933 1870 return (DDI_SUCCESS);
1934 1871 }
1935 1872
1936 1873 /*
1937 1874 * rootnex_dma_freehdl()
1938 1875 * called from ddi_dma_free_handle().
1939 1876 */
1940 1877 static int
1941 1878 rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
1942 1879 {
1943 1880 int ret;
1944 1881
1945 1882 ndi_fmc_remove(rdip, DMA_HANDLE, handle);
1946 1883 #if defined(__amd64) && !defined(__xpv)
1947 1884 if (IOMMU_USED(rdip))
1948 1885 ret = iommulib_nexdma_freehdl(dip, rdip, handle);
1949 1886 else
1950 1887 #endif
1951 1888 ret = rootnex_coredma_freehdl(dip, rdip, handle);
1952 1889
1953 1890 if (rootnex_state->r_dvma_call_list_id)
1954 1891 ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
1955 1892
1956 1893 return (ret);
1957 1894 }
1958 1895
1959 1896 /*ARGSUSED*/
1960 1897 static int
1961 1898 rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
1962 1899 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
1963 1900 ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1964 1901 {
1965 1902 rootnex_sglinfo_t *sinfo;
1966 1903 ddi_dma_obj_t *dmao;
1967 1904 #if defined(__amd64) && !defined(__xpv)
1968 1905 struct dvmaseg *dvs;
1969 1906 ddi_dma_cookie_t *cookie;
1970 1907 #endif
1971 1908 ddi_dma_attr_t *attr;
1972 1909 ddi_dma_impl_t *hp;
1973 1910 rootnex_dma_t *dma;
1974 1911 int kmflag;
1975 1912 int e;
1976 1913 uint_t ncookies;
1977 1914
1978 1915 hp = (ddi_dma_impl_t *)handle;
1979 1916 dma = (rootnex_dma_t *)hp->dmai_private;
1980 1917 dmao = &dma->dp_dma;
1981 1918 sinfo = &dma->dp_sglinfo;
1982 1919 attr = &hp->dmai_attr;
1983 1920
1984 1921 /* convert the sleep flags */
1985 1922 if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
1986 1923 dma->dp_sleep_flags = kmflag = KM_SLEEP;
1987 1924 } else {
1988 1925 dma->dp_sleep_flags = kmflag = KM_NOSLEEP;
1989 1926 }
1990 1927
1991 1928 hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1992 1929
1993 1930 /*
1994 1931 * This is useful for debugging a driver. Not as useful in a production
1995 1932 * system. The only time this will fail is if you have a driver bug.
1996 1933 */
1997 1934 if (rootnex_bind_check_inuse) {
1998 1935 /*
1999 1936 * No one else should ever have this lock unless someone else
2000 1937 * is trying to use this handle. So contention on the lock
2001 1938 * is the same as inuse being set.
2002 1939 */
2003 1940 e = mutex_tryenter(&dma->dp_mutex);
2004 1941 if (e == 0) {
2005 1942 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2006 1943 return (DDI_DMA_INUSE);
2007 1944 }
2008 1945 if (dma->dp_inuse) {
2009 1946 mutex_exit(&dma->dp_mutex);
2010 1947 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2011 1948 return (DDI_DMA_INUSE);
2012 1949 }
2013 1950 dma->dp_inuse = B_TRUE;
2014 1951 mutex_exit(&dma->dp_mutex);
2015 1952 }
2016 1953
2017 1954 /* check the ddi_dma_attr arg to make sure it makes a little sense */
2018 1955 if (rootnex_bind_check_parms) {
2019 1956 e = rootnex_valid_bind_parms(dmareq, attr);
2020 1957 if (e != DDI_SUCCESS) {
2021 1958 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2022 1959 rootnex_clean_dmahdl(hp);
2023 1960 return (e);
2024 1961 }
2025 1962 }
2026 1963
2027 1964 /* save away the original bind info */
2028 1965 dma->dp_dma = dmareq->dmar_object;
2029 1966
2030 1967 #if defined(__amd64) && !defined(__xpv)
2031 1968 if (IOMMU_USED(rdip)) {
2032 1969 dmao = &dma->dp_dvma;
2033 1970 e = iommulib_nexdma_mapobject(dip, rdip, handle, dmareq, dmao);
2034 1971 switch (e) {
2035 1972 case DDI_SUCCESS:
2036 1973 if (sinfo->si_cancross ||
2037 1974 dmao->dmao_obj.dvma_obj.dv_nseg != 1 ||
2038 1975 dmao->dmao_size > sinfo->si_max_cookie_size) {
2039 1976 dma->dp_dvma_used = B_TRUE;
2040 1977 break;
2041 1978 }
2042 1979 sinfo->si_sgl_size = 1;
2043 1980 hp->dmai_rflags |= DMP_NOSYNC;
2044 1981
2045 1982 dma->dp_dvma_used = B_TRUE;
2046 1983 dma->dp_need_to_free_cookie = B_FALSE;
2047 1984
2048 1985 dvs = &dmao->dmao_obj.dvma_obj.dv_seg[0];
2049 1986 cookie = hp->dmai_cookie = dma->dp_cookies =
2050 1987 (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
2051 1988 cookie->dmac_laddress = dvs->dvs_start +
2052 1989 dmao->dmao_obj.dvma_obj.dv_off;
2053 1990 cookie->dmac_size = dvs->dvs_len;
2054 1991 cookie->dmac_type = 0;
2055 1992
2056 1993 ROOTNEX_DPROBE1(rootnex__bind__dvmafast, dev_info_t *,
2057 1994 rdip);
2058 1995 goto fast;
2059 1996 case DDI_ENOTSUP:
2060 1997 break;
2061 1998 default:
2062 1999 rootnex_clean_dmahdl(hp);
2063 2000 return (e);
2064 2001 }
2065 2002 }
2066 2003 #endif
2067 2004
2068 2005 /*
2069 2006  * Figure out a rough estimate of the maximum number of pages
2070 2007  * this buffer could use (a high estimate, of course).
2071 2008 */
2072 2009 sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1;
2073 2010
2074 2011 if (dma->dp_dvma_used) {
2075 2012 /*
2076 2013 * The number of physical pages is the worst case.
2077 2014 *
2078 2015 * For DVMA, the worst case is the length divided
2079 2016 * by the maximum cookie length, plus 1. Add to that
2080 2017 * the number of segment boundaries potentially crossed, and
2081 2018 * the additional number of DVMA segments that was returned.
2082 2019 *
2083 2020 * In the normal case, for modern devices, si_cancross will
2084 2021 * be false, and dv_nseg will be 1, and the fast path will
2085 2022 * have been taken above.
2086 2023 */
2087 2024 ncookies = (dma->dp_dma.dmao_size / sinfo->si_max_cookie_size)
2088 2025 + 1;
2089 2026 if (sinfo->si_cancross)
2090 2027 ncookies +=
2091 2028 (dma->dp_dma.dmao_size / attr->dma_attr_seg) + 1;
2092 2029 ncookies += (dmao->dmao_obj.dvma_obj.dv_nseg - 1);
2093 2030
2094 2031 sinfo->si_max_pages = MIN(sinfo->si_max_pages, ncookies);
2095 2032 }
2096 2033
2097 2034 /*
2098 2035 * We'll use the pre-allocated cookies for any bind that will *always*
2099 2036  * fit (it's more important to be consistent; we don't want to create
2100 2037 * additional degenerate cases).
2101 2038 */
2102 2039 if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
2103 2040 dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
2104 2041 dma->dp_need_to_free_cookie = B_FALSE;
2105 2042 ROOTNEX_DPROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
2106 2043 uint_t, sinfo->si_max_pages);
2107 2044
2108 2045 /*
2109 2046 * For anything larger than that, we'll go ahead and allocate the
2110 2047  * maximum number of pages we expect to see. Hopefully, we won't be
2111 2048 * seeing this path in the fast path for high performance devices very
2112 2049 * frequently.
2113 2050 *
2114 2051 * a ddi bind interface that allowed the driver to provide storage to
2115 2052 * the bind interface would speed this case up.
2116 2053 */
2117 2054 } else {
2118 2055 /*
2119 2056 * Save away how much memory we allocated. If we're doing a
2120 2057 * nosleep, the alloc could fail...
2121 2058 */
2122 2059 dma->dp_cookie_size = sinfo->si_max_pages *
2123 2060 sizeof (ddi_dma_cookie_t);
2124 2061 dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
2125 2062 if (dma->dp_cookies == NULL) {
2126 2063 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2127 2064 rootnex_clean_dmahdl(hp);
2128 2065 return (DDI_DMA_NORESOURCES);
2129 2066 }
2130 2067 dma->dp_need_to_free_cookie = B_TRUE;
2131 2068 ROOTNEX_DPROBE2(rootnex__bind__alloc, dev_info_t *, rdip,
2132 2069 uint_t, sinfo->si_max_pages);
2133 2070 }
2134 2071 hp->dmai_cookie = dma->dp_cookies;
2135 2072
2136 2073 /*
2137 2074 * Get the real sgl. rootnex_get_sgl will fill in cookie array while
2138 2075 * looking at the constraints in the dma structure. It will then put
2139 2076 * some additional state about the sgl in the dma struct (i.e. is
2140 2077 * the sgl clean, or do we need to do some munging; how many pages
2141 2078 * need to be copied, etc.)
2142 2079 */
2143 2080 if (dma->dp_dvma_used)
2144 2081 rootnex_dvma_get_sgl(dmao, dma->dp_cookies, &dma->dp_sglinfo);
2145 2082 else
2146 2083 rootnex_get_sgl(dmao, dma->dp_cookies, &dma->dp_sglinfo);
2147 2084
2148 2085 out:
2149 2086 ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);
2150 2087 /* if we don't need a copy buffer, we don't need to sync */
2151 2088 if (sinfo->si_copybuf_req == 0) {
2152 2089 hp->dmai_rflags |= DMP_NOSYNC;
2153 2090 }
2154 2091
2155 2092 /*
2156 2093 * if we don't need the copybuf and we don't need to do a partial, we
2157 2094 * hit the fast path. All the high performance devices should be trying
2158 2095 * to hit this path. To hit this path, a device should be able to reach
2159 2096 * all of memory, shouldn't try to bind more than it can transfer, and
2160 2097 * the buffer shouldn't require more cookies than the driver/device can
2161 2098  * handle [sgllen].
2162 2099 *
2163 2100 * Note that negative values of dma_attr_sgllen are supposed
2164 2101 * to mean unlimited, but we just cast them to mean a
2165 2102  * "ridiculously large limit". This saves some extra checks on
2166 2103 * hot paths.
2167 2104 */
2168 2105 if ((sinfo->si_copybuf_req == 0) &&
2169 2106 (sinfo->si_sgl_size <= (unsigned)attr->dma_attr_sgllen) &&
2170 2107 (dmao->dmao_size <= dma->dp_maxxfer)) {
2171 2108 fast:
2172 2109 /*
2173 2110 * If the driver supports FMA, insert the handle in the FMA DMA
2174 2111 * handle cache.
2175 2112 */
2176 2113 if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
2177 2114 hp->dmai_error.err_cf = rootnex_dma_check;
2178 2115
2179 2116 /*
2180 2117 * copy out the first cookie and ccountp, set the cookie
2181 2118 * pointer to the second cookie. The first cookie is passed
2182 2119 * back on the stack. Additional cookies are accessed via
2183 2120 * ddi_dma_nextcookie()
2184 2121 */
2185 2122 *cookiep = dma->dp_cookies[0];
2186 2123 *ccountp = sinfo->si_sgl_size;
2187 2124 hp->dmai_cookie++;
2188 2125 hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
2189 2126 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2190 2127 ROOTNEX_DPROBE4(rootnex__bind__fast, dev_info_t *, rdip,
2191 2128 uint64_t, rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS],
2192 2129 uint_t, dmao->dmao_size, uint_t, *ccountp);
2193 2130
2194 2131
2195 2132 return (DDI_DMA_MAPPED);
2196 2133 }
2197 2134
2198 2135 /*
2199 2136 * go to the slow path, we may need to alloc more memory, create
2200 2137 * multiple windows, and munge up a sgl to make the device happy.
2201 2138 */
2202 2139
2203 2140 /*
2204 2141 * With the IOMMU mapobject method used, we should never hit
2205 2142 * the slow path. If we do, something is seriously wrong.
2206 2143 * Clean up and return an error.
2207 2144 */
2208 2145
2209 2146 #if defined(__amd64) && !defined(__xpv)
2210 2147
2211 2148 if (dma->dp_dvma_used) {
2212 2149 (void) iommulib_nexdma_unmapobject(dip, rdip, handle,
2213 2150 &dma->dp_dvma);
2214 2151 e = DDI_DMA_NOMAPPING;
2215 2152 } else {
2216 2153 #endif
2217 2154 e = rootnex_bind_slowpath(hp, dmareq, dma, attr, &dma->dp_dma,
2218 2155 kmflag);
2219 2156 #if defined(__amd64) && !defined(__xpv)
2220 2157 }
2221 2158 #endif
2222 2159 if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
2223 2160 if (dma->dp_need_to_free_cookie) {
2224 2161 kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2225 2162 }
2226 2163 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
2227 2164 rootnex_clean_dmahdl(hp); /* must be after free cookie */
2228 2165 return (e);
2229 2166 }
2230 2167
2231 2168 /*
2232 2169 * If the driver supports FMA, insert the handle in the FMA DMA handle
2233 2170 * cache.
2234 2171 */
2235 2172 if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
2236 2173 hp->dmai_error.err_cf = rootnex_dma_check;
2237 2174
2238 2175 /* if the first window uses the copy buffer, sync it for the device */
2239 2176 if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
2240 2177 (hp->dmai_rflags & DDI_DMA_WRITE)) {
2241 2178 (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
2242 2179 DDI_DMA_SYNC_FORDEV);
2243 2180 }
2244 2181
2245 2182 /*
2246 2183 * copy out the first cookie and ccountp, set the cookie pointer to the
2247 2184 * second cookie. Make sure the partial flag is set/cleared correctly.
2248 2185 * If we have a partial map (i.e. multiple windows), the number of
2249 2186 * cookies we return is the number of cookies in the first window.
2250 2187 */
2251 2188 if (e == DDI_DMA_MAPPED) {
2252 2189 hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
2253 2190 *ccountp = sinfo->si_sgl_size;
2254 2191 hp->dmai_nwin = 1;
2255 2192 } else {
2256 2193 hp->dmai_rflags |= DDI_DMA_PARTIAL;
2257 2194 *ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
2258 2195 ASSERT(hp->dmai_nwin <= dma->dp_max_win);
2259 2196 }
2260 2197 *cookiep = dma->dp_cookies[0];
2261 2198 hp->dmai_cookie++;
2262 2199
2263 2200 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2264 2201 ROOTNEX_DPROBE4(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
2265 2202 rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
2266 2203 dmao->dmao_size, uint_t, *ccountp);
2267 2204 return (e);
2268 2205 }
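
The worst-case cookie estimate used above for a DVMA-backed bind can be checked
with example numbers. This is an illustrative sketch only; the sizes and the
function name are hypothetical.

static uint_t
example_worst_case_cookies(void)
{
	size_t obj_size = 0x100000;	/* 1 MB bind */
	uint_t max_cookie = 0x10000;	/* si_max_cookie_size: 64 KB */
	uint64_t attr_seg = 0x40000;	/* dma_attr_seg span: 256 KB */
	uint_t dv_nseg = 2;		/* DVMA segments returned */
	uint_t ncookies;

	ncookies = (obj_size / max_cookie) + 1;		/* 17 */
	ncookies += (obj_size / attr_seg) + 1;		/* + 5 (si_cancross case) */
	ncookies += (dv_nseg - 1);			/* + 1 = 23 worst case */

	return (ncookies);
}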
2269 2206
2270 2207 /*
2271 2208 * rootnex_dma_bindhdl()
2272 2209 * called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
2273 2210 */
2274 2211 static int
2275 2212 rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
2276 2213 ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
2277 2214 ddi_dma_cookie_t *cookiep, uint_t *ccountp)
2278 2215 {
2279 2216 int ret;
2280 2217 #if defined(__amd64) && !defined(__xpv)
2281 2218 if (IOMMU_USED(rdip))
2282 2219 ret = iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq,
2283 2220 cookiep, ccountp);
2284 2221 else
2285 2222 #endif
2286 2223 ret = rootnex_coredma_bindhdl(dip, rdip, handle, dmareq,
2287 2224 cookiep, ccountp);
2288 2225
2289 2226 if (ret == DDI_DMA_NORESOURCES && dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
2290 2227 ddi_set_callback(dmareq->dmar_fp, dmareq->dmar_arg,
2291 2228 &rootnex_state->r_dvma_call_list_id);
2292 2229 }
2293 2230
2294 2231 return (ret);
2295 2232 }
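
For context, here is a minimal sketch of the leaf-driver side of this alloc/bind
path. The attribute values, function name, and buffer are hypothetical and error
handling is abbreviated; only documented DDI calls (ddi_dma_alloc_handle(9F),
ddi_dma_addr_bind_handle(9F), ddi_dma_nextcookie(9F), ddi_dma_unbind_handle(9F),
ddi_dma_free_handle(9F)) are used.

static int
example_dma_map(dev_info_t *dip, caddr_t buf, size_t len)
{
	ddi_dma_handle_t dmah;
	ddi_dma_cookie_t cookie;
	uint_t ccount, i;

	static ddi_dma_attr_t ex_attr = {
		DMA_ATTR_V0,			/* dma_attr_version */
		0x0000000000000000ull,		/* dma_attr_addr_lo */
		0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi */
		0x00000000FFFFFFFFull,		/* dma_attr_count_max */
		1,				/* dma_attr_align */
		1,				/* dma_attr_burstsizes */
		1,				/* dma_attr_minxfer */
		0x00000000FFFFFFFFull,		/* dma_attr_maxxfer */
		0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg */
		16,				/* dma_attr_sgllen */
		1,				/* dma_attr_granular */
		0				/* dma_attr_flags */
	};

	if (ddi_dma_alloc_handle(dip, &ex_attr, DDI_DMA_SLEEP, NULL,
	    &dmah) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (ddi_dma_addr_bind_handle(dmah, NULL, buf, len,
	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
	    &cookie, &ccount) != DDI_DMA_MAPPED) {
		ddi_dma_free_handle(&dmah);
		return (DDI_FAILURE);
	}

	/* the first cookie comes back on the stack; fetch the rest */
	for (i = 1; i < ccount; i++)
		ddi_dma_nextcookie(dmah, &cookie);

	(void) ddi_dma_unbind_handle(dmah);
	ddi_dma_free_handle(&dmah);
	return (DDI_SUCCESS);
}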
2296 2233
2297 2234
2298 2235
2299 2236 /*ARGSUSED*/
2300 2237 static int
2301 2238 rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
2302 2239 ddi_dma_handle_t handle)
2303 2240 {
2304 2241 ddi_dma_impl_t *hp;
2305 2242 rootnex_dma_t *dma;
2306 2243 int e;
2307 2244
2308 2245 hp = (ddi_dma_impl_t *)handle;
2309 2246 dma = (rootnex_dma_t *)hp->dmai_private;
2310 2247
2311 2248 /* make sure the buffer wasn't free'd before calling unbind */
2312 2249 if (rootnex_unbind_verify_buffer) {
2313 2250 e = rootnex_verify_buffer(dma);
2314 2251 if (e != DDI_SUCCESS) {
2315 2252 ASSERT(0);
2316 2253 return (DDI_FAILURE);
2317 2254 }
2318 2255 }
2319 2256
2320 2257 /* sync the current window before unbinding the buffer */
2321 2258 if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
2322 2259 (hp->dmai_rflags & DDI_DMA_READ)) {
2323 2260 (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
2324 2261 DDI_DMA_SYNC_FORCPU);
2325 2262 }
2326 2263
2327 2264 /*
2328 2265 * cleanup and copy buffer or window state. if we didn't use the copy
2329 2266 * buffer or windows, there won't be much to do :-)
2330 2267 */
2331 2268 rootnex_teardown_copybuf(dma);
2332 2269 rootnex_teardown_windows(dma);
2333 2270
2334 2271 #if defined(__amd64) && !defined(__xpv)
2335 2272 if (IOMMU_USED(rdip) && dma->dp_dvma_used)
2336 2273 (void) iommulib_nexdma_unmapobject(dip, rdip, handle,
2337 2274 &dma->dp_dvma);
2338 2275 #endif
2339 2276
2340 2277 /*
2341 2278  * If we had to allocate space for the worst-case sgl (it didn't
2342 2279  * fit into our pre-allocated buffer), free that up now.
2343 2280 */
2344 2281 if (dma->dp_need_to_free_cookie) {
2345 2282 kmem_free(dma->dp_cookies, dma->dp_cookie_size);
2346 2283 }
2347 2284
2348 2285 /*
2349 2286 * clean up the handle so it's ready for the next bind (i.e. if the
2350 2287 * handle is reused).
2351 2288 */
2352 2289 rootnex_clean_dmahdl(hp);
2353 2290 hp->dmai_error.err_cf = NULL;
2354 2291
2355 2292 ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2356 2293 ROOTNEX_DPROBE1(rootnex__unbind, uint64_t,
2357 2294 rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
2358 2295
2359 2296 return (DDI_SUCCESS);
2360 2297 }
2361 2298
2362 2299 /*
2363 2300 * rootnex_dma_unbindhdl()
2364 2301 * called from ddi_dma_unbind_handle()
2365 2302 */
2366 2303 /*ARGSUSED*/
2367 2304 static int
2368 2305 rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
2369 2306 ddi_dma_handle_t handle)
2370 2307 {
2371 2308 int ret;
2372 2309
2373 2310 #if defined(__amd64) && !defined(__xpv)
2374 2311 if (IOMMU_USED(rdip))
2375 2312 ret = iommulib_nexdma_unbindhdl(dip, rdip, handle);
2376 2313 else
2377 2314 #endif
2378 2315 ret = rootnex_coredma_unbindhdl(dip, rdip, handle);
2379 2316
2380 2317 if (rootnex_state->r_dvma_call_list_id)
2381 2318 ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
2382 2319
2383 2320 return (ret);
2384 2321 }
2385 2322
2386 2323 #if defined(__amd64) && !defined(__xpv)
2387 2324
2388 2325 static int
2389 2326 rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle)
2390 2327 {
2391 2328 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2392 2329 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
2393 2330
2394 2331 if (dma->dp_sleep_flags != KM_SLEEP &&
2395 2332 dma->dp_sleep_flags != KM_NOSLEEP)
2396 2333 cmn_err(CE_PANIC, "kmem sleep flags not set in DMA handle");
2397 2334 return (dma->dp_sleep_flags);
2398 2335 }
2399 2336 /*ARGSUSED*/
2400 2337 static void
2401 2338 rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
2402 2339 {
2403 2340 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2404 2341 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
2405 2342 rootnex_window_t *window;
2406 2343
2407 2344 if (dma->dp_window) {
2408 2345 window = &dma->dp_window[dma->dp_current_win];
2409 2346 hp->dmai_cookie = window->wd_first_cookie;
2410 2347 } else {
2411 2348 hp->dmai_cookie = dma->dp_cookies;
2412 2349 }
2413 2350 hp->dmai_cookie++;
2414 2351 }
2415 2352
2416 2353 /*ARGSUSED*/
2417 2354 static int
2418 2355 rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
2419 2356 ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
2420 2357 {
2421 2358 int i;
2422 2359 int km_flags;
2423 2360 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2424 2361 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
2425 2362 rootnex_window_t *window;
2426 2363 ddi_dma_cookie_t *cp;
2427 2364 ddi_dma_cookie_t *cookie;
2428 2365
2429 2366 ASSERT(*cookiepp == NULL);
2430 2367 ASSERT(*ccountp == 0);
2431 2368
2432 2369 if (dma->dp_window) {
2433 2370 window = &dma->dp_window[dma->dp_current_win];
2434 2371 cp = window->wd_first_cookie;
2435 2372 *ccountp = window->wd_cookie_cnt;
2436 2373 } else {
2437 2374 cp = dma->dp_cookies;
2438 2375 *ccountp = dma->dp_sglinfo.si_sgl_size;
2439 2376 }
2440 2377
2441 2378 km_flags = rootnex_coredma_get_sleep_flags(handle);
2442 2379 cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) * (*ccountp), km_flags);
2443 2380 if (cookie == NULL) {
2444 2381 return (DDI_DMA_NORESOURCES);
2445 2382 }
2446 2383
2447 2384 for (i = 0; i < *ccountp; i++) {
2448 2385 cookie[i].dmac_notused = cp[i].dmac_notused;
2449 2386 cookie[i].dmac_type = cp[i].dmac_type;
2450 2387 cookie[i].dmac_address = cp[i].dmac_address;
2451 2388 cookie[i].dmac_size = cp[i].dmac_size;
2452 2389 }
2453 2390
2454 2391 *cookiepp = cookie;
2455 2392
2456 2393 return (DDI_SUCCESS);
2457 2394 }
2458 2395
2459 2396 /*ARGSUSED*/
2460 2397 static int
2461 2398 rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
2462 2399 ddi_dma_cookie_t *cookiep, uint_t ccount)
2463 2400 {
2464 2401 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2465 2402 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
2466 2403 rootnex_window_t *window;
2467 2404 ddi_dma_cookie_t *cur_cookiep;
2468 2405
2469 2406 ASSERT(cookiep);
2470 2407 ASSERT(ccount != 0);
2471 2408 ASSERT(dma->dp_need_to_switch_cookies == B_FALSE);
2472 2409
2473 2410 if (dma->dp_window) {
2474 2411 window = &dma->dp_window[dma->dp_current_win];
2475 2412 dma->dp_saved_cookies = window->wd_first_cookie;
2476 2413 window->wd_first_cookie = cookiep;
2477 2414 ASSERT(ccount == window->wd_cookie_cnt);
2478 2415 cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
2479 2416 + window->wd_first_cookie;
2480 2417 } else {
2481 2418 dma->dp_saved_cookies = dma->dp_cookies;
2482 2419 dma->dp_cookies = cookiep;
2483 2420 ASSERT(ccount == dma->dp_sglinfo.si_sgl_size);
2484 2421 cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
2485 2422 + dma->dp_cookies;
2486 2423 }
2487 2424
2488 2425 dma->dp_need_to_switch_cookies = B_TRUE;
2489 2426 hp->dmai_cookie = cur_cookiep;
2490 2427
2491 2428 return (DDI_SUCCESS);
2492 2429 }
2493 2430
2494 2431 /*ARGSUSED*/
2495 2432 static int
2496 2433 rootnex_coredma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
2497 2434 {
2498 2435 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
2499 2436 rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
2500 2437 rootnex_window_t *window;
2501 2438 ddi_dma_cookie_t *cur_cookiep;
2502 2439 ddi_dma_cookie_t *cookie_array;
2503 2440 uint_t ccount;
2504 2441
2505 2442 /* check if cookies have not been switched */
2506 2443 if (dma->dp_need_to_switch_cookies == B_FALSE)
2507 2444 return (DDI_SUCCESS);
2508 2445
2509 2446 ASSERT(dma->dp_saved_cookies);
2510 2447
2511 2448 if (dma->dp_window) {
2512 2449 window = &dma->dp_window[dma->dp_current_win];
2513 2450 cookie_array = window->wd_first_cookie;
2514 2451 window->wd_first_cookie = dma->dp_saved_cookies;
2515 2452 dma->dp_saved_cookies = NULL;
2516 2453 ccount = window->wd_cookie_cnt;
2517 2454 cur_cookiep = (hp->dmai_cookie - cookie_array)
2518 2455 + window->wd_first_cookie;
2519 2456 } else {
2520 2457 cookie_array = dma->dp_cookies;
2521 2458 dma->dp_cookies = dma->dp_saved_cookies;
2522 2459 dma->dp_saved_cookies = NULL;
2523 2460 ccount = dma->dp_sglinfo.si_sgl_size;
2524 2461 cur_cookiep = (hp->dmai_cookie - cookie_array)
2525 2462 + dma->dp_cookies;
2526 2463 }
2527 2464
2528 2465 kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
2529 2466
2530 2467 hp->dmai_cookie = cur_cookiep;
2531 2468
2532 2469 dma->dp_need_to_switch_cookies = B_FALSE;
2533 2470
2534 2471 return (DDI_SUCCESS);
2535 2472 }
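
The set/clear cookie routines above preserve the position of the dmai_cookie
iterator across the swap of cookie arrays by recomputing the same element index
relative to the new array. A minimal, self-contained sketch of that pointer
arithmetic (arrays, index, and function name are hypothetical):

static void
example_swap_cookie_array(void)
{
	ddi_dma_cookie_t old_array[8], new_array[8];
	ddi_dma_cookie_t *iter = &old_array[3];	/* current iterator position */

	/* same element index, but relative to the replacement array */
	iter = new_array + (iter - old_array);	/* now &new_array[3] */
}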
2536 2473
2537 2474 #endif
2538 2475
2539 2476 static struct as *
2540 2477 rootnex_get_as(ddi_dma_obj_t *dmao)
2541 2478 {
2542 2479 struct as *asp;
2543 2480
2544 2481 switch (dmao->dmao_type) {
2545 2482 case DMA_OTYP_VADDR:
2546 2483 case DMA_OTYP_BUFVADDR:
2547 2484 asp = dmao->dmao_obj.virt_obj.v_as;
2548 2485 if (asp == NULL)
2549 2486 asp = &kas;
2550 2487 break;
2551 2488 default:
2552 2489 asp = NULL;
2553 2490 break;
2554 2491 }
2555 2492 return (asp);
2556 2493 }
2557 2494
2558 2495 /*
2559 2496 * rootnex_verify_buffer()
2560 2497 * verify buffer wasn't free'd
2561 2498 */
2562 2499 static int
2563 2500 rootnex_verify_buffer(rootnex_dma_t *dma)
2564 2501 {
2565 2502 page_t **pplist;
2566 2503 caddr_t vaddr;
2567 2504 uint_t pcnt;
2568 2505 uint_t poff;
2569 2506 page_t *pp;
2570 2507 char b;
2571 2508 int i;
2572 2509
2573 2510 /* Figure out how many pages this buffer occupies */
2574 2511 if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
2575 2512 poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
2576 2513 } else {
2577 2514 vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
2578 2515 poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2579 2516 }
2580 2517 pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);
2581 2518
2582 2519 switch (dma->dp_dma.dmao_type) {
2583 2520 case DMA_OTYP_PAGES:
2584 2521 /*
2585 2522 * for a linked list of pp's walk through them to make sure
2586 2523 * they're locked and not free.
2587 2524 */
2588 2525 pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
2589 2526 for (i = 0; i < pcnt; i++) {
2590 2527 if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
2591 2528 return (DDI_FAILURE);
2592 2529 }
2593 2530 pp = pp->p_next;
2594 2531 }
2595 2532 break;
2596 2533
2597 2534 case DMA_OTYP_VADDR:
2598 2535 case DMA_OTYP_BUFVADDR:
2599 2536 pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
2600 2537 /*
2601 2538 * for an array of pp's walk through them to make sure they're
2602 2539 * not free. It's possible that they may not be locked.
2603 2540 */
2604 2541 if (pplist) {
2605 2542 for (i = 0; i < pcnt; i++) {
2606 2543 if (PP_ISFREE(pplist[i])) {
2607 2544 return (DDI_FAILURE);
2608 2545 }
2609 2546 }
2610 2547
2611 2548 /* For a virtual address, try to peek at each page */
2612 2549 } else {
2613 2550 if (rootnex_get_as(&dma->dp_dma) == &kas) {
2614 2551 for (i = 0; i < pcnt; i++) {
2615 2552 if (ddi_peek8(NULL, vaddr, &b) ==
2616 2553 DDI_FAILURE)
2617 2554 return (DDI_FAILURE);
2618 2555 vaddr += MMU_PAGESIZE;
2619 2556 }
2620 2557 }
2621 2558 }
2622 2559 break;
2623 2560
2624 2561 default:
2625 2562 cmn_err(CE_PANIC, "rootnex_verify_buffer: bad DMA object");
2626 2563 break;
2627 2564 }
2628 2565
2629 2566 return (DDI_SUCCESS);
2630 2567 }
2631 2568
2632 2569
2633 2570 /*
2634 2571 * rootnex_clean_dmahdl()
2635 2572  * Clean the dma handle. This should be called on handle alloc and on
2636 2573  * handle unbind. Set the handle state to the default settings.
2637 2574 */
2638 2575 static void
2639 2576 rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
2640 2577 {
2641 2578 rootnex_dma_t *dma;
2642 2579
2643 2580
2644 2581 dma = (rootnex_dma_t *)hp->dmai_private;
2645 2582
2646 2583 hp->dmai_nwin = 0;
2647 2584 dma->dp_current_cookie = 0;
2648 2585 dma->dp_copybuf_size = 0;
2649 2586 dma->dp_window = NULL;
2650 2587 dma->dp_cbaddr = NULL;
2651 2588 dma->dp_inuse = B_FALSE;
2652 2589 dma->dp_dvma_used = B_FALSE;
2653 2590 dma->dp_need_to_free_cookie = B_FALSE;
2654 2591 dma->dp_need_to_switch_cookies = B_FALSE;
2655 2592 dma->dp_saved_cookies = NULL;
2656 2593 dma->dp_sleep_flags = KM_PANIC;
2657 2594 dma->dp_need_to_free_window = B_FALSE;
2658 2595 dma->dp_partial_required = B_FALSE;
2659 2596 dma->dp_trim_required = B_FALSE;
2660 2597 dma->dp_sglinfo.si_copybuf_req = 0;
2661 2598 #if !defined(__amd64)
2662 2599 dma->dp_cb_remaping = B_FALSE;
2663 2600 dma->dp_kva = NULL;
2664 2601 #endif
2665 2602
2666 2603 /* FMA related initialization */
2667 2604 hp->dmai_fault = 0;
2668 2605 hp->dmai_fault_check = NULL;
2669 2606 hp->dmai_fault_notify = NULL;
2670 2607 hp->dmai_error.err_ena = 0;
2671 2608 hp->dmai_error.err_status = DDI_FM_OK;
2672 2609 hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
2673 2610 hp->dmai_error.err_ontrap = NULL;
2674 2611 }
2675 2612
2676 2613
2677 2614 /*
2678 2615 * rootnex_valid_alloc_parms()
2679 2616 * Called in ddi_dma_alloc_handle path to validate its parameters.
2680 2617 */
2681 2618 static int
2682 2619 rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
2683 2620 {
2684 2621 if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
2685 2622 (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
2686 2623 (attr->dma_attr_granular > MMU_PAGESIZE) ||
2687 2624 (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
2688 2625 return (DDI_DMA_BADATTR);
2689 2626 }
2690 2627
2691 2628 if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
2692 2629 return (DDI_DMA_BADATTR);
2693 2630 }
2694 2631
2695 2632 if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
2696 2633 MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
2697 2634 attr->dma_attr_sgllen == 0) {
2698 2635 return (DDI_DMA_BADATTR);
2699 2636 }
2700 2637
2701 2638 /* We should be able to DMA into every byte offset in a page */
2702 2639 if (maxsegmentsize < MMU_PAGESIZE) {
2703 2640 return (DDI_DMA_BADATTR);
2704 2641 }
2705 2642
2706 2643 /* if we're bouncing on seg, seg must be <= addr_hi */
2707 2644 if ((attr->dma_attr_flags & _DDI_DMA_BOUNCE_ON_SEG) &&
2708 2645 (attr->dma_attr_seg > attr->dma_attr_addr_hi)) {
2709 2646 return (DDI_DMA_BADATTR);
2710 2647 }
2711 2648 return (DDI_SUCCESS);
2712 2649 }
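
As an illustration, the following attribute values would pass every check above.
The numbers and the function name are hypothetical and assume a 4 KB MMU_PAGESIZE.

static ddi_dma_attr_t
example_valid_attr(void)
{
	ddi_dma_attr_t ok = { 0 };

	ok.dma_attr_addr_lo = 0;
	ok.dma_attr_addr_hi = 0xFFFFFFFFull;	/* strictly above addr_lo */
	ok.dma_attr_seg = 0xFFFFFFFFull;	/* low page-offset bits all set */
	ok.dma_attr_count_max = 0xFFFFFFFFull;	/* >= MMU_PAGEOFFSET */
	ok.dma_attr_granular = 512;		/* power of two, <= MMU_PAGESIZE */
	ok.dma_attr_maxxfer = 0x100000;		/* >= MMU_PAGESIZE */
	ok.dma_attr_sgllen = 17;		/* non-zero */

	return (ok);
}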
2713 2650
2714 2651 /*
2715 2652 * rootnex_valid_bind_parms()
2716 2653 * Called in ddi_dma_*_bind_handle path to validate its parameters.
2717 2654 */
2718 2655 /* ARGSUSED */
2719 2656 static int
2720 2657 rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
2721 2658 {
2722 2659 #if !defined(__amd64)
2723 2660 /*
2724 2661 * we only support up to a 2G-1 transfer size on 32-bit kernels so
2725 2662 * we can track the offset for the obsoleted interfaces.
2726 2663 */
2727 2664 if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
2728 2665 return (DDI_DMA_TOOBIG);
2729 2666 }
2730 2667 #endif
2731 2668
2732 2669 return (DDI_SUCCESS);
2733 2670 }
2734 2671
2735 2672
2736 2673 /*
2737 2674 * rootnex_need_bounce_seg()
2738 2675  * check to see if the buffer lives on both sides of the seg.
2739 2676 */
2740 2677 static boolean_t
2741 2678 rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object, rootnex_sglinfo_t *sglinfo)
2742 2679 {
2743 2680 ddi_dma_atyp_t buftype;
2744 2681 rootnex_addr_t raddr;
2745 2682 boolean_t lower_addr;
2746 2683 boolean_t upper_addr;
2747 2684 uint64_t offset;
2748 2685 page_t **pplist;
2749 2686 uint64_t paddr;
2750 2687 uint32_t psize;
2751 2688 uint32_t size;
2752 2689 caddr_t vaddr;
2753 2690 uint_t pcnt;
2754 2691 page_t *pp;
2755 2692
2756 2693
2757 2694 /* shortcuts */
2758 2695 pplist = dmar_object->dmao_obj.virt_obj.v_priv;
2759 2696 vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
2760 2697 buftype = dmar_object->dmao_type;
2761 2698 size = dmar_object->dmao_size;
2762 2699
2763 2700 lower_addr = B_FALSE;
2764 2701 upper_addr = B_FALSE;
2765 2702 pcnt = 0;
2766 2703
2767 2704 /*
2768 2705 * Process the first page to handle the initial offset of the buffer.
2769 2706 * We'll use the base address we get later when we loop through all
2770 2707 * the pages.
2771 2708 */
2772 2709 if (buftype == DMA_OTYP_PAGES) {
2773 2710 pp = dmar_object->dmao_obj.pp_obj.pp_pp;
2774 2711 offset = dmar_object->dmao_obj.pp_obj.pp_offset &
2775 2712 MMU_PAGEOFFSET;
2776 2713 paddr = pfn_to_pa(pp->p_pagenum) + offset;
2777 2714 psize = MIN(size, (MMU_PAGESIZE - offset));
2778 2715 pp = pp->p_next;
2779 2716 sglinfo->si_asp = NULL;
2780 2717 } else if (pplist != NULL) {
2781 2718 offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2782 2719 sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2783 2720 if (sglinfo->si_asp == NULL) {
2784 2721 sglinfo->si_asp = &kas;
2785 2722 }
2786 2723 paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
2787 2724 paddr += offset;
2788 2725 psize = MIN(size, (MMU_PAGESIZE - offset));
2789 2726 pcnt++;
2790 2727 } else {
2791 2728 offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2792 2729 sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2793 2730 if (sglinfo->si_asp == NULL) {
2794 2731 sglinfo->si_asp = &kas;
2795 2732 }
2796 2733 paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
2797 2734 paddr += offset;
2798 2735 psize = MIN(size, (MMU_PAGESIZE - offset));
2799 2736 vaddr += psize;
2800 2737 }
2801 2738
2802 2739 raddr = ROOTNEX_PADDR_TO_RBASE(paddr);
2803 2740
2804 2741 if ((raddr + psize) > sglinfo->si_segmask) {
2805 2742 upper_addr = B_TRUE;
2806 2743 } else {
2807 2744 lower_addr = B_TRUE;
2808 2745 }
2809 2746 size -= psize;
2810 2747
2811 2748 /*
2812 2749 * Walk through the rest of the pages in the buffer. Track to see
2813 2750 * if we have pages on both sides of the segment boundary.
2814 2751 */
2815 2752 while (size > 0) {
2816 2753 /* partial or full page */
2817 2754 psize = MIN(size, MMU_PAGESIZE);
2818 2755
2819 2756 if (buftype == DMA_OTYP_PAGES) {
2820 2757 /* get the paddr from the page_t */
2821 2758 ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2822 2759 paddr = pfn_to_pa(pp->p_pagenum);
2823 2760 pp = pp->p_next;
2824 2761 } else if (pplist != NULL) {
2825 2762 /* index into the array of page_t's to get the paddr */
2826 2763 ASSERT(!PP_ISFREE(pplist[pcnt]));
2827 2764 paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
2828 2765 pcnt++;
2829 2766 } else {
2830 2767 /* call into the VM to get the paddr */
2831 2768 paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
2832 2769 vaddr));
2833 2770 vaddr += psize;
2834 2771 }
2835 2772
2836 2773 raddr = ROOTNEX_PADDR_TO_RBASE(paddr);
2837 2774
2838 2775 if ((raddr + psize) > sglinfo->si_segmask) {
2839 2776 upper_addr = B_TRUE;
2840 2777 } else {
2841 2778 lower_addr = B_TRUE;
2842 2779 }
2843 2780 /*
2844 2781 * if the buffer lives both above and below the segment
2845 2782 * boundary, or the current page is the page immediately
2846 2783 * after the segment, we will use a copy/bounce buffer for
2847 2784 * all pages > seg.
2848 2785 */
2849 2786 if ((lower_addr && upper_addr) ||
2850 2787 (raddr == (sglinfo->si_segmask + 1))) {
2851 2788 return (B_TRUE);
2852 2789 }
2853 2790
2854 2791 size -= psize;
2855 2792 }
2856 2793
2857 2794 return (B_FALSE);
2858 2795 }
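
A worked example of the straddle test above, assuming si_segmask = 0xFFFFFFFF
(a 4 GB segment) and 4 KB pages; the addresses and the function name are
hypothetical.

static boolean_t
example_straddles_seg(void)
{
	uint64_t segmask = 0xFFFFFFFFull;
	uint64_t lo_page = 0x7000ull;
	uint64_t hi_page = 0x100000000ull;
	uint32_t psize = 0x1000;
	boolean_t lower_addr, upper_addr;

	lower_addr = ((lo_page + psize) <= segmask);	/* B_TRUE */
	upper_addr = ((hi_page + psize) > segmask);	/* B_TRUE */

	/*
	 * Pages on both sides (or hi_page == segmask + 1, the page right
	 * after the segment) mean every page > seg must be bounced.
	 */
	return (lower_addr && upper_addr);
}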
2859 2796
2860 2797 /*
2861 2798 * rootnex_get_sgl()
2862 2799 * Called in bind fastpath to get the sgl. Most of this will be replaced
2863 2800 * with a call to the vm layer when vm2.0 comes around...
2864 2801 */
2865 2802 static void
2866 2803 rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
2867 2804 rootnex_sglinfo_t *sglinfo)
2868 2805 {
2869 2806 ddi_dma_atyp_t buftype;
2870 2807 rootnex_addr_t raddr;
2871 2808 uint64_t last_page;
2872 2809 uint64_t offset;
2873 2810 uint64_t addrhi;
2874 2811 uint64_t addrlo;
2875 2812 uint64_t maxseg;
2876 2813 page_t **pplist;
2877 2814 uint64_t paddr;
2878 2815 uint32_t psize;
2879 2816 uint32_t size;
2880 2817 caddr_t vaddr;
2881 2818 uint_t pcnt;
2882 2819 page_t *pp;
2883 2820 uint_t cnt;
2884 2821
2885 2822
2886 2823 /* shortcuts */
2887 2824 pplist = dmar_object->dmao_obj.virt_obj.v_priv;
2888 2825 vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
2889 2826 maxseg = sglinfo->si_max_cookie_size;
2890 2827 buftype = dmar_object->dmao_type;
2891 2828 addrhi = sglinfo->si_max_addr;
2892 2829 addrlo = sglinfo->si_min_addr;
2893 2830 size = dmar_object->dmao_size;
2894 2831
2895 2832 pcnt = 0;
2896 2833 cnt = 0;
2897 2834
2898 2835
2899 2836 /*
2900 2837 * check to see if we need to use the copy buffer for pages over
2901 2838 * the segment attr.
2902 2839 */
2903 2840 sglinfo->si_bounce_on_seg = B_FALSE;
2904 2841 if (sglinfo->si_flags & _DDI_DMA_BOUNCE_ON_SEG) {
2905 2842 sglinfo->si_bounce_on_seg = rootnex_need_bounce_seg(
2906 2843 dmar_object, sglinfo);
2907 2844 }
2908 2845
2909 2846 /*
2910 2847 * if we were passed down a linked list of pages, i.e. pointer to
2911 2848 * page_t, use this to get our physical address and buf offset.
2912 2849 */
2913 2850 if (buftype == DMA_OTYP_PAGES) {
2914 2851 pp = dmar_object->dmao_obj.pp_obj.pp_pp;
2915 2852 ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
2916 2853 offset = dmar_object->dmao_obj.pp_obj.pp_offset &
2917 2854 MMU_PAGEOFFSET;
2918 2855 paddr = pfn_to_pa(pp->p_pagenum) + offset;
2919 2856 psize = MIN(size, (MMU_PAGESIZE - offset));
2920 2857 pp = pp->p_next;
2921 2858 sglinfo->si_asp = NULL;
2922 2859
2923 2860 /*
2924 2861 * We weren't passed down a linked list of pages, but if we were passed
2925 2862 * down an array of pages, use this to get our physical address and buf
2926 2863 * offset.
2927 2864 */
2928 2865 } else if (pplist != NULL) {
2929 2866 ASSERT((buftype == DMA_OTYP_VADDR) ||
2930 2867 (buftype == DMA_OTYP_BUFVADDR));
2931 2868
2932 2869 offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2933 2870 sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2934 2871 if (sglinfo->si_asp == NULL) {
2935 2872 sglinfo->si_asp = &kas;
2936 2873 }
2937 2874
2938 2875 ASSERT(!PP_ISFREE(pplist[pcnt]));
2939 2876 paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
2940 2877 paddr += offset;
2941 2878 psize = MIN(size, (MMU_PAGESIZE - offset));
2942 2879 pcnt++;
2943 2880
2944 2881 /*
2945 2882 * All we have is a virtual address, we'll need to call into the VM
2946 2883 * to get the physical address.
2947 2884 */
2948 2885 } else {
2949 2886 ASSERT((buftype == DMA_OTYP_VADDR) ||
2950 2887 (buftype == DMA_OTYP_BUFVADDR));
2951 2888
2952 2889 offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
2953 2890 sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
2954 2891 if (sglinfo->si_asp == NULL) {
2955 2892 sglinfo->si_asp = &kas;
2956 2893 }
2957 2894
2958 2895 paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
2959 2896 paddr += offset;
2960 2897 psize = MIN(size, (MMU_PAGESIZE - offset));
2961 2898 vaddr += psize;
2962 2899 }
2963 2900
2964 2901 raddr = ROOTNEX_PADDR_TO_RBASE(paddr);
2965 2902
2966 2903 /*
2967 2904  * Set up the first cookie with the physical address of the page and the
2968 2905  * size of the page (which takes into account the initial offset into
2969 2906  * the page).
2970 2907 */
2971 2908 sgl[cnt].dmac_laddress = raddr;
2972 2909 sgl[cnt].dmac_size = psize;
2973 2910 sgl[cnt].dmac_type = 0;
2974 2911
2975 2912 /*
2976 2913 * Save away the buffer offset into the page. We'll need this later in
2977 2914 * the copy buffer code to help figure out the page index within the
2978 2915 * buffer and the offset into the current page.
2979 2916 */
2980 2917 sglinfo->si_buf_offset = offset;
2981 2918
2982 2919 /*
2983 2920  * Use the copy buffer if we are bouncing anything over the segment
2984 2921  * boundary and this page is over the segment boundary,
2985 2922  * OR
2986 2923  * if the DMA engine can't reach the physical address.
2987 2924 */
2988 2925 if (((sglinfo->si_bounce_on_seg) &&
2989 2926 ((raddr + psize) > sglinfo->si_segmask)) ||
2990 2927 ((raddr < addrlo) || ((raddr + psize) > addrhi))) {
2991 2928 /*
2992 2929 * Increase how much copy buffer we use. We always increase by
2993 2930 * pagesize so we don't have to worry about converting offsets.
2994 2931  * Set a flag in the cookie's dmac_type to indicate that it uses
2995 2932  * the copy buffer. If this isn't the last cookie, go to the
2996 2933  * next cookie (since we separate each page which uses the copy
2997 2934  * buffer in case the copy buffer is not physically contiguous).
2998 2935 */
2999 2936 sglinfo->si_copybuf_req += MMU_PAGESIZE;
3000 2937 sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
3001 2938 if ((cnt + 1) < sglinfo->si_max_pages) {
3002 2939 cnt++;
3003 2940 sgl[cnt].dmac_laddress = 0;
3004 2941 sgl[cnt].dmac_size = 0;
3005 2942 sgl[cnt].dmac_type = 0;
3006 2943 }
3007 2944 }
3008 2945
3009 2946 /*
3010 2947 * save this page's physical address so we can figure out if the next
3011 2948 * page is physically contiguous. Keep decrementing size until we are
3012 2949 * done with the buffer.
3013 2950 */
3014 2951 last_page = raddr & MMU_PAGEMASK;
3015 2952 size -= psize;
3016 2953
3017 2954 while (size > 0) {
3018 2955 /* Get the size for this page (i.e. partial or full page) */
3019 2956 psize = MIN(size, MMU_PAGESIZE);
3020 2957
3021 2958 if (buftype == DMA_OTYP_PAGES) {
3022 2959 /* get the paddr from the page_t */
3023 2960 ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
3024 2961 paddr = pfn_to_pa(pp->p_pagenum);
3025 2962 pp = pp->p_next;
3026 2963 } else if (pplist != NULL) {
3027 2964 /* index into the array of page_t's to get the paddr */
3028 2965 ASSERT(!PP_ISFREE(pplist[pcnt]));
3029 2966 paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
3030 2967 pcnt++;
3031 2968 } else {
3032 2969 /* call into the VM to get the paddr */
3033 2970 paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
3034 2971 vaddr));
3035 2972 vaddr += psize;
3036 2973 }
3037 2974
3038 2975 raddr = ROOTNEX_PADDR_TO_RBASE(paddr);
3039 2976
3040 2977 /*
3041 2978  * Use the copy buffer if we are bouncing anything over the
3042 2979  * segment boundary and this page is over the segment
3043 2980  * boundary,
3044 2981  * OR
3045 2982  * if the DMA engine can't reach the physical address.
3046 2983 */
3047 2984 if (((sglinfo->si_bounce_on_seg) &&
3048 2985 ((raddr + psize) > sglinfo->si_segmask)) ||
3049 2986 ((raddr < addrlo) || ((raddr + psize) > addrhi))) {
3050 2987
3051 2988 sglinfo->si_copybuf_req += MMU_PAGESIZE;
3052 2989
3053 2990 /*
3054 2991 * if there is something in the current cookie, go to
3055 2992 * the next one. We only want one page in a cookie which
3056 2993 * uses the copybuf since the copybuf doesn't have to
3057 2994 * be physically contiguous.
3058 2995 */
3059 2996 if (sgl[cnt].dmac_size != 0) {
3060 2997 cnt++;
3061 2998 }
3062 2999 sgl[cnt].dmac_laddress = raddr;
3063 3000 sgl[cnt].dmac_size = psize;
3064 3001 #if defined(__amd64)
3065 3002 sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
3066 3003 #else
3067 3004 /*
3068 3005 * save the buf offset for 32-bit kernel. used in the
3069 3006 * obsoleted interfaces.
3070 3007 */
3071 3008 sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
3072 3009 (dmar_object->dmao_size - size);
3073 3010 #endif
3074 3011 /* if this isn't the last cookie, go to the next one */
3075 3012 if ((cnt + 1) < sglinfo->si_max_pages) {
3076 3013 cnt++;
3077 3014 sgl[cnt].dmac_laddress = 0;
3078 3015 sgl[cnt].dmac_size = 0;
3079 3016 sgl[cnt].dmac_type = 0;
3080 3017 }
3081 3018
3082 3019 /*
3083 3020  * This page didn't need the copy buffer. Start a new cookie if the
3084 3021  * page isn't physically contiguous, or it would put us over a segment
3085 3022  * boundary, or it puts us over the max cookie size, or the current
3086 3023  * sgl doesn't have anything in it.
3087 3024 */
3088 3025 } else if (((last_page + MMU_PAGESIZE) != raddr) ||
3089 3026 !(raddr & sglinfo->si_segmask) ||
3090 3027 ((sgl[cnt].dmac_size + psize) > maxseg) ||
3091 3028 (sgl[cnt].dmac_size == 0)) {
3092 3029 /*
3093 3030 * if we're not already in a new cookie, go to the next
3094 3031 * cookie.
3095 3032 */
3096 3033 if (sgl[cnt].dmac_size != 0) {
3097 3034 cnt++;
3098 3035 }
3099 3036
3100 3037 /* save the cookie information */
3101 3038 sgl[cnt].dmac_laddress = raddr;
3102 3039 sgl[cnt].dmac_size = psize;
3103 3040 #if defined(__amd64)
3104 3041 sgl[cnt].dmac_type = 0;
3105 3042 #else
3106 3043 /*
3107 3044 * save the buf offset for 32-bit kernel. used in the
3108 3045 * obsoleted interfaces.
3109 3046 */
3110 3047 sgl[cnt].dmac_type = dmar_object->dmao_size - size;
3111 3048 #endif
3112 3049
3113 3050 /*
3114 3051 * this page didn't need the copy buffer, it is physically
3115 3052 * contiguous with the last page, and it's <= the max cookie
3116 3053 * size.
3117 3054 */
3118 3055 } else {
3119 3056 sgl[cnt].dmac_size += psize;
3120 3057
3121 3058 /*
3122 3059 * if this exactly == the maximum cookie size, and
3123 3060 * it isn't the last cookie, go to the next cookie.
3124 3061 */
3125 3062 if (((sgl[cnt].dmac_size + psize) == maxseg) &&
3126 3063 ((cnt + 1) < sglinfo->si_max_pages)) {
3127 3064 cnt++;
3128 3065 sgl[cnt].dmac_laddress = 0;
3129 3066 sgl[cnt].dmac_size = 0;
3130 3067 sgl[cnt].dmac_type = 0;
3131 3068 }
3132 3069 }
3133 3070
3134 3071 /*
3135 3072 * save this page's physical address so we can figure out if the
3136 3073 * next page is physically contiguous. Keep decrementing size
3137 3074 * until we are done with the buffer.
3138 3075 */
3139 3076 last_page = raddr;
3140 3077 size -= psize;
3141 3078 }
3142 3079
3143 3080 /* we're done, save away how many cookies the sgl has */
3144 3081 if (sgl[cnt].dmac_size == 0) {
3145 3082 ASSERT(cnt < sglinfo->si_max_pages);
3146 3083 sglinfo->si_sgl_size = cnt;
3147 3084 } else {
3148 3085 sglinfo->si_sgl_size = cnt + 1;
3149 3086 }
3150 3087 }
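
For pages that don't need the copy buffer, the loop above decides between
extending the current cookie and starting a new one. Below is a sketch of that
decision as a standalone predicate; the helper name is hypothetical and the
tests simply mirror the else-if condition above.

static boolean_t
example_need_new_cookie(uint64_t last_page, uint64_t raddr,
    uint64_t cur_size, uint32_t psize, uint64_t maxseg, uint64_t segmask)
{
	return (((last_page + MMU_PAGESIZE) != raddr) ||   /* not contiguous */
	    !(raddr & segmask) ||			   /* starts a new segment */
	    ((cur_size + psize) > maxseg) ||		   /* would exceed maxseg */
	    (cur_size == 0));				   /* nothing merged yet */
}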
3151 3088
3152 3089 static void
3153 3090 rootnex_dvma_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
3154 3091 rootnex_sglinfo_t *sglinfo)
3155 3092 {
3156 3093 uint64_t offset;
3157 3094 uint64_t maxseg;
3158 3095 uint64_t dvaddr;
3159 3096 struct dvmaseg *dvs;
3160 3097 uint64_t paddr;
3161 3098 uint32_t psize, ssize;
3162 3099 uint32_t size;
3163 3100 uint_t cnt;
3164 3101 int physcontig;
3165 3102
3166 3103 ASSERT(dmar_object->dmao_type == DMA_OTYP_DVADDR);
3167 3104
3168 3105 /* shortcuts */
3169 3106 maxseg = sglinfo->si_max_cookie_size;
3170 3107 size = dmar_object->dmao_size;
3171 3108
3172 3109 cnt = 0;
3173 3110 sglinfo->si_bounce_on_seg = B_FALSE;
3174 3111
3175 3112 dvs = dmar_object->dmao_obj.dvma_obj.dv_seg;
3176 3113 offset = dmar_object->dmao_obj.dvma_obj.dv_off;
3177 3114 ssize = dvs->dvs_len;
3178 3115 paddr = dvs->dvs_start;
3179 3116 paddr += offset;
3180 3117 psize = MIN(ssize, (maxseg - offset));
3181 3118 dvaddr = paddr + psize;
3182 3119 ssize -= psize;
3183 3120
3184 3121 sgl[cnt].dmac_laddress = paddr;
3185 3122 sgl[cnt].dmac_size = psize;
3186 3123 sgl[cnt].dmac_type = 0;
3187 3124
3188 3125 size -= psize;
3189 3126 while (size > 0) {
3190 3127 if (ssize == 0) {
3191 3128 dvs++;
3192 3129 ssize = dvs->dvs_len;
3193 3130 dvaddr = dvs->dvs_start;
3194 3131 physcontig = 0;
3195 3132 } else
3196 3133 physcontig = 1;
3197 3134
3198 3135 paddr = dvaddr;
3199 3136 psize = MIN(ssize, maxseg);
3200 3137 dvaddr += psize;
3201 3138 ssize -= psize;
3202 3139
3203 3140 if (!physcontig || !(paddr & sglinfo->si_segmask) ||
3204 3141 ((sgl[cnt].dmac_size + psize) > maxseg) ||
3205 3142 (sgl[cnt].dmac_size == 0)) {
3206 3143 /*
3207 3144 * if we're not already in a new cookie, go to the next
3208 3145 * cookie.
3209 3146 */
3210 3147 if (sgl[cnt].dmac_size != 0) {
3211 3148 cnt++;
3212 3149 }
3213 3150
3214 3151 /* save the cookie information */
3215 3152 sgl[cnt].dmac_laddress = paddr;
3216 3153 sgl[cnt].dmac_size = psize;
3217 3154 sgl[cnt].dmac_type = 0;
3218 3155 } else {
3219 3156 sgl[cnt].dmac_size += psize;
3220 3157
3221 3158 /*
3222 3159 * if this exactly == the maximum cookie size, and
3223 3160 * it isn't the last cookie, go to the next cookie.
3224 3161 */
3225 3162 if (((sgl[cnt].dmac_size + psize) == maxseg) &&
3226 3163 ((cnt + 1) < sglinfo->si_max_pages)) {
3227 3164 cnt++;
3228 3165 sgl[cnt].dmac_laddress = 0;
3229 3166 sgl[cnt].dmac_size = 0;
3230 3167 sgl[cnt].dmac_type = 0;
3231 3168 }
3232 3169 }
3233 3170 size -= psize;
3234 3171 }
3235 3172
3236 3173 /* we're done, save away how many cookies the sgl has */
3237 3174 if (sgl[cnt].dmac_size == 0) {
3238 3175 sglinfo->si_sgl_size = cnt;
3239 3176 } else {
3240 3177 sglinfo->si_sgl_size = cnt + 1;
3241 3178 }
3242 3179 }
3243 3180
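The cookie-building loops in rootnex_get_sgl()/rootnex_dvma_get_sgl() above are easier to reason about in isolation. Below is a minimal user-space model, assuming made-up seg_t/cookie_t types rather than the real dvmaseg/ddi_dma_cookie_t structures: it caps each cookie at maxseg and starts a new cookie whenever the next chunk is not contiguous with the current one or would overflow it. It compiles stand-alone with a C99 compiler.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t start; uint32_t len; } seg_t;      /* stand-in for dvmaseg */
typedef struct { uint64_t addr;  uint64_t size; } cookie_t;  /* stand-in for a cookie */

/* Build cookies from segments, capping each cookie at maxseg. */
static int
build_sgl(const seg_t *segs, int nsegs, uint64_t maxseg, cookie_t *sgl, int maxcookies)
{
	int cnt = 0;

	sgl[0].addr = segs[0].start;
	sgl[0].size = 0;

	for (int i = 0; i < nsegs; i++) {
		uint64_t addr = segs[i].start;
		uint64_t left = segs[i].len;
		/* contiguous with the current cookie only if it extends it exactly */
		int contig = (addr == sgl[cnt].addr + sgl[cnt].size);

		while (left > 0) {
			uint64_t chunk = left < maxseg ? left : maxseg;

			if (!contig || sgl[cnt].size + chunk > maxseg ||
			    sgl[cnt].size == 0) {
				/* start a new cookie unless the current one is empty */
				if (sgl[cnt].size != 0) {
					if (++cnt == maxcookies)
						return (-1);
				}
				sgl[cnt].addr = addr;
				sgl[cnt].size = chunk;
			} else {
				sgl[cnt].size += chunk;	/* merge contiguous run */
			}
			addr += chunk;
			left -= chunk;
			contig = 1;	/* rest of this segment is contiguous */
		}
	}
	return (cnt + 1);
}

int
main(void)
{
	seg_t segs[] = { { 0x1000, 0x3000 }, { 0x4000, 0x1000 }, { 0x9000, 0x800 } };
	cookie_t sgl[8];
	int n = build_sgl(segs, 3, 0x2000, sgl, 8);

	for (int i = 0; i < n; i++)
		printf("cookie %d: addr=0x%llx size=0x%llx\n", i,
		    (unsigned long long)sgl[i].addr, (unsigned long long)sgl[i].size);
	return (0);
}
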
3244 3181 /*
3245 3182 * rootnex_bind_slowpath()
3246 3183 * Call in the bind path if the calling driver can't use the sgl without
3247 3184 * modifying it. We either need to use the copy buffer and/or we will end up
3248 3185 * with a partial bind.
3249 3186 */
3250 3187 static int
3251 3188 rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
3252 3189 rootnex_dma_t *dma, ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag)
3253 3190 {
3254 3191 rootnex_sglinfo_t *sinfo;
3255 3192 rootnex_window_t *window;
3256 3193 ddi_dma_cookie_t *cookie;
3257 3194 size_t copybuf_used;
3258 3195 size_t dmac_size;
3259 3196 boolean_t partial;
3260 3197 off_t cur_offset;
3261 3198 page_t *cur_pp;
3262 3199 major_t mnum;
3263 3200 int e;
3264 3201 int i;
3265 3202
3266 3203
3267 3204 sinfo = &dma->dp_sglinfo;
3268 3205 copybuf_used = 0;
3269 3206 partial = B_FALSE;
3270 3207
3271 3208 /*
3272 3209 * If we're using the copybuf, set the copybuf state in dma struct.
3273 3210 * Needs to be first since it sets the copy buffer size.
3274 3211 */
3275 3212 if (sinfo->si_copybuf_req != 0) {
3276 3213 e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
3277 3214 if (e != DDI_SUCCESS) {
3278 3215 return (e);
3279 3216 }
3280 3217 } else {
3281 3218 dma->dp_copybuf_size = 0;
3282 3219 }
3283 3220
3284 3221 /*
3285 3222 * Figure out if we need to do a partial mapping. If so, figure out
3286 3223 * if we need to trim the buffers when we munge the sgl.
3287 3224 */
3288 3225 if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
3289 3226 (dmao->dmao_size > dma->dp_maxxfer) ||
3290 3227 ((unsigned)attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
3291 3228 dma->dp_partial_required = B_TRUE;
3292 3229 if (attr->dma_attr_granular != 1) {
3293 3230 dma->dp_trim_required = B_TRUE;
3294 3231 }
3295 3232 } else {
3296 3233 dma->dp_partial_required = B_FALSE;
3297 3234 dma->dp_trim_required = B_FALSE;
3298 3235 }
3299 3236
3300 3237 /* If we need to do a partial bind, make sure the driver supports it */
3301 3238 if (dma->dp_partial_required &&
3302 3239 !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {
3303 3240
3304 3241 mnum = ddi_driver_major(dma->dp_dip);
3305 3242 /*
3306 3243 		 * rootnex_bind_warn is patchable, which allows us to print one
3307 3244 		 * warning per major number.
3308 3245 */
3309 3246 if ((rootnex_bind_warn) &&
3310 3247 ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
3311 3248 rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
3312 3249 cmn_err(CE_WARN, "!%s: coding error detected, the "
3313 3250 "driver is using ddi_dma_attr(9S) incorrectly. "
3314 3251 "There is a small risk of data corruption in "
3315 3252 "particular with large I/Os. The driver should be "
3316 3253 "replaced with a corrected version for proper "
3317 3254 "system operation. To disable this warning, add "
3318 3255 "'set rootnex:rootnex_bind_warn=0' to "
3319 3256 "/etc/system(4).", ddi_driver_name(dma->dp_dip));
3320 3257 }
3321 3258 return (DDI_DMA_TOOBIG);
3322 3259 }
3323 3260
3324 3261 /*
3325 3262 * we might need multiple windows, setup state to handle them. In this
3326 3263 * code path, we will have at least one window.
3327 3264 */
3328 3265 e = rootnex_setup_windows(hp, dma, attr, dmao, kmflag);
3329 3266 if (e != DDI_SUCCESS) {
3330 3267 rootnex_teardown_copybuf(dma);
3331 3268 return (e);
3332 3269 }
3333 3270
3334 3271 window = &dma->dp_window[0];
3335 3272 cookie = &dma->dp_cookies[0];
3336 3273 cur_offset = 0;
3337 3274 rootnex_init_win(hp, dma, window, cookie, cur_offset);
3338 3275 if (dmao->dmao_type == DMA_OTYP_PAGES) {
3339 3276 cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
3340 3277 }
3341 3278
3342 3279 	/* loop through all the cookies we got back from get_sgl() */
3343 3280 for (i = 0; i < sinfo->si_sgl_size; i++) {
3344 3281 /*
3345 3282 * If we're using the copy buffer, check this cookie and setup
3346 3283 * its associated copy buffer state. If this cookie uses the
3347 3284 * copy buffer, make sure we sync this window during dma_sync.
3348 3285 */
3349 3286 if (dma->dp_copybuf_size > 0) {
3350 3287 rootnex_setup_cookie(dmao, dma, cookie,
3351 3288 cur_offset, ©buf_used, &cur_pp);
3352 3289 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3353 3290 window->wd_dosync = B_TRUE;
3354 3291 }
3355 3292 }
3356 3293
3357 3294 /*
3358 3295 * save away the cookie size, since it could be modified in
3359 3296 * the windowing code.
3360 3297 */
3361 3298 dmac_size = cookie->dmac_size;
3362 3299
3363 3300 /* if we went over max copybuf size */
3364 3301 if (dma->dp_copybuf_size &&
3365 3302 (copybuf_used > dma->dp_copybuf_size)) {
3366 3303 partial = B_TRUE;
3367 3304 e = rootnex_copybuf_window_boundary(hp, dma, &window,
3368 3305 cookie, cur_offset, ©buf_used);
3369 3306 if (e != DDI_SUCCESS) {
3370 3307 rootnex_teardown_copybuf(dma);
3371 3308 rootnex_teardown_windows(dma);
3372 3309 return (e);
3373 3310 }
3374 3311
3375 3312 /*
3376 3313 			 * if the cookie uses the copy buffer, make sure the
3377 3314 * new window we just moved to is set to sync.
3378 3315 */
3379 3316 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3380 3317 window->wd_dosync = B_TRUE;
3381 3318 }
3382 3319 ROOTNEX_DPROBE1(rootnex__copybuf__window, dev_info_t *,
3383 3320 dma->dp_dip);
3384 3321
3385 3322 /* if the cookie cnt == max sgllen, move to the next window */
3386 3323 } else if (window->wd_cookie_cnt >=
3387 3324 (unsigned)attr->dma_attr_sgllen) {
3388 3325 partial = B_TRUE;
3389 3326 ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
3390 3327 e = rootnex_sgllen_window_boundary(hp, dma, &window,
3391 3328 cookie, attr, cur_offset);
3392 3329 if (e != DDI_SUCCESS) {
3393 3330 rootnex_teardown_copybuf(dma);
3394 3331 rootnex_teardown_windows(dma);
3395 3332 return (e);
3396 3333 }
3397 3334
3398 3335 /*
3399 3336 			 * if the cookie uses the copy buffer, make sure the
3400 3337 * new window we just moved to is set to sync.
3401 3338 */
3402 3339 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3403 3340 window->wd_dosync = B_TRUE;
3404 3341 }
3405 3342 ROOTNEX_DPROBE1(rootnex__sgllen__window, dev_info_t *,
3406 3343 dma->dp_dip);
3407 3344
3408 3345 /* else if we will be over maxxfer */
3409 3346 } else if ((window->wd_size + dmac_size) >
3410 3347 dma->dp_maxxfer) {
3411 3348 partial = B_TRUE;
3412 3349 e = rootnex_maxxfer_window_boundary(hp, dma, &window,
3413 3350 cookie);
3414 3351 if (e != DDI_SUCCESS) {
3415 3352 rootnex_teardown_copybuf(dma);
3416 3353 rootnex_teardown_windows(dma);
3417 3354 return (e);
3418 3355 }
3419 3356
3420 3357 /*
3421 3358 			 * if the cookie uses the copy buffer, make sure the
3422 3359 * new window we just moved to is set to sync.
3423 3360 */
3424 3361 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3425 3362 window->wd_dosync = B_TRUE;
3426 3363 }
3427 3364 ROOTNEX_DPROBE1(rootnex__maxxfer__window, dev_info_t *,
3428 3365 dma->dp_dip);
3429 3366
3430 3367 /* else this cookie fits in the current window */
3431 3368 } else {
3432 3369 window->wd_cookie_cnt++;
3433 3370 window->wd_size += dmac_size;
3434 3371 }
3435 3372
3436 3373 /* track our offset into the buffer, go to the next cookie */
3437 3374 ASSERT(dmac_size <= dmao->dmao_size);
3438 3375 ASSERT(cookie->dmac_size <= dmac_size);
3439 3376 cur_offset += dmac_size;
3440 3377 cookie++;
3441 3378 }
3442 3379
3443 3380 /* if we ended up with a zero sized window in the end, clean it up */
3444 3381 if (window->wd_size == 0) {
3445 3382 hp->dmai_nwin--;
3446 3383 window--;
3447 3384 }
3448 3385
3449 3386 ASSERT(window->wd_trim.tr_trim_last == B_FALSE);
3450 3387
3451 3388 if (!partial) {
3452 3389 return (DDI_DMA_MAPPED);
3453 3390 }
3454 3391
3455 3392 ASSERT(dma->dp_partial_required);
3456 3393 return (DDI_DMA_PARTIAL_MAP);
3457 3394 }
3458 3395
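A sketch of how a client driver typically consumes the DDI_DMA_PARTIAL_MAP result that rootnex_bind_slowpath() produces, walking each window with ddi_dma_numwin(9F)/ddi_dma_getwin(9F). xx_dma_start_partial() and program_cookie() are hypothetical driver-side names, the bind is assumed to have been done with DDI_DMA_PARTIAL, and error handling is trimmed; this is an illustration, not part of the change under review.

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

/* hypothetical per-driver helper: program one cookie into the hardware */
static void
program_cookie(const ddi_dma_cookie_t *cp)
{
	/* ... write cp->dmac_laddress and cp->dmac_size to the device ... */
}

/*
 * After a bind returned DDI_DMA_PARTIAL_MAP, walk every window and hand
 * each window's cookies to the hardware.
 */
static int
xx_dma_start_partial(ddi_dma_handle_t handle)
{
	ddi_dma_cookie_t cookie;
	uint_t nwin, ccount, win, i;
	off_t off;
	size_t len;

	if (ddi_dma_numwin(handle, &nwin) != DDI_SUCCESS)
		return (DDI_FAILURE);

	for (win = 0; win < nwin; win++) {
		/* activate window 'win'; this also syncs the previous window */
		if (ddi_dma_getwin(handle, win, &off, &len, &cookie,
		    &ccount) != DDI_SUCCESS)
			return (DDI_FAILURE);

		program_cookie(&cookie);		/* first cookie */
		for (i = 1; i < ccount; i++) {
			ddi_dma_nextcookie(handle, &cookie);
			program_cookie(&cookie);
		}
		/* ... start the transfer for this window and wait for it ... */
	}
	return (DDI_SUCCESS);
}
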
3459 3396 /*
3460 3397 * rootnex_setup_copybuf()
3461 3398 * Called in bind slowpath. Figures out if we're going to use the copy
3462 3399 * buffer, and if we do, sets up the basic state to handle it.
3463 3400 */
3464 3401 static int
3465 3402 rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
3466 3403 rootnex_dma_t *dma, ddi_dma_attr_t *attr)
3467 3404 {
3468 3405 rootnex_sglinfo_t *sinfo;
3469 3406 ddi_dma_attr_t lattr;
3470 3407 size_t max_copybuf;
3471 3408 int cansleep;
3472 3409 int e;
3473 3410 #if !defined(__amd64)
3474 3411 int vmflag;
3475 3412 #endif
3476 3413
3477 3414 ASSERT(!dma->dp_dvma_used);
3478 3415
3479 3416 sinfo = &dma->dp_sglinfo;
3480 3417
3481 3418 /* read this first so it's consistent through the routine */
3482 3419 max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK;
3483 3420
3484 3421 /* We need to call into the rootnex on ddi_dma_sync() */
3485 3422 hp->dmai_rflags &= ~DMP_NOSYNC;
3486 3423
3487 3424 /* make sure the copybuf size <= the max size */
3488 3425 dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
3489 3426 ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);
3490 3427
3491 3428 #if !defined(__amd64)
3492 3429 /*
3493 3430 * if we don't have kva space to copy to/from, allocate the KVA space
3494 3431 * now. We only do this for the 32-bit kernel. We use seg kpm space for
3495 3432 * the 64-bit kernel.
3496 3433 */
3497 3434 if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
3498 3435 (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {
3499 3436
3500 3437 /* convert the sleep flags */
3501 3438 if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
3502 3439 vmflag = VM_SLEEP;
3503 3440 } else {
3504 3441 vmflag = VM_NOSLEEP;
3505 3442 }
3506 3443
3507 3444 /* allocate Kernel VA space that we can bcopy to/from */
3508 3445 dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
3509 3446 vmflag);
3510 3447 if (dma->dp_kva == NULL) {
3511 3448 return (DDI_DMA_NORESOURCES);
3512 3449 }
3513 3450 }
3514 3451 #endif
3515 3452
3516 3453 /* convert the sleep flags */
3517 3454 if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
3518 3455 cansleep = 1;
3519 3456 } else {
3520 3457 cansleep = 0;
3521 3458 }
3522 3459
3523 3460 /*
3524 3461 * Allocate the actual copy buffer. This needs to fit within the DMA
3525 3462 * engine limits, so we can't use kmem_alloc... We don't need
3526 3463 * contiguous memory (sgllen) since we will be forcing windows on
3527 3464 * sgllen anyway.
3528 3465 */
3529 3466 lattr = *attr;
3530 3467 lattr.dma_attr_align = MMU_PAGESIZE;
3531 3468 lattr.dma_attr_sgllen = -1; /* no limit */
3532 3469 /*
3533 3470 * if we're using the copy buffer because of seg, use that for our
3534 3471 * upper address limit.
3535 3472 */
3536 3473 if (sinfo->si_bounce_on_seg) {
3537 3474 lattr.dma_attr_addr_hi = lattr.dma_attr_seg;
3538 3475 }
3539 3476 e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
3540 3477 0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
3541 3478 if (e != DDI_SUCCESS) {
3542 3479 #if !defined(__amd64)
3543 3480 if (dma->dp_kva != NULL) {
3544 3481 vmem_free(heap_arena, dma->dp_kva,
3545 3482 dma->dp_copybuf_size);
3546 3483 }
3547 3484 #endif
3548 3485 return (DDI_DMA_NORESOURCES);
3549 3486 }
3550 3487
3551 3488 ROOTNEX_DPROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
3552 3489 size_t, dma->dp_copybuf_size);
3553 3490
3554 3491 return (DDI_SUCCESS);
3555 3492 }
3556 3493
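The sizing in rootnex_setup_copybuf() reduces to clamping the request against the page-aligned system limit. A tiny stand-alone model, with made-up constants standing in for i_ddi_copybuf_size() and MMU_PAGEMASK:

#include <stdint.h>
#include <stdio.h>

#define PAGESIZE	4096UL
#define PAGEMASK	(~(PAGESIZE - 1))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t sys_max = 6 * PAGESIZE + 123;	/* pretend i_ddi_copybuf_size() */
	uint64_t copybuf_req = 9 * PAGESIZE;	/* pretend si_copybuf_req */

	uint64_t max_copybuf = sys_max & PAGEMASK;	/* page align the limit */
	uint64_t copybuf_size = MIN(copybuf_req, max_copybuf);

	/* the request is clamped to 6 pages even though 9 were asked for */
	printf("max=0x%llx size=0x%llx\n",
	    (unsigned long long)max_copybuf, (unsigned long long)copybuf_size);
	return (0);
}
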
3557 3494
3558 3495 /*
3559 3496 * rootnex_setup_windows()
3560 3497 * Called in bind slowpath to setup the window state. We always have windows
3561 3498 	 * in the slowpath, even if the window count = 1.
3562 3499 */
3563 3500 static int
3564 3501 rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3565 3502 ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag)
3566 3503 {
3567 3504 rootnex_window_t *windowp;
3568 3505 rootnex_sglinfo_t *sinfo;
3569 3506 size_t copy_state_size;
3570 3507 size_t win_state_size;
3571 3508 size_t state_available;
3572 3509 size_t space_needed;
3573 3510 uint_t copybuf_win;
3574 3511 uint_t maxxfer_win;
3575 3512 size_t space_used;
3576 3513 uint_t sglwin;
3577 3514
3578 3515
3579 3516 sinfo = &dma->dp_sglinfo;
3580 3517
3581 3518 dma->dp_current_win = 0;
3582 3519 hp->dmai_nwin = 0;
3583 3520
3584 3521 /* If we don't need to do a partial, we only have one window */
3585 3522 if (!dma->dp_partial_required) {
3586 3523 dma->dp_max_win = 1;
3587 3524
3588 3525 /*
3589 3526 	 * we need multiple windows, need to figure out the worst case number
3590 3527 * of windows.
3591 3528 */
3592 3529 } else {
3593 3530 /*
3594 3531 		 * if we need windows because we need more copy buffer than
3595 3532 		 * we allow, the worst case number of windows we could need
3596 3533 * here would be (copybuf space required / copybuf space that
3597 3534 * we have) plus one for remainder, and plus 2 to handle the
3598 3535 * extra pages on the trim for the first and last pages of the
3599 3536 * buffer (a page is the minimum window size so under the right
3600 3537 * attr settings, you could have a window for each page).
3601 3538 * The last page will only be hit here if the size is not a
3602 3539 * multiple of the granularity (which theoretically shouldn't
3603 3540 * be the case but never has been enforced, so we could have
3604 3541 * broken things without it).
3605 3542 */
3606 3543 if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
3607 3544 ASSERT(dma->dp_copybuf_size > 0);
3608 3545 copybuf_win = (sinfo->si_copybuf_req /
3609 3546 dma->dp_copybuf_size) + 1 + 2;
3610 3547 } else {
3611 3548 copybuf_win = 0;
3612 3549 }
3613 3550
3614 3551 /*
3615 3552 * if we need windows because we have more cookies than the H/W
3616 3553 * can handle, the number of windows we would need here would
3617 3554 * be (cookie count / cookies count H/W supports minus 1[for
3618 3555 * trim]) plus one for remainder.
3619 3556 */
3620 3557 if ((unsigned)attr->dma_attr_sgllen < sinfo->si_sgl_size) {
3621 3558 sglwin = (sinfo->si_sgl_size /
3622 3559 (attr->dma_attr_sgllen - 1)) + 1;
3623 3560 } else {
3624 3561 sglwin = 0;
3625 3562 }
3626 3563
3627 3564 /*
3628 3565 * if we need windows because we're binding more memory than the
3629 3566 * H/W can transfer at once, the number of windows we would need
3630 3567 * here would be (xfer count / max xfer H/W supports) plus one
3631 3568 * for remainder, and plus 2 to handle the extra pages on the
3632 3569 * trim (see above comment about trim)
3633 3570 */
3634 3571 if (dmao->dmao_size > dma->dp_maxxfer) {
3635 3572 maxxfer_win = (dmao->dmao_size /
3636 3573 dma->dp_maxxfer) + 1 + 2;
3637 3574 } else {
3638 3575 maxxfer_win = 0;
3639 3576 }
3640 3577 dma->dp_max_win = copybuf_win + sglwin + maxxfer_win;
3641 3578 ASSERT(dma->dp_max_win > 0);
3642 3579 }
3643 3580 win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);
3644 3581
3645 3582 /*
3646 3583 * Get space for window and potential copy buffer state. Before we
3647 3584 * go and allocate memory, see if we can get away with using what's
3648 3585 	 * left in the pre-allocated state or the dynamically allocated sgl.
3649 3586 */
3650 3587 space_used = (uintptr_t)(sinfo->si_sgl_size *
3651 3588 sizeof (ddi_dma_cookie_t));
3652 3589
3653 3590 /* if we dynamically allocated space for the cookies */
3654 3591 if (dma->dp_need_to_free_cookie) {
3655 3592 		/* if we have more space in the pre-allocated buffer, use it */
3656 3593 ASSERT(space_used <= dma->dp_cookie_size);
3657 3594 if ((dma->dp_cookie_size - space_used) <=
3658 3595 rootnex_state->r_prealloc_size) {
3659 3596 state_available = rootnex_state->r_prealloc_size;
3660 3597 windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;
3661 3598
3662 3599 /*
3663 3600 * else, we have more free space in the dynamically allocated
3664 3601 		 * buffer, i.e. the buffer wasn't worst case fragmented so we
3665 3602 * didn't need a lot of cookies.
3666 3603 */
3667 3604 } else {
3668 3605 state_available = dma->dp_cookie_size - space_used;
3669 3606 windowp = (rootnex_window_t *)
3670 3607 &dma->dp_cookies[sinfo->si_sgl_size];
3671 3608 }
3672 3609
3673 3610 	/* we used the pre-allocated buffer */
3674 3611 } else {
3675 3612 ASSERT(space_used <= rootnex_state->r_prealloc_size);
3676 3613 state_available = rootnex_state->r_prealloc_size - space_used;
3677 3614 windowp = (rootnex_window_t *)
3678 3615 &dma->dp_cookies[sinfo->si_sgl_size];
3679 3616 }
3680 3617
3681 3618 /*
3682 3619 * figure out how much state we need to track the copy buffer. Add an
3683 3620 	 * additional 8 bytes for pointer alignment later.
3684 3621 */
3685 3622 if (dma->dp_copybuf_size > 0) {
3686 3623 copy_state_size = sinfo->si_max_pages *
3687 3624 sizeof (rootnex_pgmap_t);
3688 3625 } else {
3689 3626 copy_state_size = 0;
3690 3627 }
3691 3628 /* add an additional 8 bytes for pointer alignment */
3692 3629 space_needed = win_state_size + copy_state_size + 0x8;
3693 3630
3694 3631 /* if we have enough space already, use it */
3695 3632 if (state_available >= space_needed) {
3696 3633 dma->dp_window = windowp;
3697 3634 dma->dp_need_to_free_window = B_FALSE;
3698 3635
3699 3636 /* not enough space, need to allocate more. */
3700 3637 } else {
3701 3638 dma->dp_window = kmem_alloc(space_needed, kmflag);
3702 3639 if (dma->dp_window == NULL) {
3703 3640 return (DDI_DMA_NORESOURCES);
3704 3641 }
3705 3642 dma->dp_need_to_free_window = B_TRUE;
3706 3643 dma->dp_window_size = space_needed;
3707 3644 ROOTNEX_DPROBE2(rootnex__bind__sp__alloc, dev_info_t *,
3708 3645 dma->dp_dip, size_t, space_needed);
3709 3646 }
3710 3647
3711 3648 /*
3712 3649 * we allocate copy buffer state and window state at the same time.
3713 3650 * setup our copy buffer state pointers. Make sure it's aligned.
3714 3651 */
3715 3652 if (dma->dp_copybuf_size > 0) {
3716 3653 dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
3717 3654 &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);
3718 3655
3719 3656 #if !defined(__amd64)
3720 3657 /*
3721 3658 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
3722 3659 * false/NULL. Should be quicker to bzero vs loop and set.
3723 3660 */
3724 3661 bzero(dma->dp_pgmap, copy_state_size);
3725 3662 #endif
3726 3663 } else {
3727 3664 dma->dp_pgmap = NULL;
3728 3665 }
3729 3666
3730 3667 return (DDI_SUCCESS);
3731 3668 }
3732 3669
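The worst case window count computed in rootnex_setup_windows() can be reproduced with a small stand-alone calculation; all input values below are made up for illustration and mirror the three terms in the code above.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* hypothetical bind: values that would normally live in the dma/sgl state */
	uint64_t copybuf_req  = 48 * 4096;	/* copy buffer needed */
	uint64_t copybuf_size = 16 * 4096;	/* copy buffer we actually have */
	uint64_t sgl_size     = 300;		/* cookies produced by get_sgl() */
	uint64_t sgllen       = 64;		/* cookies the H/W accepts per window */
	uint64_t obj_size     = 4 * 1024 * 1024;	/* bytes bound */
	uint64_t maxxfer      = 1 * 1024 * 1024;	/* H/W max transfer */

	uint64_t copybuf_win = (copybuf_req > copybuf_size) ?
	    (copybuf_req / copybuf_size) + 1 + 2 : 0;	/* +1 remainder, +2 trim */
	uint64_t sglwin = (sgllen < sgl_size) ?
	    (sgl_size / (sgllen - 1)) + 1 : 0;		/* -1 leaves room for trim */
	uint64_t maxxfer_win = (obj_size > maxxfer) ?
	    (obj_size / maxxfer) + 1 + 2 : 0;

	printf("max windows = %llu (copybuf %llu, sgllen %llu, maxxfer %llu)\n",
	    (unsigned long long)(copybuf_win + sglwin + maxxfer_win),
	    (unsigned long long)copybuf_win, (unsigned long long)sglwin,
	    (unsigned long long)maxxfer_win);
	return (0);
}
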
3733 3670
3734 3671 /*
3735 3672 * rootnex_teardown_copybuf()
3736 3673 * cleans up after rootnex_setup_copybuf()
3737 3674 */
3738 3675 static void
3739 3676 rootnex_teardown_copybuf(rootnex_dma_t *dma)
3740 3677 {
3741 3678 #if !defined(__amd64)
3742 3679 int i;
3743 3680
3744 3681 /*
3745 3682 * if we allocated kernel heap VMEM space, go through all the pages and
3746 3683 	 * map out any of the ones that were mapped into the kernel heap VMEM
3747 3684 * arena. Then free the VMEM space.
3748 3685 */
3749 3686 if (dma->dp_kva != NULL) {
3750 3687 for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
3751 3688 if (dma->dp_pgmap[i].pm_mapped) {
3752 3689 hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
3753 3690 MMU_PAGESIZE, HAT_UNLOAD);
3754 3691 dma->dp_pgmap[i].pm_mapped = B_FALSE;
3755 3692 }
3756 3693 }
3757 3694
3758 3695 vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
3759 3696 }
3760 3697
3761 3698 #endif
3762 3699
3763 3700 /* if we allocated a copy buffer, free it */
3764 3701 if (dma->dp_cbaddr != NULL) {
3765 3702 i_ddi_mem_free(dma->dp_cbaddr, NULL);
3766 3703 }
3767 3704 }
3768 3705
3769 3706
3770 3707 /*
3771 3708 * rootnex_teardown_windows()
3772 3709 * cleans up after rootnex_setup_windows()
3773 3710 */
3774 3711 static void
3775 3712 rootnex_teardown_windows(rootnex_dma_t *dma)
3776 3713 {
3777 3714 /*
3778 3715 * if we had to allocate window state on the last bind (because we
3779 3716 * didn't have enough pre-allocated space in the handle), free it.
3780 3717 */
3781 3718 if (dma->dp_need_to_free_window) {
3782 3719 kmem_free(dma->dp_window, dma->dp_window_size);
3783 3720 }
3784 3721 }
3785 3722
3786 3723
3787 3724 /*
3788 3725 * rootnex_init_win()
3789 3726 * Called in bind slow path during creation of a new window. Initializes
3790 3727 * window state to default values.
3791 3728 */
3792 3729 /*ARGSUSED*/
3793 3730 static void
3794 3731 rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
3795 3732 rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
3796 3733 {
3797 3734 hp->dmai_nwin++;
3798 3735 window->wd_dosync = B_FALSE;
3799 3736 window->wd_offset = cur_offset;
3800 3737 window->wd_size = 0;
3801 3738 window->wd_first_cookie = cookie;
3802 3739 window->wd_cookie_cnt = 0;
3803 3740 window->wd_trim.tr_trim_first = B_FALSE;
3804 3741 window->wd_trim.tr_trim_last = B_FALSE;
3805 3742 window->wd_trim.tr_first_copybuf_win = B_FALSE;
3806 3743 window->wd_trim.tr_last_copybuf_win = B_FALSE;
3807 3744 #if !defined(__amd64)
3808 3745 window->wd_remap_copybuf = dma->dp_cb_remaping;
3809 3746 #endif
3810 3747 }
3811 3748
3812 3749
3813 3750 /*
3814 3751 * rootnex_setup_cookie()
3815 3752 * Called in the bind slow path when the sgl uses the copy buffer. If any of
3816 3753 * the sgl uses the copy buffer, we need to go through each cookie, figure
3817 3754 * out if it uses the copy buffer, and if it does, save away everything we'll
3818 3755 * need during sync.
3819 3756 */
3820 3757 static void
3821 3758 rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
3822 3759 ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
3823 3760 page_t **cur_pp)
3824 3761 {
3825 3762 boolean_t copybuf_sz_power_2;
3826 3763 rootnex_sglinfo_t *sinfo;
3827 3764 paddr_t paddr;
3828 3765 uint_t pidx;
3829 3766 uint_t pcnt;
3830 3767 off_t poff;
3831 3768 #if defined(__amd64)
3832 3769 pfn_t pfn;
3833 3770 #else
3834 3771 page_t **pplist;
3835 3772 #endif
3836 3773
3837 3774 ASSERT(dmar_object->dmao_type != DMA_OTYP_DVADDR);
3838 3775
3839 3776 sinfo = &dma->dp_sglinfo;
3840 3777
3841 3778 /*
3842 3779 * Calculate the page index relative to the start of the buffer. The
3843 3780 * index to the current page for our buffer is the offset into the
3844 3781 * first page of the buffer plus our current offset into the buffer
3845 3782 * itself, shifted of course...
3846 3783 */
3847 3784 pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
3848 3785 ASSERT(pidx < sinfo->si_max_pages);
3849 3786
3850 3787 /* if this cookie uses the copy buffer */
3851 3788 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
3852 3789 /*
3853 3790 * NOTE: we know that since this cookie uses the copy buffer, it
3854 3791 * is <= MMU_PAGESIZE.
3855 3792 */
3856 3793
3857 3794 /*
3858 3795 * get the offset into the page. For the 64-bit kernel, get the
3859 3796 * pfn which we'll use with seg kpm.
3860 3797 */
3861 3798 poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
3862 3799 #if defined(__amd64)
3863 3800 /* mfn_to_pfn() is a NOP on i86pc */
3864 3801 pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT);
3865 3802 #endif /* __amd64 */
3866 3803
3867 3804 /* figure out if the copybuf size is a power of 2 */
3868 3805 if (!ISP2(dma->dp_copybuf_size)) {
3869 3806 copybuf_sz_power_2 = B_FALSE;
3870 3807 } else {
3871 3808 copybuf_sz_power_2 = B_TRUE;
3872 3809 }
3873 3810
3874 3811 /* This page uses the copy buffer */
3875 3812 dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;
3876 3813
3877 3814 /*
3878 3815 * save the copy buffer KVA that we'll use with this page.
3879 3816 * if we still fit within the copybuf, it's a simple add.
3880 3817 * otherwise, we need to wrap over using & or % accordingly.
3881 3818 */
3882 3819 if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
3883 3820 dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
3884 3821 *copybuf_used;
3885 3822 } else {
3886 3823 if (copybuf_sz_power_2) {
3887 3824 dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3888 3825 (uintptr_t)dma->dp_cbaddr +
3889 3826 (*copybuf_used &
3890 3827 (dma->dp_copybuf_size - 1)));
3891 3828 } else {
3892 3829 dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
3893 3830 (uintptr_t)dma->dp_cbaddr +
3894 3831 (*copybuf_used % dma->dp_copybuf_size));
3895 3832 }
3896 3833 }
3897 3834
3898 3835 /*
3899 3836 		 * overwrite the cookie's physical address with the physical
3900 3837 		 * address of the copy buffer page that we will
3901 3838 		 * use.
3902 3839 */
3903 3840 paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
3904 3841 dma->dp_pgmap[pidx].pm_cbaddr)) + poff;
3905 3842
3906 3843 cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(paddr);
3907 3844
3908 3845 /* if we have a kernel VA, it's easy, just save that address */
3909 3846 if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
3910 3847 (sinfo->si_asp == &kas)) {
3911 3848 /*
3912 3849 * save away the page aligned virtual address of the
3913 3850 * driver buffer. Offsets are handled in the sync code.
3914 3851 */
3915 3852 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
3916 3853 dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
3917 3854 & MMU_PAGEMASK);
3918 3855 #if !defined(__amd64)
3919 3856 /*
3920 3857 * we didn't need to, and will never need to map this
3921 3858 * page.
3922 3859 */
3923 3860 dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
3924 3861 #endif
3925 3862
3926 3863 /* we don't have a kernel VA. We need one for the bcopy. */
3927 3864 } else {
3928 3865 #if defined(__amd64)
3929 3866 /*
3930 3867 * for the 64-bit kernel, it's easy. We use seg kpm to
3931 3868 * get a Kernel VA for the corresponding pfn.
3932 3869 */
3933 3870 dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
3934 3871 #else
3935 3872 /*
3936 3873 * for the 32-bit kernel, this is a pain. First we'll
3937 3874 * save away the page_t or user VA for this page. This
3938 3875 * is needed in rootnex_dma_win() when we switch to a
3939 3876 * new window which requires us to re-map the copy
3940 3877 * buffer.
3941 3878 */
3942 3879 pplist = dmar_object->dmao_obj.virt_obj.v_priv;
3943 3880 if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
3944 3881 dma->dp_pgmap[pidx].pm_pp = *cur_pp;
3945 3882 dma->dp_pgmap[pidx].pm_vaddr = NULL;
3946 3883 } else if (pplist != NULL) {
3947 3884 dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
3948 3885 dma->dp_pgmap[pidx].pm_vaddr = NULL;
3949 3886 } else {
3950 3887 dma->dp_pgmap[pidx].pm_pp = NULL;
3951 3888 dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
3952 3889 (((uintptr_t)
3953 3890 dmar_object->dmao_obj.virt_obj.v_addr +
3954 3891 cur_offset) & MMU_PAGEMASK);
3955 3892 }
3956 3893
3957 3894 /*
3958 3895 * save away the page aligned virtual address which was
3959 3896 * allocated from the kernel heap arena (taking into
3960 3897 			 * account if we need more copy buffer than we allocated
3961 3898 * and use multiple windows to handle this, i.e. &,%).
3962 3899 			 * NOTE: there isn't any physical memory backing up this
3963 3900 * virtual address space currently.
3964 3901 */
3965 3902 if ((*copybuf_used + MMU_PAGESIZE) <=
3966 3903 dma->dp_copybuf_size) {
3967 3904 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3968 3905 (((uintptr_t)dma->dp_kva + *copybuf_used) &
3969 3906 MMU_PAGEMASK);
3970 3907 } else {
3971 3908 if (copybuf_sz_power_2) {
3972 3909 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3973 3910 (((uintptr_t)dma->dp_kva +
3974 3911 (*copybuf_used &
3975 3912 (dma->dp_copybuf_size - 1))) &
3976 3913 MMU_PAGEMASK);
3977 3914 } else {
3978 3915 dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
3979 3916 (((uintptr_t)dma->dp_kva +
3980 3917 (*copybuf_used %
3981 3918 dma->dp_copybuf_size)) &
3982 3919 MMU_PAGEMASK);
3983 3920 }
3984 3921 }
3985 3922
3986 3923 /*
3987 3924 * if we haven't used up the available copy buffer yet,
3988 3925 * map the kva to the physical page.
3989 3926 */
3990 3927 if (!dma->dp_cb_remaping && ((*copybuf_used +
3991 3928 MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
3992 3929 dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
3993 3930 if (dma->dp_pgmap[pidx].pm_pp != NULL) {
3994 3931 i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
3995 3932 dma->dp_pgmap[pidx].pm_kaddr);
3996 3933 } else {
3997 3934 i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
3998 3935 sinfo->si_asp,
3999 3936 dma->dp_pgmap[pidx].pm_kaddr);
4000 3937 }
4001 3938
4002 3939 /*
4003 3940 * we've used up the available copy buffer, this page
4004 3941 * will have to be mapped during rootnex_dma_win() when
4005 3942 			 * we switch to a new window which requires a re-map of
4006 3943 * the copy buffer. (32-bit kernel only)
4007 3944 */
4008 3945 } else {
4009 3946 dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
4010 3947 }
4011 3948 #endif
4012 3949 /* go to the next page_t */
4013 3950 if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
4014 3951 *cur_pp = (*cur_pp)->p_next;
4015 3952 }
4016 3953 }
4017 3954
4018 3955 /* add to the copy buffer count */
4019 3956 *copybuf_used += MMU_PAGESIZE;
4020 3957
4021 3958 /*
4022 3959 * This cookie doesn't use the copy buffer. Walk through the pages this
4023 3960 * cookie occupies to reflect this.
4024 3961 */
4025 3962 } else {
4026 3963 /*
4027 3964 * figure out how many pages the cookie occupies. We need to
4028 3965 		 * use the original page offset of the buffer and the cookie's
4029 3966 * offset in the buffer to do this.
4030 3967 */
4031 3968 poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
4032 3969 pcnt = mmu_btopr(cookie->dmac_size + poff);
4033 3970
4034 3971 while (pcnt > 0) {
4035 3972 #if !defined(__amd64)
4036 3973 /*
4037 3974 * the 32-bit kernel doesn't have seg kpm, so we need
4038 3975 * to map in the driver buffer (if it didn't come down
4039 3976 * with a kernel VA) on the fly. Since this page doesn't
4040 3977 			 * use the copy buffer, it's not, nor will it ever, have
4041 3978 * to be mapped in.
4042 3979 */
4043 3980 dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
4044 3981 #endif
4045 3982 dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;
4046 3983
4047 3984 /*
4048 3985 			 * we need to update pidx and cur_pp or we'll lose
4049 3986 * track of where we are.
4050 3987 */
4051 3988 if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
4052 3989 *cur_pp = (*cur_pp)->p_next;
4053 3990 }
4054 3991 pidx++;
4055 3992 pcnt--;
4056 3993 }
4057 3994 }
4058 3995 }
4059 3996
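The wrap of *copybuf_used onto the copy buffer in rootnex_setup_cookie(), done with & for power-of-two sizes and % otherwise, can be seen in isolation. A stand-alone model with hypothetical sizes:

#include <stdint.h>
#include <stdio.h>

#define ISP2(x)	(((x) & ((x) - 1)) == 0)

/* Map a running copybuf offset back into [0, copybuf_size). */
static uint64_t
wrap_offset(uint64_t used, uint64_t copybuf_size)
{
	if (ISP2(copybuf_size))
		return (used & (copybuf_size - 1));	/* cheap mask */
	return (used % copybuf_size);			/* general case */
}

int
main(void)
{
	uint64_t sizes[] = { 64 * 1024, 96 * 1024 };	/* power of 2, and not */

	for (int i = 0; i < 2; i++) {
		for (uint64_t used = 0; used <= 3 * sizes[i]; used += sizes[i] / 2)
			printf("size=%llu used=%llu -> %llu\n",
			    (unsigned long long)sizes[i], (unsigned long long)used,
			    (unsigned long long)wrap_offset(used, sizes[i]));
	}
	return (0);
}
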
4060 3997
4061 3998 /*
4062 3999 * rootnex_sgllen_window_boundary()
4063 4000 * Called in the bind slow path when the next cookie causes us to exceed (in
4064 4001 * this case == since we start at 0 and sgllen starts at 1) the maximum sgl
4065 4002 * length supported by the DMA H/W.
4066 4003 */
4067 4004 static int
4068 4005 rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
4069 4006 rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
4070 4007 off_t cur_offset)
4071 4008 {
4072 4009 off_t new_offset;
4073 4010 size_t trim_sz;
4074 4011 off_t coffset;
4075 4012
4076 4013
4077 4014 /*
4078 4015 * if we know we'll never have to trim, it's pretty easy. Just move to
4079 4016 * the next window and init it. We're done.
4080 4017 */
4081 4018 if (!dma->dp_trim_required) {
4082 4019 (*windowp)++;
4083 4020 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
4084 4021 (*windowp)->wd_cookie_cnt++;
4085 4022 (*windowp)->wd_size = cookie->dmac_size;
4086 4023 return (DDI_SUCCESS);
4087 4024 }
4088 4025
4089 4026 /* figure out how much we need to trim from the window */
4090 4027 ASSERT(attr->dma_attr_granular != 0);
4091 4028 if (dma->dp_granularity_power_2) {
4092 4029 trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
4093 4030 } else {
4094 4031 trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
4095 4032 }
4096 4033
4097 4034 /* The window's a whole multiple of granularity. We're done */
4098 4035 if (trim_sz == 0) {
4099 4036 (*windowp)++;
4100 4037 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
4101 4038 (*windowp)->wd_cookie_cnt++;
4102 4039 (*windowp)->wd_size = cookie->dmac_size;
4103 4040 return (DDI_SUCCESS);
4104 4041 }
4105 4042
4106 4043 /*
4107 4044 	 * The window's not a whole multiple of granularity. Since we know this
4108 4045 * is due to the sgllen, we need to go back to the last cookie and trim
4109 4046 	 * that one, add the leftover part of the old cookie into the new
4110 4047 * window, and then add in the new cookie into the new window.
4111 4048 */
4112 4049
4113 4050 /*
4114 4051 * make sure the driver isn't making us do something bad... Trimming and
4115 4052 * sgllen == 1 don't go together.
4116 4053 */
4117 4054 if (attr->dma_attr_sgllen == 1) {
4118 4055 return (DDI_DMA_NOMAPPING);
4119 4056 }
4120 4057
4121 4058 /*
4122 4059 * first, setup the current window to account for the trim. Need to go
4123 4060 * back to the last cookie for this.
4124 4061 */
4125 4062 cookie--;
4126 4063 (*windowp)->wd_trim.tr_trim_last = B_TRUE;
4127 4064 (*windowp)->wd_trim.tr_last_cookie = cookie;
4128 4065 (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
4129 4066 ASSERT(cookie->dmac_size > trim_sz);
4130 4067 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
4131 4068 (*windowp)->wd_size -= trim_sz;
4132 4069
4133 4070 /* save the buffer offsets for the next window */
4134 4071 coffset = cookie->dmac_size - trim_sz;
4135 4072 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
4136 4073
4137 4074 /*
4138 4075 * set this now in case this is the first window. all other cases are
4139 4076 * set in dma_win()
4140 4077 */
4141 4078 cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
4142 4079
4143 4080 /*
4144 4081 * initialize the next window using what's left over in the previous
4145 4082 * cookie.
4146 4083 */
4147 4084 (*windowp)++;
4148 4085 rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
4149 4086 (*windowp)->wd_cookie_cnt++;
4150 4087 (*windowp)->wd_trim.tr_trim_first = B_TRUE;
4151 4088 (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
4152 4089 (*windowp)->wd_trim.tr_first_size = trim_sz;
4153 4090 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
4154 4091 (*windowp)->wd_dosync = B_TRUE;
4155 4092 }
4156 4093
4157 4094 /*
4158 4095 * now go back to the current cookie and add it to the new window. set
4159 4096 	 * the new window size to what was left over from the previous
4160 4097 * cookie and what's in the current cookie.
4161 4098 */
4162 4099 cookie++;
4163 4100 (*windowp)->wd_cookie_cnt++;
4164 4101 (*windowp)->wd_size = trim_sz + cookie->dmac_size;
4165 4102
4166 4103 /*
4167 4104 * trim plus the next cookie could put us over maxxfer (a cookie can be
4168 4105 * a max size of maxxfer). Handle that case.
4169 4106 */
4170 4107 if ((*windowp)->wd_size > dma->dp_maxxfer) {
4171 4108 /*
4172 4109 * maxxfer is already a whole multiple of granularity, and this
4173 4110 * trim will be <= the previous trim (since a cookie can't be
4174 4111 * larger than maxxfer). Make things simple here.
4175 4112 */
4176 4113 trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
4177 4114 (*windowp)->wd_trim.tr_trim_last = B_TRUE;
4178 4115 (*windowp)->wd_trim.tr_last_cookie = cookie;
4179 4116 (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
4180 4117 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
4181 4118 (*windowp)->wd_size -= trim_sz;
4182 4119 ASSERT((*windowp)->wd_size == dma->dp_maxxfer);
4183 4120
4184 4121 /* save the buffer offsets for the next window */
4185 4122 coffset = cookie->dmac_size - trim_sz;
4186 4123 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
4187 4124
4188 4125 /* setup the next window */
4189 4126 (*windowp)++;
4190 4127 rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
4191 4128 (*windowp)->wd_cookie_cnt++;
4192 4129 (*windowp)->wd_trim.tr_trim_first = B_TRUE;
4193 4130 (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
4194 4131 coffset;
4195 4132 (*windowp)->wd_trim.tr_first_size = trim_sz;
4196 4133 }
4197 4134
4198 4135 return (DDI_SUCCESS);
4199 4136 }
4200 4137
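A compact model of the trim performed in rootnex_sgllen_window_boundary(): the tail of the window that is not a whole multiple of dma_attr_granular is carved off the last cookie and carried into the next window as its first, partial cookie. The numbers below are illustrative only.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t granular   = 512;		/* dma_attr_granular */
	uint64_t win_size   = 10 * 512 + 100;	/* current window size */
	uint64_t last_size  = 1000;		/* size of the window's last cookie */

	uint64_t trim_sz = win_size % granular;	/* & (granular - 1) if power of 2 */

	/* the current window keeps last_size - trim_sz of the last cookie */
	uint64_t kept = last_size - trim_sz;
	win_size -= trim_sz;

	/* the next window starts with the trimmed remainder of that same cookie */
	uint64_t next_first_size = trim_sz;
	uint64_t next_first_off  = kept;	/* offset into the trimmed cookie */

	printf("trim=%llu kept=%llu window=%llu next(first size=%llu off=%llu)\n",
	    (unsigned long long)trim_sz, (unsigned long long)kept,
	    (unsigned long long)win_size, (unsigned long long)next_first_size,
	    (unsigned long long)next_first_off);
	return (0);
}
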
4201 4138
4202 4139 /*
4203 4140 * rootnex_copybuf_window_boundary()
4204 4141 * Called in bind slowpath when we get to a window boundary because we used
4205 4142 * up all the copy buffer that we have.
4206 4143 */
4207 4144 static int
4208 4145 rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
4209 4146 rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset,
4210 4147 size_t *copybuf_used)
4211 4148 {
4212 4149 rootnex_sglinfo_t *sinfo;
4213 4150 off_t new_offset;
4214 4151 size_t trim_sz;
4215 4152 paddr_t paddr;
4216 4153 off_t coffset;
4217 4154 uint_t pidx;
4218 4155 off_t poff;
4219 4156
4220 4157
4221 4158 sinfo = &dma->dp_sglinfo;
4222 4159
4223 4160 /*
4224 4161 * the copy buffer should be a whole multiple of page size. We know that
4225 4162 * this cookie is <= MMU_PAGESIZE.
4226 4163 */
4227 4164 ASSERT(cookie->dmac_size <= MMU_PAGESIZE);
4228 4165
4229 4166 /*
4230 4167 * from now on, all new windows in this bind need to be re-mapped during
4231 4168 	 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out of copybuf
4232 4169 * space...
4233 4170 */
4234 4171 #if !defined(__amd64)
4235 4172 dma->dp_cb_remaping = B_TRUE;
4236 4173 #endif
4237 4174
4238 4175 /* reset copybuf used */
4239 4176 *copybuf_used = 0;
4240 4177
4241 4178 /*
4242 4179 * if we don't have to trim (since granularity is set to 1), go to the
4243 4180 * next window and add the current cookie to it. We know the current
4244 4181 * cookie uses the copy buffer since we're in this code path.
4245 4182 */
4246 4183 if (!dma->dp_trim_required) {
4247 4184 (*windowp)++;
4248 4185 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
4249 4186
4250 4187 /* Add this cookie to the new window */
4251 4188 (*windowp)->wd_cookie_cnt++;
4252 4189 (*windowp)->wd_size += cookie->dmac_size;
4253 4190 *copybuf_used += MMU_PAGESIZE;
4254 4191 return (DDI_SUCCESS);
4255 4192 }
4256 4193
4257 4194 /*
4258 4195 * *** may need to trim, figure it out.
4259 4196 */
4260 4197
4261 4198 /* figure out how much we need to trim from the window */
4262 4199 if (dma->dp_granularity_power_2) {
4263 4200 trim_sz = (*windowp)->wd_size &
4264 4201 (hp->dmai_attr.dma_attr_granular - 1);
4265 4202 } else {
4266 4203 trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular;
4267 4204 }
4268 4205
4269 4206 /*
4270 4207 * if the window's a whole multiple of granularity, go to the next
4271 4208 * window, init it, then add in the current cookie. We know the current
4272 4209 * cookie uses the copy buffer since we're in this code path.
4273 4210 */
4274 4211 if (trim_sz == 0) {
4275 4212 (*windowp)++;
4276 4213 rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
4277 4214
4278 4215 /* Add this cookie to the new window */
4279 4216 (*windowp)->wd_cookie_cnt++;
4280 4217 (*windowp)->wd_size += cookie->dmac_size;
4281 4218 *copybuf_used += MMU_PAGESIZE;
4282 4219 return (DDI_SUCCESS);
4283 4220 }
4284 4221
4285 4222 /*
4286 4223 	 * *** We figured it out, we definitely need to trim
4287 4224 */
4288 4225
4289 4226 /*
4290 4227 * make sure the driver isn't making us do something bad...
4291 4228 * Trimming and sgllen == 1 don't go together.
4292 4229 */
4293 4230 if (hp->dmai_attr.dma_attr_sgllen == 1) {
4294 4231 return (DDI_DMA_NOMAPPING);
4295 4232 }
4296 4233
4297 4234 /*
4298 4235 * first, setup the current window to account for the trim. Need to go
4299 4236 * back to the last cookie for this. Some of the last cookie will be in
4300 4237 * the current window, and some of the last cookie will be in the new
4301 4238 * window. All of the current cookie will be in the new window.
4302 4239 */
4303 4240 cookie--;
4304 4241 (*windowp)->wd_trim.tr_trim_last = B_TRUE;
4305 4242 (*windowp)->wd_trim.tr_last_cookie = cookie;
4306 4243 (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
4307 4244 ASSERT(cookie->dmac_size > trim_sz);
4308 4245 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
4309 4246 (*windowp)->wd_size -= trim_sz;
4310 4247
4311 4248 /*
4312 4249 * we're trimming the last cookie (not the current cookie). So that
4313 4250 	 * last cookie may or may not have been using the copy buffer (we
4314 4251 	 * know the cookie passed in uses the copy buffer since we're in
4315 4252 * this code path).
4316 4253 *
4317 4254 * If the last cookie doesn't use the copy buffer, nothing special to
4318 4255 	 * do. However, if it does use the copy buffer, it will be both the
4319 4256 * last page in the current window and the first page in the next
4320 4257 * window. Since we are reusing the copy buffer (and KVA space on the
4321 4258 * 32-bit kernel), this page will use the end of the copy buffer in the
4322 4259 * current window, and the start of the copy buffer in the next window.
4323 4260 * Track that info... The cookie physical address was already set to
4324 4261 * the copy buffer physical address in setup_cookie..
4325 4262 */
4326 4263 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
4327 4264 pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
4328 4265 (*windowp)->wd_size) >> MMU_PAGESHIFT;
4329 4266 (*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
4330 4267 (*windowp)->wd_trim.tr_last_pidx = pidx;
4331 4268 (*windowp)->wd_trim.tr_last_cbaddr =
4332 4269 dma->dp_pgmap[pidx].pm_cbaddr;
4333 4270 #if !defined(__amd64)
4334 4271 (*windowp)->wd_trim.tr_last_kaddr =
4335 4272 dma->dp_pgmap[pidx].pm_kaddr;
4336 4273 #endif
4337 4274 }
4338 4275
4339 4276 /* save the buffer offsets for the next window */
4340 4277 coffset = cookie->dmac_size - trim_sz;
4341 4278 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
4342 4279
4343 4280 /*
4344 4281 * set this now in case this is the first window. all other cases are
4345 4282 * set in dma_win()
4346 4283 */
4347 4284 cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
4348 4285
4349 4286 /*
4350 4287 * initialize the next window using what's left over in the previous
4351 4288 * cookie.
4352 4289 */
4353 4290 (*windowp)++;
4354 4291 rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
4355 4292 (*windowp)->wd_cookie_cnt++;
4356 4293 (*windowp)->wd_trim.tr_trim_first = B_TRUE;
4357 4294 (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
4358 4295 (*windowp)->wd_trim.tr_first_size = trim_sz;
4359 4296
4360 4297 /*
4361 4298 * again, we're tracking if the last cookie uses the copy buffer.
4362 4299 * read the comment above for more info on why we need to track
4363 4300 * additional state.
4364 4301 *
4365 4302 	 * For the first cookie in the new window, we need to reset the physical
4366 4303 	 * address we DMA into to the start of the copy buffer plus any
4367 4304 * initial page offset which may be present.
4368 4305 */
4369 4306 if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
4370 4307 (*windowp)->wd_dosync = B_TRUE;
4371 4308 (*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
4372 4309 (*windowp)->wd_trim.tr_first_pidx = pidx;
4373 4310 (*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
4374 4311 poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;
4375 4312
4376 4313 paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) +
4377 4314 poff;
4378 4315 (*windowp)->wd_trim.tr_first_paddr =
4379 4316 ROOTNEX_PADDR_TO_RBASE(paddr);
4380 4317
4381 4318 #if !defined(__amd64)
4382 4319 (*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
4383 4320 #endif
4384 4321 /* account for the cookie copybuf usage in the new window */
4385 4322 *copybuf_used += MMU_PAGESIZE;
4386 4323
4387 4324 /*
4388 4325 * every piece of code has to have a hack, and here is this
4389 4326 	 * one's :-)
4390 4327 *
4391 4328 * There is a complex interaction between setup_cookie and the
4392 4329 * copybuf window boundary. The complexity had to be in either
4393 4330 * the maxxfer window, or the copybuf window, and I chose the
4394 4331 * copybuf code.
4395 4332 *
4396 4333 * So in this code path, we have taken the last cookie,
4397 4334 * virtually broken it in half due to the trim, and it happens
4398 4335 * to use the copybuf which further complicates life. At the
4399 4336 * same time, we have already setup the current cookie, which
4400 4337 * is now wrong. More background info: the current cookie uses
4401 4338 * the copybuf, so it is only a page long max. So we need to
4402 4339 	 * fix the current cookie's copy buffer address, physical
4403 4340 	 * address, and kva for the 32-bit kernel. We do this by
4404 4341 	 * bumping them by page size (of course, we can't do this on
4405 4342 * the physical address since the copy buffer may not be
4406 4343 * physically contiguous).
4407 4344 */
4408 4345 cookie++;
4409 4346 dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
4410 4347 poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
4411 4348
4412 4349 paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
4413 4350 dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
4414 4351 cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(paddr);
4415 4352
4416 4353 #if !defined(__amd64)
4417 4354 ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
4418 4355 dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
4419 4356 #endif
4420 4357 } else {
4421 4358 /* go back to the current cookie */
4422 4359 cookie++;
4423 4360 }
4424 4361
4425 4362 /*
4426 4363 * add the current cookie to the new window. set the new window size to
4427 4364 	 * what was left over from the previous cookie and what's in the
4428 4365 * current cookie.
4429 4366 */
4430 4367 (*windowp)->wd_cookie_cnt++;
4431 4368 (*windowp)->wd_size = trim_sz + cookie->dmac_size;
4432 4369 ASSERT((*windowp)->wd_size < dma->dp_maxxfer);
4433 4370
4434 4371 /*
4435 4372 * we know that the cookie passed in always uses the copy buffer. We
4436 4373 * wouldn't be here if it didn't.
4437 4374 */
4438 4375 *copybuf_used += MMU_PAGESIZE;
4439 4376
4440 4377 return (DDI_SUCCESS);
4441 4378 }
4442 4379
4443 4380
4444 4381 /*
4445 4382 * rootnex_maxxfer_window_boundary()
4446 4383 * Called in bind slowpath when we get to a window boundary because we will
4447 4384 * go over maxxfer.
4448 4385 */
4449 4386 static int
4450 4387 rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
4451 4388 rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
4452 4389 {
4453 4390 size_t dmac_size;
4454 4391 off_t new_offset;
4455 4392 size_t trim_sz;
4456 4393 off_t coffset;
4457 4394
4458 4395
4459 4396 /*
4460 4397 * calculate how much we have to trim off of the current cookie to equal
4461 4398 * maxxfer. We don't have to account for granularity here since our
4462 4399 * maxxfer already takes that into account.
4463 4400 */
4464 4401 trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
4465 4402 ASSERT(trim_sz <= cookie->dmac_size);
4466 4403 ASSERT(trim_sz <= dma->dp_maxxfer);
4467 4404
4468 4405 /* save cookie size since we need it later and we might change it */
4469 4406 dmac_size = cookie->dmac_size;
4470 4407
4471 4408 /*
4472 4409 * if we're not trimming the entire cookie, setup the current window to
4473 4410 * account for the trim.
4474 4411 */
4475 4412 if (trim_sz < cookie->dmac_size) {
4476 4413 (*windowp)->wd_cookie_cnt++;
4477 4414 (*windowp)->wd_trim.tr_trim_last = B_TRUE;
4478 4415 (*windowp)->wd_trim.tr_last_cookie = cookie;
4479 4416 (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
4480 4417 (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
4481 4418 (*windowp)->wd_size = dma->dp_maxxfer;
4482 4419
4483 4420 /*
4484 4421 * set the adjusted cookie size now in case this is the first
4485 4422 * window. All other windows are taken care of in get win
4486 4423 */
4487 4424 cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
4488 4425 }
4489 4426
4490 4427 /*
4491 4428 * coffset is the current offset within the cookie, new_offset is the
4492 4429 	 * current offset within the entire buffer.
4493 4430 */
4494 4431 coffset = dmac_size - trim_sz;
4495 4432 new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
4496 4433
4497 4434 /* initialize the next window */
4498 4435 (*windowp)++;
4499 4436 rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
4500 4437 (*windowp)->wd_cookie_cnt++;
4501 4438 (*windowp)->wd_size = trim_sz;
4502 4439 if (trim_sz < dmac_size) {
4503 4440 (*windowp)->wd_trim.tr_trim_first = B_TRUE;
4504 4441 (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
4505 4442 coffset;
4506 4443 (*windowp)->wd_trim.tr_first_size = trim_sz;
4507 4444 }
4508 4445
4509 4446 return (DDI_SUCCESS);
4510 4447 }
4511 4448
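The maxxfer boundary above applies the same split idea; here is the arithmetic by itself, with illustrative values.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t maxxfer     = 1024 * 1024;	/* dp_maxxfer */
	uint64_t win_size    = 1008 * 1024;	/* bytes already in the window */
	uint64_t cookie_size = 64 * 1024;	/* cookie that would overflow it */

	/* how much of this cookie must move to the next window */
	uint64_t trim_sz = (win_size + cookie_size) - maxxfer;

	uint64_t this_win_part = cookie_size - trim_sz;	/* stays in this window */
	printf("window=%llu (+%llu of the cookie), next window starts with %llu\n",
	    (unsigned long long)maxxfer, (unsigned long long)this_win_part,
	    (unsigned long long)trim_sz);
	return (0);
}
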
4512 4449
4513 4450 /*ARGSUSED*/
4514 4451 static int
4515 4452 rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4516 4453 off_t off, size_t len, uint_t cache_flags)
4517 4454 {
4518 4455 rootnex_sglinfo_t *sinfo;
4519 4456 rootnex_pgmap_t *cbpage;
4520 4457 rootnex_window_t *win;
4521 4458 ddi_dma_impl_t *hp;
4522 4459 rootnex_dma_t *dma;
4523 4460 caddr_t fromaddr;
4524 4461 caddr_t toaddr;
4525 4462 uint_t psize;
4526 4463 off_t offset;
4527 4464 uint_t pidx;
4528 4465 size_t size;
4529 4466 off_t poff;
4530 4467 int e;
4531 4468
4532 4469
4533 4470 hp = (ddi_dma_impl_t *)handle;
4534 4471 dma = (rootnex_dma_t *)hp->dmai_private;
4535 4472 sinfo = &dma->dp_sglinfo;
4536 4473
4537 4474 /*
4538 4475 * if we don't have any windows, we don't need to sync. A copybuf
4539 4476 * will cause us to have at least one window.
4540 4477 */
4541 4478 if (dma->dp_window == NULL) {
4542 4479 return (DDI_SUCCESS);
4543 4480 }
4544 4481
4545 4482 /* This window may not need to be sync'd */
4546 4483 win = &dma->dp_window[dma->dp_current_win];
4547 4484 if (!win->wd_dosync) {
4548 4485 return (DDI_SUCCESS);
4549 4486 }
4550 4487
4551 4488 /* handle off and len special cases */
4552 4489 if ((off == 0) || (rootnex_sync_ignore_params)) {
4553 4490 offset = win->wd_offset;
4554 4491 } else {
4555 4492 offset = off;
4556 4493 }
4557 4494 if ((len == 0) || (rootnex_sync_ignore_params)) {
4558 4495 size = win->wd_size;
4559 4496 } else {
4560 4497 size = len;
4561 4498 }
4562 4499
4563 4500 /* check the sync args to make sure they make a little sense */
4564 4501 if (rootnex_sync_check_parms) {
4565 4502 e = rootnex_valid_sync_parms(hp, win, offset, size,
4566 4503 cache_flags);
4567 4504 if (e != DDI_SUCCESS) {
4568 4505 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
4569 4506 return (DDI_FAILURE);
4570 4507 }
4571 4508 }
4572 4509
4573 4510 /*
4574 4511 * special case the first page to handle the offset into the page. The
4575 4512 * offset to the current page for our buffer is the offset into the
4576 4513 * first page of the buffer plus our current offset into the buffer
4577 4514 * itself, masked of course.
4578 4515 */
4579 4516 poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
4580 4517 psize = MIN((MMU_PAGESIZE - poff), size);
4581 4518
4582 4519 /* go through all the pages that we want to sync */
4583 4520 while (size > 0) {
4584 4521 /*
4585 4522 * Calculate the page index relative to the start of the buffer.
4586 4523 * The index to the current page for our buffer is the offset
4587 4524 * into the first page of the buffer plus our current offset
4588 4525 * into the buffer itself, shifted of course...
4589 4526 */
4590 4527 pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
4591 4528 ASSERT(pidx < sinfo->si_max_pages);
4592 4529
4593 4530 /*
4594 4531 * if this page uses the copy buffer, we need to sync it,
4595 4532 * otherwise, go on to the next page.
4596 4533 */
4597 4534 cbpage = &dma->dp_pgmap[pidx];
4598 4535 ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
4599 4536 (cbpage->pm_uses_copybuf == B_FALSE));
4600 4537 if (cbpage->pm_uses_copybuf) {
4601 4538 /* cbaddr and kaddr should be page aligned */
4602 4539 ASSERT(((uintptr_t)cbpage->pm_cbaddr &
4603 4540 MMU_PAGEOFFSET) == 0);
4604 4541 ASSERT(((uintptr_t)cbpage->pm_kaddr &
4605 4542 MMU_PAGEOFFSET) == 0);
4606 4543
4607 4544 /*
4608 4545 * if we're copying for the device, we are going to
4609 4546 			 * copy from the driver's buffer to the rootnex
4610 4547 * allocated copy buffer.
4611 4548 */
4612 4549 if (cache_flags == DDI_DMA_SYNC_FORDEV) {
4613 4550 fromaddr = cbpage->pm_kaddr + poff;
4614 4551 toaddr = cbpage->pm_cbaddr + poff;
4615 4552 ROOTNEX_DPROBE2(rootnex__sync__dev,
4616 4553 dev_info_t *, dma->dp_dip, size_t, psize);
4617 4554
4618 4555 /*
4619 4556 * if we're copying for the cpu/kernel, we are going to
4620 4557 * copy from the rootnex allocated copy buffer to the
4621 4558 			 * driver's buffer.
4622 4559 */
4623 4560 } else {
4624 4561 fromaddr = cbpage->pm_cbaddr + poff;
4625 4562 toaddr = cbpage->pm_kaddr + poff;
4626 4563 ROOTNEX_DPROBE2(rootnex__sync__cpu,
4627 4564 dev_info_t *, dma->dp_dip, size_t, psize);
4628 4565 }
4629 4566
4630 4567 bcopy(fromaddr, toaddr, psize);
4631 4568 }
4632 4569
4633 4570 /*
4634 4571 * decrement size until we're done, update our offset into the
4635 4572 * buffer, and get the next page size.
4636 4573 */
4637 4574 size -= psize;
4638 4575 offset += psize;
4639 4576 psize = MIN(MMU_PAGESIZE, size);
4640 4577
4641 4578 /* page offset is zero for the rest of this loop */
4642 4579 poff = 0;
4643 4580 }
4644 4581
4645 4582 return (DDI_SUCCESS);
4646 4583 }
4647 4584
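The per-page copy in rootnex_coredma_sync() reduces to copying toward the bounce page for DDI_DMA_SYNC_FORDEV and back out for the CPU/kernel flavors. A stand-alone model using plain memcpy; the names and flag values are illustrative, not the DDI constants.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { SYNC_FORDEV, SYNC_FORCPU };

/* Copy one page's worth between a driver buffer and its bounce (copy) buffer. */
static void
sync_page(char *drv_kaddr, char *copybuf, size_t poff, size_t psize, int dir)
{
	if (dir == SYNC_FORDEV)
		memcpy(copybuf + poff, drv_kaddr + poff, psize);	/* to device */
	else
		memcpy(drv_kaddr + poff, copybuf + poff, psize);	/* to CPU */
}

int
main(void)
{
	char drv[4096], cb[4096];

	memset(drv, 0xab, sizeof (drv));
	memset(cb, 0, sizeof (cb));

	sync_page(drv, cb, 128, 256, SYNC_FORDEV);	/* stage data for the device */
	printf("cb[128]=0x%x cb[0]=0x%x\n", cb[128] & 0xff, cb[0] & 0xff);

	cb[128] = 0x5a;					/* pretend the device wrote */
	sync_page(drv, cb, 128, 256, SYNC_FORCPU);
	printf("drv[128]=0x%x\n", drv[128] & 0xff);
	return (0);
}
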
4648 4585 /*
4649 4586 * rootnex_dma_sync()
4650 4587 * called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
4651 4588 * We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
4652 4589 * is set, ddi_dma_sync() returns immediately passing back success.
4653 4590 */
4654 4591 /*ARGSUSED*/
4655 4592 static int
4656 4593 rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4657 4594 off_t off, size_t len, uint_t cache_flags)
4658 4595 {
4659 4596 #if defined(__amd64) && !defined(__xpv)
4660 4597 if (IOMMU_USED(rdip)) {
4661 4598 return (iommulib_nexdma_sync(dip, rdip, handle, off, len,
4662 4599 cache_flags));
4663 4600 }
4664 4601 #endif
4665 4602 return (rootnex_coredma_sync(dip, rdip, handle, off, len,
4666 4603 cache_flags));
4667 4604 }
4668 4605
4669 4606 /*
4670 4607 * rootnex_valid_sync_parms()
4671 4608 * checks the parameters passed to sync to verify they are correct.
4672 4609 */
4673 4610 static int
4674 4611 rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
4675 4612 off_t offset, size_t size, uint_t cache_flags)
4676 4613 {
4677 4614 off_t woffset;
4678 4615
4679 4616
4680 4617 /*
4681 4618 	 * the first part of the test is to make sure the offset passed in is
4682 4619 * within the window.
4683 4620 */
4684 4621 if (offset < win->wd_offset) {
4685 4622 return (DDI_FAILURE);
4686 4623 }
4687 4624
4688 4625 /*
4689 4626 	 * the second and last part of the test is to make sure the offset and
4690 4627 	 * length passed in are within the window.
4691 4628 */
4692 4629 woffset = offset - win->wd_offset;
4693 4630 if ((woffset + size) > win->wd_size) {
4694 4631 return (DDI_FAILURE);
4695 4632 }
4696 4633
4697 4634 /*
4698 4635 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
4699 4636 * be set too.
4700 4637 */
4701 4638 if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
4702 4639 (hp->dmai_rflags & DDI_DMA_WRITE)) {
4703 4640 return (DDI_SUCCESS);
4704 4641 }
4705 4642
4706 4643 /*
4707 4644 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
4708 4645 * should be set. Also DDI_DMA_READ should be set in the flags.
4709 4646 */
4710 4647 if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
4711 4648 (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
4712 4649 (hp->dmai_rflags & DDI_DMA_READ)) {
4713 4650 return (DDI_SUCCESS);
4714 4651 }
4715 4652
4716 4653 return (DDI_FAILURE);
4717 4654 }
4718 4655
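The checks in rootnex_valid_sync_parms() amount to a range test against the active window plus a direction/flags consistency test. A stand-alone model of that predicate, with made-up flag values standing in for the DDI constants:

#include <stdint.h>
#include <stdio.h>

enum { SYNC_FORDEV, SYNC_FORCPU, SYNC_FORKERNEL };
#define DIR_READ	0x1	/* stand-in for DDI_DMA_READ  */
#define DIR_WRITE	0x2	/* stand-in for DDI_DMA_WRITE */

static int
valid_sync_parms(uint64_t wd_offset, uint64_t wd_size, unsigned rflags,
    uint64_t offset, uint64_t size, int cache_flags)
{
	if (offset < wd_offset)
		return (0);				/* below the window */
	if ((offset - wd_offset) + size > wd_size)
		return (0);				/* runs past the window */
	if (cache_flags == SYNC_FORDEV && (rflags & DIR_WRITE))
		return (1);				/* writing toward the device */
	if ((cache_flags == SYNC_FORCPU || cache_flags == SYNC_FORKERNEL) &&
	    (rflags & DIR_READ))
		return (1);				/* reading back to the CPU */
	return (0);
}

int
main(void)
{
	printf("%d\n", valid_sync_parms(0x1000, 0x2000, DIR_WRITE,
	    0x1800, 0x800, SYNC_FORDEV));	/* 1: in range, write + FORDEV */
	printf("%d\n", valid_sync_parms(0x1000, 0x2000, DIR_WRITE,
	    0x1800, 0x800, SYNC_FORCPU));	/* 0: FORCPU needs DIR_READ */
	return (0);
}
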
4719 4656
4720 4657 /*ARGSUSED*/
4721 4658 static int
4722 4659 rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4723 4660 uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
4724 4661 uint_t *ccountp)
4725 4662 {
4726 4663 rootnex_window_t *window;
4727 4664 rootnex_trim_t *trim;
4728 4665 ddi_dma_impl_t *hp;
4729 4666 rootnex_dma_t *dma;
4730 4667 ddi_dma_obj_t *dmao;
4731 4668 #if !defined(__amd64)
4732 4669 rootnex_sglinfo_t *sinfo;
4733 4670 rootnex_pgmap_t *pmap;
4734 4671 uint_t pidx;
4735 4672 uint_t pcnt;
4736 4673 off_t poff;
4737 4674 int i;
4738 4675 #endif
4739 4676
4740 4677
4741 4678 hp = (ddi_dma_impl_t *)handle;
4742 4679 dma = (rootnex_dma_t *)hp->dmai_private;
4743 4680 #if !defined(__amd64)
4744 4681 sinfo = &dma->dp_sglinfo;
4745 4682 #endif
4746 4683
4747 4684 /* If we try and get a window which doesn't exist, return failure */
4748 4685 if (win >= hp->dmai_nwin) {
4749 4686 ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
4750 4687 return (DDI_FAILURE);
4751 4688 }
4752 4689
4753 4690 dmao = dma->dp_dvma_used ? &dma->dp_dvma : &dma->dp_dma;
4754 4691
4755 4692 /*
4756 4693 * if we don't have any windows, and they're asking for the first
4757 4694 * window, setup the cookie pointer to the first cookie in the bind.
4758 4695 * setup our return values, then increment the cookie since we return
4759 4696 * the first cookie on the stack.
4760 4697 */
4761 4698 if (dma->dp_window == NULL) {
4762 4699 if (win != 0) {
4763 4700 ROOTNEX_DPROF_INC(
4764 4701 &rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
4765 4702 return (DDI_FAILURE);
4766 4703 }
4767 4704 hp->dmai_cookie = dma->dp_cookies;
4768 4705 *offp = 0;
4769 4706 *lenp = dmao->dmao_size;
4770 4707 *ccountp = dma->dp_sglinfo.si_sgl_size;
4771 4708 *cookiep = hp->dmai_cookie[0];
4772 4709 hp->dmai_cookie++;
4773 4710 return (DDI_SUCCESS);
4774 4711 }
4775 4712
4776 4713 /* sync the old window before moving on to the new one */
4777 4714 window = &dma->dp_window[dma->dp_current_win];
4778 4715 if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
4779 4716 (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
4780 4717 DDI_DMA_SYNC_FORCPU);
4781 4718 }
4782 4719
4783 4720 #if !defined(__amd64)
4784 4721 /*
4785 4722 * before we move to the next window, if we need to re-map, unmap all
4786 4723 * the pages in this window.
4787 4724 */
4788 4725 if (dma->dp_cb_remaping) {
4789 4726 /*
4790 4727 * If we switch to this window again, we'll need to map in
4791 4728 * on the fly next time.
4792 4729 */
4793 4730 window->wd_remap_copybuf = B_TRUE;
4794 4731
4795 4732 /*
4796 4733 * calculate the page index into the buffer where this window
4797 4734 * starts, and the number of pages this window takes up.
4798 4735 */
4799 4736 pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4800 4737 MMU_PAGESHIFT;
4801 4738 poff = (sinfo->si_buf_offset + window->wd_offset) &
4802 4739 MMU_PAGEOFFSET;
4803 4740 pcnt = mmu_btopr(window->wd_size + poff);
4804 4741 ASSERT((pidx + pcnt) <= sinfo->si_max_pages);
4805 4742
4806 4743 /* unmap pages which are currently mapped in this window */
4807 4744 for (i = 0; i < pcnt; i++) {
4808 4745 if (dma->dp_pgmap[pidx].pm_mapped) {
4809 4746 hat_unload(kas.a_hat,
4810 4747 dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
4811 4748 HAT_UNLOAD);
4812 4749 dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
4813 4750 }
4814 4751 pidx++;
4815 4752 }
4816 4753 }
4817 4754 #endif
4818 4755
4819 4756 /*
4820 4757 * Move to the new window.
4821 4758 * NOTE: current_win must be set for sync to work right
4822 4759 */
4823 4760 dma->dp_current_win = win;
4824 4761 window = &dma->dp_window[win];
4825 4762
4826 4763 /* if needed, adjust the first and/or last cookies for trim */
4827 4764 trim = &window->wd_trim;
4828 4765 if (trim->tr_trim_first) {
4829 4766 window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr;
4830 4767 window->wd_first_cookie->dmac_size = trim->tr_first_size;
4831 4768 #if !defined(__amd64)
4832 4769 window->wd_first_cookie->dmac_type =
4833 4770 (window->wd_first_cookie->dmac_type &
4834 4771 ROOTNEX_USES_COPYBUF) + window->wd_offset;
4835 4772 #endif
4836 4773 if (trim->tr_first_copybuf_win) {
4837 4774 dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
4838 4775 trim->tr_first_cbaddr;
4839 4776 #if !defined(__amd64)
4840 4777 dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
4841 4778 trim->tr_first_kaddr;
4842 4779 #endif
4843 4780 }
4844 4781 }
4845 4782 if (trim->tr_trim_last) {
4846 4783 trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr;
4847 4784 trim->tr_last_cookie->dmac_size = trim->tr_last_size;
4848 4785 if (trim->tr_last_copybuf_win) {
4849 4786 dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
4850 4787 trim->tr_last_cbaddr;
4851 4788 #if !defined(__amd64)
4852 4789 dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
4853 4790 trim->tr_last_kaddr;
4854 4791 #endif
4855 4792 }
4856 4793 }
4857 4794
4858 4795 /*
4859 4796 * setup the cookie pointer to the first cookie in the window. setup
4860 4797 * our return values, then increment the cookie since we return the
4861 4798 * first cookie on the stack.
4862 4799 */
4863 4800 hp->dmai_cookie = window->wd_first_cookie;
4864 4801 *offp = window->wd_offset;
4865 4802 *lenp = window->wd_size;
4866 4803 *ccountp = window->wd_cookie_cnt;
4867 4804 *cookiep = hp->dmai_cookie[0];
4868 4805 hp->dmai_cookie++;
4869 4806
4870 4807 #if !defined(__amd64)
4871 4808 /* re-map copybuf if required for this window */
4872 4809 if (dma->dp_cb_remaping) {
4873 4810 /*
4874 4811 * calculate the page index into the buffer where this
4875 4812 * window starts.
4876 4813 */
4877 4814 pidx = (sinfo->si_buf_offset + window->wd_offset) >>
4878 4815 MMU_PAGESHIFT;
4879 4816 ASSERT(pidx < sinfo->si_max_pages);
4880 4817
4881 4818 /*
4882 4819 * the first page can get unmapped if it's shared with the
4883 4820 * previous window. Even if the rest of this window is already
4884 4821 * mapped in, we need to still check this one.
4885 4822 */
4886 4823 pmap = &dma->dp_pgmap[pidx];
4887 4824 if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
4888 4825 if (pmap->pm_pp != NULL) {
4889 4826 pmap->pm_mapped = B_TRUE;
4890 4827 i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
4891 4828 } else if (pmap->pm_vaddr != NULL) {
4892 4829 pmap->pm_mapped = B_TRUE;
4893 4830 i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
4894 4831 pmap->pm_kaddr);
4895 4832 }
4896 4833 }
4897 4834 pidx++;
4898 4835
4899 4836 /* map in the rest of the pages if required */
4900 4837 if (window->wd_remap_copybuf) {
4901 4838 window->wd_remap_copybuf = B_FALSE;
4902 4839
4903 4840  			/* figure out how many pages this window takes up */
4904 4841 poff = (sinfo->si_buf_offset + window->wd_offset) &
4905 4842 MMU_PAGEOFFSET;
4906 4843 pcnt = mmu_btopr(window->wd_size + poff);
4907 4844 ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);
4908 4845
4909 4846 /* map pages which require it */
4910 4847 for (i = 1; i < pcnt; i++) {
4911 4848 pmap = &dma->dp_pgmap[pidx];
4912 4849 if (pmap->pm_uses_copybuf) {
4913 4850 ASSERT(pmap->pm_mapped == B_FALSE);
4914 4851 if (pmap->pm_pp != NULL) {
4915 4852 pmap->pm_mapped = B_TRUE;
4916 4853 i86_pp_map(pmap->pm_pp,
4917 4854 pmap->pm_kaddr);
4918 4855 } else if (pmap->pm_vaddr != NULL) {
4919 4856 pmap->pm_mapped = B_TRUE;
4920 4857 i86_va_map(pmap->pm_vaddr,
4921 4858 sinfo->si_asp,
4922 4859 pmap->pm_kaddr);
4923 4860 }
4924 4861 }
4925 4862 pidx++;
4926 4863 }
4927 4864 }
4928 4865 }
4929 4866 #endif
4930 4867
4931 4868 /* if the new window uses the copy buffer, sync it for the device */
4932 4869 if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
4933 4870 (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
4934 4871 DDI_DMA_SYNC_FORDEV);
4935 4872 }
4936 4873
4937 4874 return (DDI_SUCCESS);
4938 4875 }
4939 4876
4940 4877 /*
4941 4878 * rootnex_dma_win()
4942 4879 * called from ddi_dma_getwin()
4943 4880 */
4944 4881 /*ARGSUSED*/
4945 4882 static int
4946 4883 rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
4947 4884 uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
4948 4885 uint_t *ccountp)
4949 4886 {
4950 4887 #if defined(__amd64) && !defined(__xpv)
4951 4888 if (IOMMU_USED(rdip)) {
4952 4889 return (iommulib_nexdma_win(dip, rdip, handle, win, offp, lenp,
4953 4890 cookiep, ccountp));
4954 4891 }
4955 4892 #endif
4956 4893
4957 4894 return (rootnex_coredma_win(dip, rdip, handle, win, offp, lenp,
4958 4895 cookiep, ccountp));
4959 4896 }
4960 4897
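/*
 * Illustrative, hypothetical consumer-side sketch (not rootnex code): the
 * window-walking loop that ends up in rootnex_dma_win()/rootnex_coredma_win()
 * above via ddi_dma_getwin(9F).  It assumes the bind returned
 * DDI_DMA_PARTIAL_MAP; "program the cookie" stands in for driver-specific
 * hardware setup.
 */
static int
example_walk_windows(ddi_dma_handle_t handle)
{
	ddi_dma_cookie_t cookie;
	uint_t nwin, ccount, win, i;
	size_t len;
	off_t off;

	if (ddi_dma_numwin(handle, &nwin) != DDI_SUCCESS)
		return (DDI_FAILURE);

	for (win = 0; win < nwin; win++) {
		/*
		 * Activating a window also lets rootnex sync the outgoing and
		 * incoming copy buffers and re-trim the edge cookies.
		 */
		if (ddi_dma_getwin(handle, win, &off, &len, &cookie,
		    &ccount) != DDI_SUCCESS)
			return (DDI_FAILURE);

		for (i = 0; i < ccount; i++) {
			/* program cookie.dmac_laddress / cookie.dmac_size */
			if (i + 1 < ccount)
				ddi_dma_nextcookie(handle, &cookie);
		}
	}

	return (DDI_SUCCESS);
}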
4961 4898 #if defined(__amd64) && !defined(__xpv)
4962 4899 /*ARGSUSED*/
4963 4900 static int
4964 4901 rootnex_coredma_hdl_setprivate(dev_info_t *dip, dev_info_t *rdip,
4965 4902 ddi_dma_handle_t handle, void *v)
4966 4903 {
4967 4904 ddi_dma_impl_t *hp;
4968 4905 rootnex_dma_t *dma;
4969 4906
4970 4907 hp = (ddi_dma_impl_t *)handle;
4971 4908 dma = (rootnex_dma_t *)hp->dmai_private;
4972 4909 dma->dp_iommu_private = v;
4973 4910
4974 4911 return (DDI_SUCCESS);
4975 4912 }
4976 4913
4977 4914 /*ARGSUSED*/
4978 4915 static void *
4979 4916 rootnex_coredma_hdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
4980 4917 ddi_dma_handle_t handle)
4981 4918 {
4982 4919 ddi_dma_impl_t *hp;
4983 4920 rootnex_dma_t *dma;
4984 4921
4985 4922 hp = (ddi_dma_impl_t *)handle;
4986 4923 dma = (rootnex_dma_t *)hp->dmai_private;
4987 4924
4988 4925 return (dma->dp_iommu_private);
4989 4926 }
4990 4927 #endif
4991 4928
4992 4929 /*
4993 4930 * ************************
4994 4931 * obsoleted dma routines
4995 4932 * ************************
4996 4933 */
4997 4934
4998 4935 /*
4999 4936 * rootnex_dma_mctl()
5000 4937 *
5001 4938 * We don't support this legacy interface any more on x86.
5002 4939 */
5003 4940 /* ARGSUSED */
5004 4941 static int
5005 4942 rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
5006 4943 enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
5007 4944 uint_t cache_flags)
5008 4945 {
5009 4946 /*
5010 4947  	 * The only thing dma_mctl is used for anymore is legacy SPARC
5011 4948 * dvma and sbus-specific routines.
5012 4949 */
5013 4950 return (DDI_FAILURE);
5014 4951 }
5015 4952
5016 4953 /*
5017 4954 * *********
5018 4955 * FMA Code
5019 4956 * *********
5020 4957 */
5021 4958
5022 4959 /*
5023 4960 * rootnex_fm_init()
5024 4961 * FMA init busop
5025 4962 */
5026 4963 /* ARGSUSED */
5027 4964 static int
5028 4965 rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
5029 4966 ddi_iblock_cookie_t *ibc)
5030 4967 {
5031 4968 *ibc = rootnex_state->r_err_ibc;
5032 4969
5033 4970 return (ddi_system_fmcap);
5034 4971 }
5035 4972
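/*
 * Illustrative, hypothetical child-driver sketch (not rootnex code):
 * requesting FM capabilities in attach(9E).  The request is capped by the
 * rootnex_fm_init() busop above; DDI_FM_DMACHK_CAPABLE together with
 * DDI_DMA_FLAGERR in the driver's DMA attributes is what makes the
 * rootnex_dma_check() walk below relevant.  Only ddi_fm_init(9F) and the
 * capability flags are real DDI interfaces.
 */
static void
example_fm_attach(dev_info_t *dip)
{
	int fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_DMACHK_CAPABLE;
	ddi_iblock_cookie_t fm_ibc;

	/* ddi_fm_init() trims fmcap down to what the parent nexus supports */
	ddi_fm_init(dip, &fmcap, &fm_ibc);
}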
5036 4973 /*
5037 4974 * rootnex_dma_check()
5038 4975 * Function called after a dma fault occurred to find out whether the
5039 4976 * fault address is associated with a driver that is able to handle faults
5040 4977 * and recover from faults.
5041 4978 */
5042 4979 /* ARGSUSED */
5043 4980 static int
5044 4981 rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
5045 4982 const void *not_used)
5046 4983 {
5047 4984 rootnex_window_t *window;
5048 4985 uint64_t start_addr;
5049 4986 uint64_t fault_addr;
5050 4987 ddi_dma_impl_t *hp;
5051 4988 rootnex_dma_t *dma;
5052 4989 uint64_t end_addr;
5053 4990 size_t csize;
5054 4991 int i;
5055 4992 int j;
5056 4993
5057 4994
5058 4995 /* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
5059 4996 hp = (ddi_dma_impl_t *)handle;
5060 4997 ASSERT(hp);
5061 4998
5062 4999 dma = (rootnex_dma_t *)hp->dmai_private;
5063 5000
5064 5001 /* Get the address that we need to search for */
5065 5002 fault_addr = *(uint64_t *)addr;
5066 5003
5067 5004 /*
5068 5005 * if we don't have any windows, we can just walk through all the
5069 5006 * cookies.
5070 5007 */
5071 5008 if (dma->dp_window == NULL) {
5072 5009 /* for each cookie */
5073 5010 for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
5074 5011 /*
5075 5012 * if the faulted address is within the physical address
5076 5013 * range of the cookie, return DDI_FM_NONFATAL.
5077 5014 */
5078 5015 if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
5079 5016 (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
5080 5017 dma->dp_cookies[i].dmac_size))) {
5081 5018 return (DDI_FM_NONFATAL);
5082 5019 }
5083 5020 }
5084 5021
5085 5022 /* fault_addr not within this DMA handle */
5086 5023 return (DDI_FM_UNKNOWN);
5087 5024 }
5088 5025
5089 5026  	/* we have multiple windows, walk through each window */
5090 5027 for (i = 0; i < hp->dmai_nwin; i++) {
5091 5028 window = &dma->dp_window[i];
5092 5029
5093 5030 /* Go through all the cookies in the window */
5094 5031 for (j = 0; j < window->wd_cookie_cnt; j++) {
5095 5032
5096 5033 start_addr = window->wd_first_cookie[j].dmac_laddress;
5097 5034 csize = window->wd_first_cookie[j].dmac_size;
5098 5035
5099 5036 /*
5100 5037 * if we are trimming the first cookie in the window,
5101 5038 * and this is the first cookie, adjust the start
5102 5039 * address and size of the cookie to account for the
5103 5040 * trim.
5104 5041 */
5105 5042 if (window->wd_trim.tr_trim_first && (j == 0)) {
5106 5043 start_addr = window->wd_trim.tr_first_paddr;
5107 5044 csize = window->wd_trim.tr_first_size;
5108 5045 }
5109 5046
5110 5047 /*
5111 5048 * if we are trimming the last cookie in the window,
5112 5049 * and this is the last cookie, adjust the start
5113 5050 * address and size of the cookie to account for the
5114 5051 * trim.
5115 5052 */
5116 5053 if (window->wd_trim.tr_trim_last &&
5117 5054 (j == (window->wd_cookie_cnt - 1))) {
5118 5055 start_addr = window->wd_trim.tr_last_paddr;
5119 5056 csize = window->wd_trim.tr_last_size;
5120 5057 }
5121 5058
5122 5059 end_addr = start_addr + csize;
5123 5060
5124 5061 /*
5125 5062 * if the faulted address is within the physical
5126 5063 * address of the cookie, return DDI_FM_NONFATAL.
5127 5064 */
5128 5065 if ((fault_addr >= start_addr) &&
5129 5066 (fault_addr <= end_addr)) {
5130 5067 return (DDI_FM_NONFATAL);
5131 5068 }
5132 5069 }
5133 5070 }
5134 5071
5135 5072 /* fault_addr not within this DMA handle */
5136 5073 return (DDI_FM_UNKNOWN);
5137 5074 }
5138 5075
5139 5076 /*ARGSUSED*/
5140 5077 static int
5141 5078 rootnex_quiesce(dev_info_t *dip)
5142 5079 {
5143 5080 #if defined(__amd64) && !defined(__xpv)
5144 5081 return (immu_quiesce());
5145 5082 #else
5146 5083 return (DDI_SUCCESS);
5147 5084 #endif
5148 5085 }
5149 5086
5150 5087 #if defined(__xpv)
5151 5088 void
5152 5089 immu_init(void)
5153 5090 {
5154 5091 ;
5155 5092 }
5156 5093
5157 5094 void
5158 5095 immu_startup(void)
5159 5096 {
5160 5097 ;
5161 5098 }
5162 5099 /*ARGSUSED*/
5163 5100 void
5164 5101 immu_physmem_update(uint64_t addr, uint64_t size)
5165 5102 {
5166 5103 ;
5167 5104 }
5168 5105 #endif
4355 lines elided