Print this page
9658 Update cxgbe to deal with newer flash modules
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Toomas Soome <tsoome@me.com>
Approved by: Dan McDonald <danmcd@joyent.com>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/cxgbe/common/t4_hw.c
+++ new/usr/src/uts/common/io/cxgbe/common/t4_hw.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * This file is part of the Chelsio T4/T5/T6 Ethernet driver.
14 14 *
15 15 * Copyright (C) 2003-2017 Chelsio Communications. All rights reserved.
16 16 *
17 17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 19 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
20 20 * release for licensing terms and conditions.
21 21 */
22 22
23 23 #include "common.h"
24 24 #include "t4_regs.h"
25 25 #include "t4_regs_values.h"
26 26 #include "t4fw_interface.h"
27 27
28 28 /**
29 29 * t4_wait_op_done_val - wait until an operation is completed
30 30 * @adapter: the adapter performing the operation
31 31 * @reg: the register to check for completion
32 32 * @mask: a single-bit field within @reg that indicates completion
33 33 * @polarity: the value of the field when the operation is completed
34 34 * @attempts: number of check iterations
35 35 * @delay: delay in usecs between iterations
36 36 * @valp: where to store the value of the register at completion time
37 37 *
38 38 * Wait until an operation is completed by checking a bit in a register
39 39 * up to @attempts times. If @valp is not NULL the value of the register
40 40 * at the time it indicated completion is stored there. Returns 0 if the
41 41 * operation completes and -EAGAIN otherwise.
42 42 */
43 43 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
44 44 int polarity, int attempts, int delay, u32 *valp)
45 45 {
46 46 while (1) {
47 47 u32 val = t4_read_reg(adapter, reg);
48 48
49 49 if (!!(val & mask) == polarity) {
50 50 if (valp)
51 51 *valp = val;
52 52 return 0;
53 53 }
54 54 if (--attempts == 0)
55 55 return -EAGAIN;
56 56 if (delay)
57 57 udelay(delay);
58 58 }
59 59 }
60 60
/*
 * t4_wait_op_done - wait until an operation is completed
 *
 * Thin wrapper around t4_wait_op_done_val() for callers that do not
 * need the final register value (@valp passed as NULL).
 */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
	int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
	    delay, NULL);
}
67 67
68 68 /**
69 69 * t4_set_reg_field - set a register field to a value
70 70 * @adapter: the adapter to program
71 71 * @addr: the register address
72 72 * @mask: specifies the portion of the register to modify
73 73 * @val: the new value for the register field
74 74 *
75 75 * Sets a register field specified by the supplied mask to the
76 76 * given value.
77 77 */
78 78 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
79 79 u32 val)
80 80 {
81 81 u32 v = t4_read_reg(adapter, addr) & ~mask;
82 82
83 83 t4_write_reg(adapter, addr, v | val);
84 84 (void) t4_read_reg(adapter, addr); /* flush */
85 85 }
86 86
87 87 /**
88 88 * t4_read_indirect - read indirectly addressed registers
89 89 * @adap: the adapter
90 90 * @addr_reg: register holding the indirect address
91 91 * @data_reg: register holding the value of the indirect register
92 92 * @vals: where the read register values are stored
93 93 * @nregs: how many indirect registers to read
94 94 * @start_idx: index of first indirect register to read
95 95 *
96 96 * Reads registers that are accessed indirectly through an address/data
97 97 * register pair.
98 98 */
99 99 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
100 100 unsigned int data_reg, u32 *vals,
101 101 unsigned int nregs, unsigned int start_idx)
102 102 {
103 103 while (nregs--) {
104 104 t4_write_reg(adap, addr_reg, start_idx);
105 105 *vals++ = t4_read_reg(adap, data_reg);
106 106 start_idx++;
107 107 }
108 108 }
109 109
110 110 /**
111 111 * t4_write_indirect - write indirectly addressed registers
112 112 * @adap: the adapter
113 113 * @addr_reg: register holding the indirect addresses
114 114 * @data_reg: register holding the value for the indirect registers
115 115 * @vals: values to write
116 116 * @nregs: how many indirect registers to write
117 117 * @start_idx: address of first indirect register to write
118 118 *
119 119 * Writes a sequential block of registers that are accessed indirectly
120 120 * through an address/data register pair.
121 121 */
122 122 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
123 123 unsigned int data_reg, const u32 *vals,
124 124 unsigned int nregs, unsigned int start_idx)
125 125 {
126 126 while (nregs--) {
127 127 t4_write_reg(adap, addr_reg, start_idx++);
128 128 t4_write_reg(adap, data_reg, *vals++);
129 129 }
130 130 }
131 131
132 132 /*
133 133 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
134 134 * mechanism. This guarantees that we get the real value even if we're
135 135 * operating within a Virtual Machine and the Hypervisor is trapping our
136 136 * Configuration Space accesses.
137 137 *
138 138 * N.B. This routine should only be used as a last resort: the firmware uses
139 139 * the backdoor registers on a regular basis and we can end up
140 140 * conflicting with it's uses!
141 141 */
142 142 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
143 143 {
144 144 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
145 145
146 146 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
147 147 req |= F_ENABLE;
148 148 else
149 149 req |= F_T6_ENABLE;
150 150
151 151 if (is_t4(adap->params.chip))
152 152 req |= F_LOCALCFG;
153 153
154 154 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
155 155 *val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
156 156
157 157 /* Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
158 158 * Configuration Space read. (None of the other fields matter when
159 159 * F_ENABLE is 0 so a simple register write is easier than a
160 160 * read-modify-write via t4_set_reg_field().)
161 161 */
162 162 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
163 163 }
164 164
165 165 /*
166 166 * t4_report_fw_error - report firmware error
167 167 * @adap: the adapter
168 168 *
169 169 * The adapter firmware can indicate error conditions to the host.
170 170 * If the firmware has indicated an error, print out the reason for
171 171 * the firmware error.
172 172 */
173 173 static void t4_report_fw_error(struct adapter *adap)
174 174 {
175 175 static const char *const reason[] = {
176 176 "Crash", /* PCIE_FW_EVAL_CRASH */
177 177 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
178 178 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
179 179 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
180 180 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
181 181 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
182 182 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
183 183 "Reserved", /* reserved */
184 184 };
185 185 u32 pcie_fw;
186 186
187 187 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
188 188 if (pcie_fw & F_PCIE_FW_ERR)
189 189 CH_ERR(adap, "Firmware reports adapter error: %s\n",
190 190 reason[G_PCIE_FW_EVAL(pcie_fw)]);
191 191 }
192 192
193 193 /*
194 194 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
195 195 */
196 196 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
197 197 u32 mbox_addr)
198 198 {
199 199 for ( ; nflit; nflit--, mbox_addr += 8)
200 200 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
201 201 }
202 202
/*
 * Handle a FW assertion reported in a mailbox.
 *
 * Logs the firmware source file name (capped at 16 characters by the
 * %.16s precision, in case it is not NUL-terminated), the line number
 * and the two assertion values from the FW_DEBUG_CMD reply.
 */
static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
{
	CH_ALERT(adap,
	    "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
	    asrt->u.assert.filename_0_7,
	    be32_to_cpu(asrt->u.assert.line),
	    be32_to_cpu(asrt->u.assert.x),
	    be32_to_cpu(asrt->u.assert.y));
}
215 215
/* Value read back from a mailbox register while the PF has no access. */
#define X_CIM_PF_NOACCESS 0xeeeeeeee

/*
 * If the Host OS Driver needs locking around accesses to the mailbox, this
 * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
 */
/* makes single-statement usage a bit cleaner ... */
#ifdef T4_OS_NEEDS_MBOX_LOCKING
#define T4_OS_MBOX_LOCKING(x) x
#else
#define T4_OS_MBOX_LOCKING(x) do {} while (0)
#endif

/*
 * If the OS Driver wants busy waits to keep a watchdog happy, tap it during
 * busy loops which don't sleep.
 */
#ifdef T4_OS_NEEDS_TOUCH_NMI_WATCHDOG
#define T4_OS_TOUCH_NMI_WATCHDOG() t4_os_touch_nmi_watchdog()
#else
#define T4_OS_TOUCH_NMI_WATCHDOG()
#endif
238 238
#ifdef T4_OS_LOG_MBOX_CMDS
/**
 * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
 * @adapter: the adapter
 * @cmd: the Firmware Mailbox Command or Reply
 * @size: command length in bytes
 * @access: the time (ms) needed to access the Firmware Mailbox
 * @execute: the time (ms) the command spent being executed
 *
 * Copies @cmd into the next slot of the adapter's circular mailbox
 * command log, zero-pads the unused tail of the entry, and stamps it
 * with a timestamp and a monotonically increasing sequence number.
 */
static void t4_record_mbox(struct adapter *adapter,
	const __be64 *cmd, unsigned int size,
	int access, int execute)
{
	struct mbox_cmd_log *log = adapter->mbox_log;
	struct mbox_cmd *entry;
	int i;

	/* Claim the next slot; the log is circular, so wrap the cursor. */
	entry = mbox_cmd_log_entry(log, log->cursor++);
	if (log->cursor == log->size)
		log->cursor = 0;

	/* Copy the command flits (host order), zero the remainder. */
	for (i = 0; i < size/8; i++)
		entry->cmd[i] = be64_to_cpu(cmd[i]);
	while (i < MBOX_LEN/8)
		entry->cmd[i++] = 0;
	entry->timestamp = t4_os_timestamp();
	entry->seqno = log->seqno++;
	entry->access = access;
	entry->execute = execute;
}

#define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
	t4_record_mbox(__adapter, __cmd, __size, __access, __execute)

#else /* !T4_OS_LOG_MBOX_CMDS */

/* Logging disabled: the macro expands to nothing. */
#define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
	/* nothing */

#endif /* !T4_OS_LOG_MBOX_CMDS */
279 279
/**
 * t4_record_mbox_marker - record a marker in the mailbox log
 * @adapter: the adapter
 * @marker: byte array marker
 * @size: marker size in bytes
 *
 * We inject a "fake mailbox command" into the Firmware Mailbox Log
 * using a known command token and then the bytes of the specified
 * marker.  This lets debugging code inject markers into the log to
 * help identify which commands are in response to higher level code.
 *
 * A no-op unless T4_OS_LOG_MBOX_CMDS is defined.
 */
void t4_record_mbox_marker(struct adapter *adapter,
	const void *marker, unsigned int size)
{
#ifdef T4_OS_LOG_MBOX_CMDS
	__be64 marker_cmd[MBOX_LEN/8];
	/* Payload space: one full entry minus the leading token flit. */
	const unsigned int max_marker = sizeof marker_cmd - sizeof (__be64);
	unsigned int marker_cmd_size;

	/* Clamp the marker so token + payload fit within one log entry. */
	if (size > max_marker)
		size = max_marker;

	/* All-ones is the "fake command" token; zero-pad past the marker. */
	marker_cmd[0] = cpu_to_be64(~0LLU);
	memcpy(&marker_cmd[1], marker, size);
	memset((unsigned char *)&marker_cmd[1] + size, 0, max_marker - size);
	/* Record the token flit plus the marker rounded up to whole flits. */
	marker_cmd_size = sizeof (__be64) + roundup(size, sizeof (__be64));

	t4_record_mbox(adapter, marker_cmd, marker_cmd_size, 0, 0);
#endif /* T4_OS_LOG_MBOX_CMDS */
}
310 310
/*
 * Delay time in microseconds to wait (per polling iteration) for mailbox
 * access/fw reply to mailbox command.
 */
#define MIN_MBOX_CMD_DELAY 900
#define MBOX_CMD_DELAY 1000
317 317
/**
 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 * @timeout: time to wait for command to finish before timing out
 *	(negative implies @sleep_ok=false)
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  Some FW commands like RESET and
 * INITIALIZE can take a considerable amount of time to execute.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff otherwise we spin.
 * Note that passing in a negative @timeout is an alternate mechanism
 * for specifying @sleep_ok=false.  This is useful when a higher level
 * interface allows for specification of @timeout but not @sleep_ok ...
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
	int size, void *rpl, bool sleep_ok, int timeout)
{
#ifdef T4_OS_NEEDS_MBOX_LOCKING
	u16 access = 0;
#endif
	u32 v;
	u64 res;
	int i, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	T4_OS_MBOX_LOCKING(t4_os_list_t entry);
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

#ifdef T4_OS_NEEDS_MBOX_LOCKING
	/*
	 * Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);

	for (i = 0; ; i++) {
		/*
		 * If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...  Also check for a
		 * firmware error which we'll report as a device error.
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (i > 4*timeout || (pcie_fw & F_PCIE_FW_ERR)) {
			t4_os_atomic_list_del(&entry, &adap->mbox_lock);
			t4_report_fw_error(adap);
			ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
			T4_RECORD_MBOX(adap, cmd, size, ret, 0);
			return ret;
		}

		/*
		 * If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
			break;

		/*
		 * Delay for a bit before checking again ...
		 */
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}
	}
	/* Number of wait iterations, recorded in the mailbox log. */
	access = i;
#endif /* T4_OS_NEEDS_MBOX_LOCKING */

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
		    &adap->mbox_lock));
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		T4_RECORD_MBOX(adap, cmd, size, access, ret);
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		    "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		    (unsigned long long)t4_read_reg64(adap, data_reg),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	T4_RECORD_MBOX(adap, cmd, size, access, 0);
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	/*
	 * XXX It's not clear that we need this anymore now
	 * XXX that we have mailbox logging ...
	 */
	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* Hand the mailbox (and the command in it) over to the firmware. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	(void) t4_read_reg(adap, ctl_reg);	/* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	for (i = 0;
	    !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	    i < timeout;
	    i++) {
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}

		v = t4_read_reg(adap, ctl_reg);
		/* X_CIM_PF_NOACCESS means the PF can't see the mailbox yet. */
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
				    V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
			    &adap->mbox_lock));

			T4_RECORD_MBOX(adap, cmd_rpl, size, access, i + 1);

			/*
			 * XXX It's not clear that we need this anymore now
			 * XXX that we have mailbox logging ...
			 */
			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);
			CH_MSG(adap, INFO, HW,
			    "command completed in %d ms (%ssleeping)\n",
			    i + 1, sleep_ok ? "" : "non-");

			/*
			 * A FW_DEBUG_CMD reply is an assertion report, not
			 * a reply to our command; log it and return -EIO.
			 */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry, &adap->mbox_lock));

	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	T4_RECORD_MBOX(adap, cmd, size, access, ret);
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	    *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
548 548
#ifdef CONFIG_CUDBG
/*
 * The maximum number of times to iterate for FW reply before
 * issuing a mailbox timeout
 */
#define FW_REPLY_WAIT_LOOP 6000000

/**
 * t4_wr_mbox_meat_timeout_panic - send a command to FW through the given
 * mailbox.  This function is a minimal version of t4_wr_mbox_meat_timeout()
 * and is only invoked during a kernel crash.  Since this function is
 * called through an atomic notifier chain, we cannot sleep awaiting a
 * response from FW, hence repeatedly loop until we get a reply.
 *
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 */

static int t4_wr_mbox_meat_timeout_panic(struct adapter *adap, int mbox,
	const void *cmd, int size, void *rpl)
{
	u32 v;
	u64 res;
	int i, ret;
	u64 cnt;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * Check for a firmware error which we'll report as a
	 * device error.
	 */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR) {
		t4_report_fw_error(adap);
		/*
		 * NOTE(review): F_PCIE_FW_ERR is known to be set here, so
		 * this ternary always yields -ENXIO; kept for symmetry with
		 * the non-panic path.
		 */
		ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
		return ret;
	}

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		    "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		    (unsigned long long)t4_read_reg64(adap, data_reg),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* Hand the mailbox (and the command in it) over to the firmware. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg); /* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.  We cannot sleep here, so we busy-poll up to
	 * FW_REPLY_WAIT_LOOP iterations with no inter-iteration delay.
	 */
	for (cnt = 0;
	    !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	    cnt < FW_REPLY_WAIT_LOOP;
	    cnt++) {
		v = t4_read_reg(adap, ctl_reg);
		/* X_CIM_PF_NOACCESS means the PF can't see the mailbox yet. */
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
				    V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

			/*
			 * A FW_DEBUG_CMD reply is an assertion report, not
			 * a reply to our command; log it and return -EIO.
			 */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	    *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
#endif
698 698
/*
 * t4_wr_mbox_meat - send a FW command via mailbox with the default timeout
 *
 * Wrapper around t4_wr_mbox_meat_timeout() using FW_CMD_MAX_TIMEOUT.
 * Under CONFIG_CUDBG, a crashing kernel (K_CRASH set in adap->flags) is
 * routed to the minimal panic-safe variant instead.
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
	void *rpl, bool sleep_ok)
{
#ifdef CONFIG_CUDBG
	if (adap->flags & K_CRASH)
		return t4_wr_mbox_meat_timeout_panic(adap, mbox, cmd, size,
		    rpl);
	else
#endif
		return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
		    sleep_ok, FW_CMD_MAX_TIMEOUT);

}
712 712
/*
 * t4_edc_err_read - log EDC ECC error address and BIST status registers
 * @adap: the adapter
 * @idx: which EDC controller (MEM_EDC0 or MEM_EDC1)
 *
 * T5/T6 only: on T4, or for any other @idx, a warning is logged and
 * nothing is read.  Always returns 0.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	if (is_t4(adap->params.chip)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	/* Per-controller register addresses via the T5 EDC stride macro. */
	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
	    "edc%d err addr 0x%x: 0x%x.\n",
	    idx, edc_ecc_err_addr_reg,
	    t4_read_reg(adap, edc_ecc_err_addr_reg));
	/* Nine consecutive 64-bit reads of the BIST status read-data area. */
	CH_WARN(adap,
	    "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
	    edc_bist_status_rdata_reg,
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}
749 749
750 750 /**
751 751 * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
752 752 * @adap: the adapter
753 753 * @win: PCI-E Memory Window to use
754 754 * @addr: address within adapter memory
755 755 * @len: amount of memory to transfer
756 756 * @hbuf: host memory buffer
757 757 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
758 758 *
759 759 * Reads/writes an [almost] arbitrary memory region in the firmware: the
760 760 * firmware memory address and host buffer must be aligned on 32-bit
761 761 * boudaries; the length may be arbitrary.
762 762 *
763 763 * NOTES:
764 764 * 1. The memory is transferred as a raw byte sequence from/to the
765 765 * firmware's memory. If this memory contains data structures which
766 766 * contain multi-byte integers, it's the caller's responsibility to
767 767 * perform appropriate byte order conversions.
768 768 *
769 769 * 2. It is the Caller's responsibility to ensure that no other code
770 770 * uses the specified PCI-E Memory Window while this routine is
771 771 * using it. This is typically done via the use of OS-specific
772 772 * locks, etc.
773 773 */
774 774 int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
775 775 u32 len, void *hbuf, int dir)
776 776 {
777 777 u32 pos, offset, resid;
778 778 u32 win_pf, mem_reg, mem_aperture, mem_base;
779 779 u32 *buf;
780 780
781 781 /* Argument sanity checks ...
782 782 */
783 783 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
784 784 return -EINVAL;
785 785 buf = (u32 *)hbuf;
786 786
787 787 /* It's convenient to be able to handle lengths which aren't a
788 788 * multiple of 32-bits because we often end up transferring files to
789 789 * the firmware. So we'll handle that by normalizing the length here
790 790 * and then handling any residual transfer at the end.
791 791 */
792 792 resid = len & 0x3;
793 793 len -= resid;
794 794
795 795 /* Each PCI-E Memory Window is programmed with a window size -- or
796 796 * "aperture" -- which controls the granularity of its mapping onto
797 797 * adapter memory. We need to grab that aperture in order to know
798 798 * how to use the specified window. The window is also programmed
799 799 * with the base address of the Memory Window in BAR0's address
800 800 * space. For T4 this is an absolute PCI-E Bus Address. For T5
801 801 * the address is relative to BAR0.
802 802 */
803 803 mem_reg = t4_read_reg(adap,
804 804 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
805 805 win));
806 806
807 807 /* a dead adapter will return 0xffffffff for PIO reads */
808 808 if (mem_reg == 0xffffffff) {
809 809 CH_WARN(adap, "Unable to read PCI-E Memory Window Base[%d]\n",
810 810 win);
811 811 return -ENXIO;
812 812 }
813 813
814 814 mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
815 815 mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
816 816 if (is_t4(adap->params.chip))
817 817 mem_base -= adap->t4_bar0;
818 818 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);
819 819
820 820 /* Calculate our initial PCI-E Memory Window Position and Offset into
821 821 * that Window.
822 822 */
823 823 pos = addr & ~(mem_aperture-1);
824 824 offset = addr - pos;
825 825
826 826 /* Set up initial PCI-E Memory Window to cover the start of our
827 827 * transfer. (Read it back to ensure that changes propagate before we
828 828 * attempt to use the new value.)
829 829 */
830 830 t4_write_reg(adap,
831 831 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
832 832 pos | win_pf);
833 833 t4_read_reg(adap,
834 834 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));
835 835
836 836 /* Transfer data to/from the adapter as long as there's an integral
837 837 * number of 32-bit transfers to complete.
838 838 *
839 839 * A note on Endianness issues:
840 840 *
841 841 * The "register" reads and writes below from/to the PCI-E Memory
842 842 * Window invoke the standard adapter Big-Endian to PCI-E Link
843 843 * Little-Endian "swizzel." As a result, if we have the following
844 844 * data in adapter memory:
845 845 *
846 846 * Memory: ... | b0 | b1 | b2 | b3 | ...
847 847 * Address: i+0 i+1 i+2 i+3
848 848 *
849 849 * Then a read of the adapter memory via the PCI-E Memory Window
850 850 * will yield:
851 851 *
852 852 * x = readl(i)
853 853 * 31 0
854 854 * [ b3 | b2 | b1 | b0 ]
855 855 *
856 856 * If this value is stored into local memory on a Little-Endian system
857 857 * it will show up correctly in local memory as:
858 858 *
859 859 * ( ..., b0, b1, b2, b3, ... )
860 860 *
861 861 * But on a Big-Endian system, the store will show up in memory
862 862 * incorrectly swizzled as:
863 863 *
864 864 * ( ..., b3, b2, b1, b0, ... )
865 865 *
866 866 * So we need to account for this in the reads and writes to the
867 867 * PCI-E Memory Window below by undoing the register read/write
868 868 * swizzels.
869 869 */
870 870 while (len > 0) {
871 871 if (dir == T4_MEMORY_READ)
872 872 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
873 873 mem_base + offset));
874 874 else
875 875 t4_write_reg(adap, mem_base + offset,
876 876 (__force u32)cpu_to_le32(*buf++));
877 877 offset += sizeof(__be32);
878 878 len -= sizeof(__be32);
879 879
880 880 /* If we've reached the end of our current window aperture,
881 881 * move the PCI-E Memory Window on to the next. Note that
882 882 * doing this here after "len" may be 0 allows us to set up
883 883 * the PCI-E Memory Window for a possible final residual
884 884 * transfer below ...
885 885 */
886 886 if (offset == mem_aperture) {
887 887 pos += mem_aperture;
888 888 offset = 0;
889 889 t4_write_reg(adap,
890 890 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
891 891 win), pos | win_pf);
892 892 t4_read_reg(adap,
893 893 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
894 894 win));
895 895 }
896 896 }
897 897
898 898 /* If the original transfer had a length which wasn't a multiple of
899 899 * 32-bits, now's where we need to finish off the transfer of the
900 900 * residual amount. The PCI-E Memory Window has already been moved
901 901 * above (if necessary) to cover this final transfer.
902 902 */
903 903 if (resid) {
904 904 union {
905 905 u32 word;
906 906 char byte[4];
907 907 } last;
908 908 unsigned char *bp;
909 909 int i;
910 910
911 911 if (dir == T4_MEMORY_READ) {
912 912 last.word = le32_to_cpu(
913 913 (__force __le32)t4_read_reg(adap,
914 914 mem_base + offset));
915 915 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
916 916 bp[i] = last.byte[i];
917 917 } else {
918 918 last.word = *buf;
919 919 for (i = resid; i < 4; i++)
920 920 last.byte[i] = 0;
921 921 t4_write_reg(adap, mem_base + offset,
922 922 (__force u32)cpu_to_le32(last.word));
923 923 }
924 924 }
925 925
926 926 return 0;
927 927 }
928 928
/**
 *	t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
 *	@adap: the adapter
 *	@win: PCI-E Memory Window to use
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@maddr: address within indicated memory type
 *	@len: amount of memory to transfer
 *	@hbuf: host memory buffer
 *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 *	Reads/writes adapter memory using t4_memory_rw_addr().  This routine
 *	provides an (memory type, address within memory type) interface.
 */
int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
		       u32 len, void *hbuf, int dir)
{
	u32 mtype_offset;
	u32 edc_size, mc_size;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
	 */
	edc_size  = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
	if (mtype != MEM_MC1)
		/* EDC0/EDC1/MC0 regions are laid out back-to-back in
		 * edc_size-MB units; NOTE(review): this assumes EDC1 is the
		 * same size as EDC0, matching the upstream driver.
		 */
		mtype_offset = (mtype * (edc_size * 1024 * 1024));
	else {
		/* MC1 starts after both EDCs plus the first MC region. */
		mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
						      A_MA_EXT_MEMORY0_BAR));
		mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	return t4_memory_rw_addr(adap, win,
				 mtype_offset + maddr, len,
				 hbuf, dir);
}
967 967
968 968 /*
969 969 * Return the specified PCI-E Configuration Space register from our Physical
970 970 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
971 971 * since we prefer to let the firmware own all of these registers, but if that
972 972 * fails we go for it directly ourselves.
973 973 */
974 974 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
975 975 {
976 976 u32 val;
977 977
978 978 /*
979 979 * If fw_attach != 0, construct and send the Firmware LDST Command to
980 980 * retrieve the specified PCI-E Configuration Space register.
981 981 */
982 982 if (drv_fw_attach != 0) {
983 983 struct fw_ldst_cmd ldst_cmd;
984 984 int ret;
985 985
986 986 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
987 987 ldst_cmd.op_to_addrspace =
988 988 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
989 989 F_FW_CMD_REQUEST |
990 990 F_FW_CMD_READ |
991 991 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
992 992 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
993 993 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
994 994 ldst_cmd.u.pcie.ctrl_to_fn =
995 995 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
996 996 ldst_cmd.u.pcie.r = reg;
997 997
998 998 /*
999 999 * If the LDST Command succeeds, return the result, otherwise
1000 1000 * fall through to reading it directly ourselves ...
1001 1001 */
1002 1002 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
1003 1003 &ldst_cmd);
1004 1004 if (ret == 0)
1005 1005 return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
1006 1006
1007 1007 CH_WARN(adap, "Firmware failed to return "
1008 1008 "Configuration Space register %d, err = %d\n",
1009 1009 reg, -ret);
1010 1010 }
1011 1011
1012 1012 /*
1013 1013 * Read the desired Configuration Space register via the PCI-E
1014 1014 * Backdoor mechanism.
1015 1015 */
1016 1016 t4_hw_pci_read_cfg4(adap, reg, &val);
1017 1017 return val;
1018 1018 }
1019 1019
1020 1020 /*
1021 1021 * Get the window based on base passed to it.
1022 1022 * Window aperture is currently unhandled, but there is no use case for it
1023 1023 * right now
1024 1024 */
1025 1025 static int t4_get_window(struct adapter *adap, u64 pci_base, u64 pci_mask, u64 memwin_base, int drv_fw_attach)
1026 1026 {
1027 1027 if (is_t4(adap->params.chip)) {
1028 1028 u32 bar0;
1029 1029
1030 1030 /*
1031 1031 * Truncation intentional: we only read the bottom 32-bits of
1032 1032 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
1033 1033 * mechanism to read BAR0 instead of using
1034 1034 * pci_resource_start() because we could be operating from
1035 1035 * within a Virtual Machine which is trapping our accesses to
1036 1036 * our Configuration Space and we need to set up the PCI-E
1037 1037 * Memory Window decoders with the actual addresses which will
1038 1038 * be coming across the PCI-E link.
1039 1039 */
1040 1040 bar0 = t4_read_pcie_cfg4(adap, pci_base, drv_fw_attach);
1041 1041 bar0 &= pci_mask;
1042 1042 adap->t4_bar0 = bar0;
1043 1043
1044 1044 return bar0 + memwin_base;
1045 1045 } else {
1046 1046 /* For T5, only relative offset inside the PCIe BAR is passed */
1047 1047 return memwin_base;
1048 1048 }
1049 1049 }
1050 1050
/* Get the default utility window (win0) used by everyone.  This simply
 * resolves BAR0 + MEMWIN0_BASE via t4_get_window(); drv_fw_attach controls
 * whether the firmware is consulted for the BAR0 value.
 */
int t4_get_util_window(struct adapter *adap, int drv_fw_attach)
{
	return t4_get_window(adap, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE, drv_fw_attach);
}
1056 1056
1057 1057 /*
1058 1058 * Set up memory window for accessing adapter memory ranges. (Read
1059 1059 * back MA register to ensure that changes propagate before we attempt
1060 1060 * to use the new values.)
1061 1061 */
1062 1062 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
1063 1063 {
1064 1064 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window),
1065 1065 memwin_base | V_BIR(0) |
1066 1066 V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
1067 1067 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window));
1068 1068 }
1069 1069
1070 1070 /**
1071 1071 * t4_get_regs_len - return the size of the chips register set
1072 1072 * @adapter: the adapter
1073 1073 *
1074 1074 * Returns the size of the chip's BAR0 register space.
1075 1075 */
1076 1076 unsigned int t4_get_regs_len(struct adapter *adapter)
1077 1077 {
1078 1078 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
1079 1079
1080 1080 switch (chip_version) {
1081 1081 case CHELSIO_T4:
1082 1082 return T4_REGMAP_SIZE;
1083 1083
1084 1084 case CHELSIO_T5:
1085 1085 case CHELSIO_T6:
1086 1086 return T5_REGMAP_SIZE;
1087 1087 }
1088 1088
1089 1089 CH_ERR(adapter,
1090 1090 "Unsupported chip version %d\n", chip_version);
1091 1091 return 0;
1092 1092 }
1093 1093
1094 1094 /**
1095 1095 * t4_get_regs - read chip registers into provided buffer
1096 1096 * @adap: the adapter
1097 1097 * @buf: register buffer
1098 1098 * @buf_size: size (in bytes) of register buffer
1099 1099 *
1100 1100 * If the provided register buffer isn't large enough for the chip's
1101 1101 * full register range, the register dump will be truncated to the
1102 1102 * register buffer's size.
1103 1103 */
1104 1104 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
1105 1105 {
1106 1106 static const unsigned int t4_reg_ranges[] = {
1107 1107 0x1008, 0x1108,
1108 1108 0x1180, 0x1184,
1109 1109 0x1190, 0x1194,
1110 1110 0x11a0, 0x11a4,
1111 1111 0x11b0, 0x11b4,
1112 1112 0x11fc, 0x123c,
1113 1113 0x1300, 0x173c,
1114 1114 0x1800, 0x18fc,
1115 1115 0x3000, 0x30d8,
1116 1116 0x30e0, 0x30e4,
1117 1117 0x30ec, 0x5910,
1118 1118 0x5920, 0x5924,
1119 1119 0x5960, 0x5960,
1120 1120 0x5968, 0x5968,
1121 1121 0x5970, 0x5970,
1122 1122 0x5978, 0x5978,
1123 1123 0x5980, 0x5980,
1124 1124 0x5988, 0x5988,
1125 1125 0x5990, 0x5990,
1126 1126 0x5998, 0x5998,
1127 1127 0x59a0, 0x59d4,
1128 1128 0x5a00, 0x5ae0,
1129 1129 0x5ae8, 0x5ae8,
1130 1130 0x5af0, 0x5af0,
1131 1131 0x5af8, 0x5af8,
1132 1132 0x6000, 0x6098,
1133 1133 0x6100, 0x6150,
1134 1134 0x6200, 0x6208,
1135 1135 0x6240, 0x6248,
1136 1136 0x6280, 0x62b0,
1137 1137 0x62c0, 0x6338,
1138 1138 0x6370, 0x638c,
1139 1139 0x6400, 0x643c,
1140 1140 0x6500, 0x6524,
1141 1141 0x6a00, 0x6a04,
1142 1142 0x6a14, 0x6a38,
1143 1143 0x6a60, 0x6a70,
1144 1144 0x6a78, 0x6a78,
1145 1145 0x6b00, 0x6b0c,
1146 1146 0x6b1c, 0x6b84,
1147 1147 0x6bf0, 0x6bf8,
1148 1148 0x6c00, 0x6c0c,
1149 1149 0x6c1c, 0x6c84,
1150 1150 0x6cf0, 0x6cf8,
1151 1151 0x6d00, 0x6d0c,
1152 1152 0x6d1c, 0x6d84,
1153 1153 0x6df0, 0x6df8,
1154 1154 0x6e00, 0x6e0c,
1155 1155 0x6e1c, 0x6e84,
1156 1156 0x6ef0, 0x6ef8,
1157 1157 0x6f00, 0x6f0c,
1158 1158 0x6f1c, 0x6f84,
1159 1159 0x6ff0, 0x6ff8,
1160 1160 0x7000, 0x700c,
1161 1161 0x701c, 0x7084,
1162 1162 0x70f0, 0x70f8,
1163 1163 0x7100, 0x710c,
1164 1164 0x711c, 0x7184,
1165 1165 0x71f0, 0x71f8,
1166 1166 0x7200, 0x720c,
1167 1167 0x721c, 0x7284,
1168 1168 0x72f0, 0x72f8,
1169 1169 0x7300, 0x730c,
1170 1170 0x731c, 0x7384,
1171 1171 0x73f0, 0x73f8,
1172 1172 0x7400, 0x7450,
1173 1173 0x7500, 0x7530,
1174 1174 0x7600, 0x760c,
1175 1175 0x7614, 0x761c,
1176 1176 0x7680, 0x76cc,
1177 1177 0x7700, 0x7798,
1178 1178 0x77c0, 0x77fc,
1179 1179 0x7900, 0x79fc,
1180 1180 0x7b00, 0x7b58,
1181 1181 0x7b60, 0x7b84,
1182 1182 0x7b8c, 0x7c38,
1183 1183 0x7d00, 0x7d38,
1184 1184 0x7d40, 0x7d80,
1185 1185 0x7d8c, 0x7ddc,
1186 1186 0x7de4, 0x7e04,
1187 1187 0x7e10, 0x7e1c,
1188 1188 0x7e24, 0x7e38,
1189 1189 0x7e40, 0x7e44,
1190 1190 0x7e4c, 0x7e78,
1191 1191 0x7e80, 0x7ea4,
1192 1192 0x7eac, 0x7edc,
1193 1193 0x7ee8, 0x7efc,
1194 1194 0x8dc0, 0x8e04,
1195 1195 0x8e10, 0x8e1c,
1196 1196 0x8e30, 0x8e78,
1197 1197 0x8ea0, 0x8eb8,
1198 1198 0x8ec0, 0x8f6c,
1199 1199 0x8fc0, 0x9008,
1200 1200 0x9010, 0x9058,
1201 1201 0x9060, 0x9060,
1202 1202 0x9068, 0x9074,
1203 1203 0x90fc, 0x90fc,
1204 1204 0x9400, 0x9408,
1205 1205 0x9410, 0x9458,
1206 1206 0x9600, 0x9600,
1207 1207 0x9608, 0x9638,
1208 1208 0x9640, 0x96bc,
1209 1209 0x9800, 0x9808,
1210 1210 0x9820, 0x983c,
1211 1211 0x9850, 0x9864,
1212 1212 0x9c00, 0x9c6c,
1213 1213 0x9c80, 0x9cec,
1214 1214 0x9d00, 0x9d6c,
1215 1215 0x9d80, 0x9dec,
1216 1216 0x9e00, 0x9e6c,
1217 1217 0x9e80, 0x9eec,
1218 1218 0x9f00, 0x9f6c,
1219 1219 0x9f80, 0x9fec,
1220 1220 0xd004, 0xd004,
1221 1221 0xd010, 0xd03c,
1222 1222 0xdfc0, 0xdfe0,
1223 1223 0xe000, 0xea7c,
1224 1224 0xf000, 0x11110,
1225 1225 0x11118, 0x11190,
1226 1226 0x19040, 0x1906c,
1227 1227 0x19078, 0x19080,
1228 1228 0x1908c, 0x190e4,
1229 1229 0x190f0, 0x190f8,
1230 1230 0x19100, 0x19110,
1231 1231 0x19120, 0x19124,
1232 1232 0x19150, 0x19194,
1233 1233 0x1919c, 0x191b0,
1234 1234 0x191d0, 0x191e8,
1235 1235 0x19238, 0x1924c,
1236 1236 0x193f8, 0x1943c,
1237 1237 0x1944c, 0x19474,
1238 1238 0x19490, 0x194e0,
1239 1239 0x194f0, 0x194f8,
1240 1240 0x19800, 0x19c08,
1241 1241 0x19c10, 0x19c90,
1242 1242 0x19ca0, 0x19ce4,
1243 1243 0x19cf0, 0x19d40,
1244 1244 0x19d50, 0x19d94,
1245 1245 0x19da0, 0x19de8,
1246 1246 0x19df0, 0x19e40,
1247 1247 0x19e50, 0x19e90,
1248 1248 0x19ea0, 0x19f4c,
1249 1249 0x1a000, 0x1a004,
1250 1250 0x1a010, 0x1a06c,
1251 1251 0x1a0b0, 0x1a0e4,
1252 1252 0x1a0ec, 0x1a0f4,
1253 1253 0x1a100, 0x1a108,
1254 1254 0x1a114, 0x1a120,
1255 1255 0x1a128, 0x1a130,
1256 1256 0x1a138, 0x1a138,
1257 1257 0x1a190, 0x1a1c4,
1258 1258 0x1a1fc, 0x1a1fc,
1259 1259 0x1e040, 0x1e04c,
1260 1260 0x1e284, 0x1e28c,
1261 1261 0x1e2c0, 0x1e2c0,
1262 1262 0x1e2e0, 0x1e2e0,
1263 1263 0x1e300, 0x1e384,
1264 1264 0x1e3c0, 0x1e3c8,
1265 1265 0x1e440, 0x1e44c,
1266 1266 0x1e684, 0x1e68c,
1267 1267 0x1e6c0, 0x1e6c0,
1268 1268 0x1e6e0, 0x1e6e0,
1269 1269 0x1e700, 0x1e784,
1270 1270 0x1e7c0, 0x1e7c8,
1271 1271 0x1e840, 0x1e84c,
1272 1272 0x1ea84, 0x1ea8c,
1273 1273 0x1eac0, 0x1eac0,
1274 1274 0x1eae0, 0x1eae0,
1275 1275 0x1eb00, 0x1eb84,
1276 1276 0x1ebc0, 0x1ebc8,
1277 1277 0x1ec40, 0x1ec4c,
1278 1278 0x1ee84, 0x1ee8c,
1279 1279 0x1eec0, 0x1eec0,
1280 1280 0x1eee0, 0x1eee0,
1281 1281 0x1ef00, 0x1ef84,
1282 1282 0x1efc0, 0x1efc8,
1283 1283 0x1f040, 0x1f04c,
1284 1284 0x1f284, 0x1f28c,
1285 1285 0x1f2c0, 0x1f2c0,
1286 1286 0x1f2e0, 0x1f2e0,
1287 1287 0x1f300, 0x1f384,
1288 1288 0x1f3c0, 0x1f3c8,
1289 1289 0x1f440, 0x1f44c,
1290 1290 0x1f684, 0x1f68c,
1291 1291 0x1f6c0, 0x1f6c0,
1292 1292 0x1f6e0, 0x1f6e0,
1293 1293 0x1f700, 0x1f784,
1294 1294 0x1f7c0, 0x1f7c8,
1295 1295 0x1f840, 0x1f84c,
1296 1296 0x1fa84, 0x1fa8c,
1297 1297 0x1fac0, 0x1fac0,
1298 1298 0x1fae0, 0x1fae0,
1299 1299 0x1fb00, 0x1fb84,
1300 1300 0x1fbc0, 0x1fbc8,
1301 1301 0x1fc40, 0x1fc4c,
1302 1302 0x1fe84, 0x1fe8c,
1303 1303 0x1fec0, 0x1fec0,
1304 1304 0x1fee0, 0x1fee0,
1305 1305 0x1ff00, 0x1ff84,
1306 1306 0x1ffc0, 0x1ffc8,
1307 1307 0x20000, 0x2002c,
1308 1308 0x20100, 0x2013c,
1309 1309 0x20190, 0x201a0,
1310 1310 0x201a8, 0x201b8,
1311 1311 0x201c4, 0x201c8,
1312 1312 0x20200, 0x20318,
1313 1313 0x20400, 0x204b4,
1314 1314 0x204c0, 0x20528,
1315 1315 0x20540, 0x20614,
1316 1316 0x21000, 0x21040,
1317 1317 0x2104c, 0x21060,
1318 1318 0x210c0, 0x210ec,
1319 1319 0x21200, 0x21268,
1320 1320 0x21270, 0x21284,
1321 1321 0x212fc, 0x21388,
1322 1322 0x21400, 0x21404,
1323 1323 0x21500, 0x21500,
1324 1324 0x21510, 0x21518,
1325 1325 0x2152c, 0x21530,
1326 1326 0x2153c, 0x2153c,
1327 1327 0x21550, 0x21554,
1328 1328 0x21600, 0x21600,
1329 1329 0x21608, 0x2161c,
1330 1330 0x21624, 0x21628,
1331 1331 0x21630, 0x21634,
1332 1332 0x2163c, 0x2163c,
1333 1333 0x21700, 0x2171c,
1334 1334 0x21780, 0x2178c,
1335 1335 0x21800, 0x21818,
1336 1336 0x21820, 0x21828,
1337 1337 0x21830, 0x21848,
1338 1338 0x21850, 0x21854,
1339 1339 0x21860, 0x21868,
1340 1340 0x21870, 0x21870,
1341 1341 0x21878, 0x21898,
1342 1342 0x218a0, 0x218a8,
1343 1343 0x218b0, 0x218c8,
1344 1344 0x218d0, 0x218d4,
1345 1345 0x218e0, 0x218e8,
1346 1346 0x218f0, 0x218f0,
1347 1347 0x218f8, 0x21a18,
1348 1348 0x21a20, 0x21a28,
1349 1349 0x21a30, 0x21a48,
1350 1350 0x21a50, 0x21a54,
1351 1351 0x21a60, 0x21a68,
1352 1352 0x21a70, 0x21a70,
1353 1353 0x21a78, 0x21a98,
1354 1354 0x21aa0, 0x21aa8,
1355 1355 0x21ab0, 0x21ac8,
1356 1356 0x21ad0, 0x21ad4,
1357 1357 0x21ae0, 0x21ae8,
1358 1358 0x21af0, 0x21af0,
1359 1359 0x21af8, 0x21c18,
1360 1360 0x21c20, 0x21c20,
1361 1361 0x21c28, 0x21c30,
1362 1362 0x21c38, 0x21c38,
1363 1363 0x21c80, 0x21c98,
1364 1364 0x21ca0, 0x21ca8,
1365 1365 0x21cb0, 0x21cc8,
1366 1366 0x21cd0, 0x21cd4,
1367 1367 0x21ce0, 0x21ce8,
1368 1368 0x21cf0, 0x21cf0,
1369 1369 0x21cf8, 0x21d7c,
1370 1370 0x21e00, 0x21e04,
1371 1371 0x22000, 0x2202c,
1372 1372 0x22100, 0x2213c,
1373 1373 0x22190, 0x221a0,
1374 1374 0x221a8, 0x221b8,
1375 1375 0x221c4, 0x221c8,
1376 1376 0x22200, 0x22318,
1377 1377 0x22400, 0x224b4,
1378 1378 0x224c0, 0x22528,
1379 1379 0x22540, 0x22614,
1380 1380 0x23000, 0x23040,
1381 1381 0x2304c, 0x23060,
1382 1382 0x230c0, 0x230ec,
1383 1383 0x23200, 0x23268,
1384 1384 0x23270, 0x23284,
1385 1385 0x232fc, 0x23388,
1386 1386 0x23400, 0x23404,
1387 1387 0x23500, 0x23500,
1388 1388 0x23510, 0x23518,
1389 1389 0x2352c, 0x23530,
1390 1390 0x2353c, 0x2353c,
1391 1391 0x23550, 0x23554,
1392 1392 0x23600, 0x23600,
1393 1393 0x23608, 0x2361c,
1394 1394 0x23624, 0x23628,
1395 1395 0x23630, 0x23634,
1396 1396 0x2363c, 0x2363c,
1397 1397 0x23700, 0x2371c,
1398 1398 0x23780, 0x2378c,
1399 1399 0x23800, 0x23818,
1400 1400 0x23820, 0x23828,
1401 1401 0x23830, 0x23848,
1402 1402 0x23850, 0x23854,
1403 1403 0x23860, 0x23868,
1404 1404 0x23870, 0x23870,
1405 1405 0x23878, 0x23898,
1406 1406 0x238a0, 0x238a8,
1407 1407 0x238b0, 0x238c8,
1408 1408 0x238d0, 0x238d4,
1409 1409 0x238e0, 0x238e8,
1410 1410 0x238f0, 0x238f0,
1411 1411 0x238f8, 0x23a18,
1412 1412 0x23a20, 0x23a28,
1413 1413 0x23a30, 0x23a48,
1414 1414 0x23a50, 0x23a54,
1415 1415 0x23a60, 0x23a68,
1416 1416 0x23a70, 0x23a70,
1417 1417 0x23a78, 0x23a98,
1418 1418 0x23aa0, 0x23aa8,
1419 1419 0x23ab0, 0x23ac8,
1420 1420 0x23ad0, 0x23ad4,
1421 1421 0x23ae0, 0x23ae8,
1422 1422 0x23af0, 0x23af0,
1423 1423 0x23af8, 0x23c18,
1424 1424 0x23c20, 0x23c20,
1425 1425 0x23c28, 0x23c30,
1426 1426 0x23c38, 0x23c38,
1427 1427 0x23c80, 0x23c98,
1428 1428 0x23ca0, 0x23ca8,
1429 1429 0x23cb0, 0x23cc8,
1430 1430 0x23cd0, 0x23cd4,
1431 1431 0x23ce0, 0x23ce8,
1432 1432 0x23cf0, 0x23cf0,
1433 1433 0x23cf8, 0x23d7c,
1434 1434 0x23e00, 0x23e04,
1435 1435 0x24000, 0x2402c,
1436 1436 0x24100, 0x2413c,
1437 1437 0x24190, 0x241a0,
1438 1438 0x241a8, 0x241b8,
1439 1439 0x241c4, 0x241c8,
1440 1440 0x24200, 0x24318,
1441 1441 0x24400, 0x244b4,
1442 1442 0x244c0, 0x24528,
1443 1443 0x24540, 0x24614,
1444 1444 0x25000, 0x25040,
1445 1445 0x2504c, 0x25060,
1446 1446 0x250c0, 0x250ec,
1447 1447 0x25200, 0x25268,
1448 1448 0x25270, 0x25284,
1449 1449 0x252fc, 0x25388,
1450 1450 0x25400, 0x25404,
1451 1451 0x25500, 0x25500,
1452 1452 0x25510, 0x25518,
1453 1453 0x2552c, 0x25530,
1454 1454 0x2553c, 0x2553c,
1455 1455 0x25550, 0x25554,
1456 1456 0x25600, 0x25600,
1457 1457 0x25608, 0x2561c,
1458 1458 0x25624, 0x25628,
1459 1459 0x25630, 0x25634,
1460 1460 0x2563c, 0x2563c,
1461 1461 0x25700, 0x2571c,
1462 1462 0x25780, 0x2578c,
1463 1463 0x25800, 0x25818,
1464 1464 0x25820, 0x25828,
1465 1465 0x25830, 0x25848,
1466 1466 0x25850, 0x25854,
1467 1467 0x25860, 0x25868,
1468 1468 0x25870, 0x25870,
1469 1469 0x25878, 0x25898,
1470 1470 0x258a0, 0x258a8,
1471 1471 0x258b0, 0x258c8,
1472 1472 0x258d0, 0x258d4,
1473 1473 0x258e0, 0x258e8,
1474 1474 0x258f0, 0x258f0,
1475 1475 0x258f8, 0x25a18,
1476 1476 0x25a20, 0x25a28,
1477 1477 0x25a30, 0x25a48,
1478 1478 0x25a50, 0x25a54,
1479 1479 0x25a60, 0x25a68,
1480 1480 0x25a70, 0x25a70,
1481 1481 0x25a78, 0x25a98,
1482 1482 0x25aa0, 0x25aa8,
1483 1483 0x25ab0, 0x25ac8,
1484 1484 0x25ad0, 0x25ad4,
1485 1485 0x25ae0, 0x25ae8,
1486 1486 0x25af0, 0x25af0,
1487 1487 0x25af8, 0x25c18,
1488 1488 0x25c20, 0x25c20,
1489 1489 0x25c28, 0x25c30,
1490 1490 0x25c38, 0x25c38,
1491 1491 0x25c80, 0x25c98,
1492 1492 0x25ca0, 0x25ca8,
1493 1493 0x25cb0, 0x25cc8,
1494 1494 0x25cd0, 0x25cd4,
1495 1495 0x25ce0, 0x25ce8,
1496 1496 0x25cf0, 0x25cf0,
1497 1497 0x25cf8, 0x25d7c,
1498 1498 0x25e00, 0x25e04,
1499 1499 0x26000, 0x2602c,
1500 1500 0x26100, 0x2613c,
1501 1501 0x26190, 0x261a0,
1502 1502 0x261a8, 0x261b8,
1503 1503 0x261c4, 0x261c8,
1504 1504 0x26200, 0x26318,
1505 1505 0x26400, 0x264b4,
1506 1506 0x264c0, 0x26528,
1507 1507 0x26540, 0x26614,
1508 1508 0x27000, 0x27040,
1509 1509 0x2704c, 0x27060,
1510 1510 0x270c0, 0x270ec,
1511 1511 0x27200, 0x27268,
1512 1512 0x27270, 0x27284,
1513 1513 0x272fc, 0x27388,
1514 1514 0x27400, 0x27404,
1515 1515 0x27500, 0x27500,
1516 1516 0x27510, 0x27518,
1517 1517 0x2752c, 0x27530,
1518 1518 0x2753c, 0x2753c,
1519 1519 0x27550, 0x27554,
1520 1520 0x27600, 0x27600,
1521 1521 0x27608, 0x2761c,
1522 1522 0x27624, 0x27628,
1523 1523 0x27630, 0x27634,
1524 1524 0x2763c, 0x2763c,
1525 1525 0x27700, 0x2771c,
1526 1526 0x27780, 0x2778c,
1527 1527 0x27800, 0x27818,
1528 1528 0x27820, 0x27828,
1529 1529 0x27830, 0x27848,
1530 1530 0x27850, 0x27854,
1531 1531 0x27860, 0x27868,
1532 1532 0x27870, 0x27870,
1533 1533 0x27878, 0x27898,
1534 1534 0x278a0, 0x278a8,
1535 1535 0x278b0, 0x278c8,
1536 1536 0x278d0, 0x278d4,
1537 1537 0x278e0, 0x278e8,
1538 1538 0x278f0, 0x278f0,
1539 1539 0x278f8, 0x27a18,
1540 1540 0x27a20, 0x27a28,
1541 1541 0x27a30, 0x27a48,
1542 1542 0x27a50, 0x27a54,
1543 1543 0x27a60, 0x27a68,
1544 1544 0x27a70, 0x27a70,
1545 1545 0x27a78, 0x27a98,
1546 1546 0x27aa0, 0x27aa8,
1547 1547 0x27ab0, 0x27ac8,
1548 1548 0x27ad0, 0x27ad4,
1549 1549 0x27ae0, 0x27ae8,
1550 1550 0x27af0, 0x27af0,
1551 1551 0x27af8, 0x27c18,
1552 1552 0x27c20, 0x27c20,
1553 1553 0x27c28, 0x27c30,
1554 1554 0x27c38, 0x27c38,
1555 1555 0x27c80, 0x27c98,
1556 1556 0x27ca0, 0x27ca8,
1557 1557 0x27cb0, 0x27cc8,
1558 1558 0x27cd0, 0x27cd4,
1559 1559 0x27ce0, 0x27ce8,
1560 1560 0x27cf0, 0x27cf0,
1561 1561 0x27cf8, 0x27d7c,
1562 1562 0x27e00, 0x27e04,
1563 1563 };
1564 1564
1565 1565 static const unsigned int t5_reg_ranges[] = {
1566 1566 0x1008, 0x10c0,
1567 1567 0x10cc, 0x10f8,
1568 1568 0x1100, 0x1100,
1569 1569 0x110c, 0x1148,
1570 1570 0x1180, 0x1184,
1571 1571 0x1190, 0x1194,
1572 1572 0x11a0, 0x11a4,
1573 1573 0x11b0, 0x11b4,
1574 1574 0x11fc, 0x123c,
1575 1575 0x1280, 0x173c,
1576 1576 0x1800, 0x18fc,
1577 1577 0x3000, 0x3028,
1578 1578 0x3060, 0x30b0,
1579 1579 0x30b8, 0x30d8,
1580 1580 0x30e0, 0x30fc,
1581 1581 0x3140, 0x357c,
1582 1582 0x35a8, 0x35cc,
1583 1583 0x35ec, 0x35ec,
1584 1584 0x3600, 0x5624,
1585 1585 0x56cc, 0x56ec,
1586 1586 0x56f4, 0x5720,
1587 1587 0x5728, 0x575c,
1588 1588 0x580c, 0x5814,
1589 1589 0x5890, 0x589c,
1590 1590 0x58a4, 0x58ac,
1591 1591 0x58b8, 0x58bc,
1592 1592 0x5940, 0x59c8,
1593 1593 0x59d0, 0x59dc,
1594 1594 0x59fc, 0x5a18,
1595 1595 0x5a60, 0x5a70,
1596 1596 0x5a80, 0x5a9c,
1597 1597 0x5b94, 0x5bfc,
1598 1598 0x6000, 0x6020,
1599 1599 0x6028, 0x6040,
1600 1600 0x6058, 0x609c,
1601 1601 0x60a8, 0x614c,
1602 1602 0x7700, 0x7798,
1603 1603 0x77c0, 0x78fc,
1604 1604 0x7b00, 0x7b58,
1605 1605 0x7b60, 0x7b84,
1606 1606 0x7b8c, 0x7c54,
1607 1607 0x7d00, 0x7d38,
1608 1608 0x7d40, 0x7d80,
1609 1609 0x7d8c, 0x7ddc,
1610 1610 0x7de4, 0x7e04,
1611 1611 0x7e10, 0x7e1c,
1612 1612 0x7e24, 0x7e38,
1613 1613 0x7e40, 0x7e44,
1614 1614 0x7e4c, 0x7e78,
1615 1615 0x7e80, 0x7edc,
1616 1616 0x7ee8, 0x7efc,
1617 1617 0x8dc0, 0x8de0,
1618 1618 0x8df8, 0x8e04,
1619 1619 0x8e10, 0x8e84,
1620 1620 0x8ea0, 0x8f84,
1621 1621 0x8fc0, 0x9058,
1622 1622 0x9060, 0x9060,
1623 1623 0x9068, 0x90f8,
1624 1624 0x9400, 0x9408,
1625 1625 0x9410, 0x9470,
1626 1626 0x9600, 0x9600,
1627 1627 0x9608, 0x9638,
1628 1628 0x9640, 0x96f4,
1629 1629 0x9800, 0x9808,
1630 1630 0x9820, 0x983c,
1631 1631 0x9850, 0x9864,
1632 1632 0x9c00, 0x9c6c,
1633 1633 0x9c80, 0x9cec,
1634 1634 0x9d00, 0x9d6c,
1635 1635 0x9d80, 0x9dec,
1636 1636 0x9e00, 0x9e6c,
1637 1637 0x9e80, 0x9eec,
1638 1638 0x9f00, 0x9f6c,
1639 1639 0x9f80, 0xa020,
1640 1640 0xd004, 0xd004,
1641 1641 0xd010, 0xd03c,
1642 1642 0xdfc0, 0xdfe0,
1643 1643 0xe000, 0x1106c,
1644 1644 0x11074, 0x11088,
1645 1645 0x1109c, 0x1117c,
1646 1646 0x11190, 0x11204,
1647 1647 0x19040, 0x1906c,
1648 1648 0x19078, 0x19080,
1649 1649 0x1908c, 0x190e8,
1650 1650 0x190f0, 0x190f8,
1651 1651 0x19100, 0x19110,
1652 1652 0x19120, 0x19124,
1653 1653 0x19150, 0x19194,
1654 1654 0x1919c, 0x191b0,
1655 1655 0x191d0, 0x191e8,
1656 1656 0x19238, 0x19290,
1657 1657 0x193f8, 0x19428,
1658 1658 0x19430, 0x19444,
1659 1659 0x1944c, 0x1946c,
1660 1660 0x19474, 0x19474,
1661 1661 0x19490, 0x194cc,
1662 1662 0x194f0, 0x194f8,
1663 1663 0x19c00, 0x19c08,
1664 1664 0x19c10, 0x19c60,
1665 1665 0x19c94, 0x19ce4,
1666 1666 0x19cf0, 0x19d40,
1667 1667 0x19d50, 0x19d94,
1668 1668 0x19da0, 0x19de8,
1669 1669 0x19df0, 0x19e10,
1670 1670 0x19e50, 0x19e90,
1671 1671 0x19ea0, 0x19f24,
1672 1672 0x19f34, 0x19f34,
1673 1673 0x19f40, 0x19f50,
1674 1674 0x19f90, 0x19fb4,
1675 1675 0x19fc4, 0x19fe4,
1676 1676 0x1a000, 0x1a004,
1677 1677 0x1a010, 0x1a06c,
1678 1678 0x1a0b0, 0x1a0e4,
1679 1679 0x1a0ec, 0x1a0f8,
1680 1680 0x1a100, 0x1a108,
1681 1681 0x1a114, 0x1a120,
1682 1682 0x1a128, 0x1a130,
1683 1683 0x1a138, 0x1a138,
1684 1684 0x1a190, 0x1a1c4,
1685 1685 0x1a1fc, 0x1a1fc,
1686 1686 0x1e008, 0x1e00c,
1687 1687 0x1e040, 0x1e044,
1688 1688 0x1e04c, 0x1e04c,
1689 1689 0x1e284, 0x1e290,
1690 1690 0x1e2c0, 0x1e2c0,
1691 1691 0x1e2e0, 0x1e2e0,
1692 1692 0x1e300, 0x1e384,
1693 1693 0x1e3c0, 0x1e3c8,
1694 1694 0x1e408, 0x1e40c,
1695 1695 0x1e440, 0x1e444,
1696 1696 0x1e44c, 0x1e44c,
1697 1697 0x1e684, 0x1e690,
1698 1698 0x1e6c0, 0x1e6c0,
1699 1699 0x1e6e0, 0x1e6e0,
1700 1700 0x1e700, 0x1e784,
1701 1701 0x1e7c0, 0x1e7c8,
1702 1702 0x1e808, 0x1e80c,
1703 1703 0x1e840, 0x1e844,
1704 1704 0x1e84c, 0x1e84c,
1705 1705 0x1ea84, 0x1ea90,
1706 1706 0x1eac0, 0x1eac0,
1707 1707 0x1eae0, 0x1eae0,
1708 1708 0x1eb00, 0x1eb84,
1709 1709 0x1ebc0, 0x1ebc8,
1710 1710 0x1ec08, 0x1ec0c,
1711 1711 0x1ec40, 0x1ec44,
1712 1712 0x1ec4c, 0x1ec4c,
1713 1713 0x1ee84, 0x1ee90,
1714 1714 0x1eec0, 0x1eec0,
1715 1715 0x1eee0, 0x1eee0,
1716 1716 0x1ef00, 0x1ef84,
1717 1717 0x1efc0, 0x1efc8,
1718 1718 0x1f008, 0x1f00c,
1719 1719 0x1f040, 0x1f044,
1720 1720 0x1f04c, 0x1f04c,
1721 1721 0x1f284, 0x1f290,
1722 1722 0x1f2c0, 0x1f2c0,
1723 1723 0x1f2e0, 0x1f2e0,
1724 1724 0x1f300, 0x1f384,
1725 1725 0x1f3c0, 0x1f3c8,
1726 1726 0x1f408, 0x1f40c,
1727 1727 0x1f440, 0x1f444,
1728 1728 0x1f44c, 0x1f44c,
1729 1729 0x1f684, 0x1f690,
1730 1730 0x1f6c0, 0x1f6c0,
1731 1731 0x1f6e0, 0x1f6e0,
1732 1732 0x1f700, 0x1f784,
1733 1733 0x1f7c0, 0x1f7c8,
1734 1734 0x1f808, 0x1f80c,
1735 1735 0x1f840, 0x1f844,
1736 1736 0x1f84c, 0x1f84c,
1737 1737 0x1fa84, 0x1fa90,
1738 1738 0x1fac0, 0x1fac0,
1739 1739 0x1fae0, 0x1fae0,
1740 1740 0x1fb00, 0x1fb84,
1741 1741 0x1fbc0, 0x1fbc8,
1742 1742 0x1fc08, 0x1fc0c,
1743 1743 0x1fc40, 0x1fc44,
1744 1744 0x1fc4c, 0x1fc4c,
1745 1745 0x1fe84, 0x1fe90,
1746 1746 0x1fec0, 0x1fec0,
1747 1747 0x1fee0, 0x1fee0,
1748 1748 0x1ff00, 0x1ff84,
1749 1749 0x1ffc0, 0x1ffc8,
1750 1750 0x30000, 0x30030,
1751 1751 0x30100, 0x30144,
1752 1752 0x30190, 0x301a0,
1753 1753 0x301a8, 0x301b8,
1754 1754 0x301c4, 0x301c8,
1755 1755 0x301d0, 0x301d0,
1756 1756 0x30200, 0x30318,
1757 1757 0x30400, 0x304b4,
1758 1758 0x304c0, 0x3052c,
1759 1759 0x30540, 0x3061c,
1760 1760 0x30800, 0x30828,
1761 1761 0x30834, 0x30834,
1762 1762 0x308c0, 0x30908,
1763 1763 0x30910, 0x309ac,
1764 1764 0x30a00, 0x30a14,
1765 1765 0x30a1c, 0x30a2c,
1766 1766 0x30a44, 0x30a50,
1767 1767 0x30a74, 0x30a74,
1768 1768 0x30a7c, 0x30afc,
1769 1769 0x30b08, 0x30c24,
1770 1770 0x30d00, 0x30d00,
1771 1771 0x30d08, 0x30d14,
1772 1772 0x30d1c, 0x30d20,
1773 1773 0x30d3c, 0x30d3c,
1774 1774 0x30d48, 0x30d50,
1775 1775 0x31200, 0x3120c,
1776 1776 0x31220, 0x31220,
1777 1777 0x31240, 0x31240,
1778 1778 0x31600, 0x3160c,
1779 1779 0x31a00, 0x31a1c,
1780 1780 0x31e00, 0x31e20,
1781 1781 0x31e38, 0x31e3c,
1782 1782 0x31e80, 0x31e80,
1783 1783 0x31e88, 0x31ea8,
1784 1784 0x31eb0, 0x31eb4,
1785 1785 0x31ec8, 0x31ed4,
1786 1786 0x31fb8, 0x32004,
1787 1787 0x32200, 0x32200,
1788 1788 0x32208, 0x32240,
1789 1789 0x32248, 0x32280,
1790 1790 0x32288, 0x322c0,
1791 1791 0x322c8, 0x322fc,
1792 1792 0x32600, 0x32630,
1793 1793 0x32a00, 0x32abc,
1794 1794 0x32b00, 0x32b10,
1795 1795 0x32b20, 0x32b30,
1796 1796 0x32b40, 0x32b50,
1797 1797 0x32b60, 0x32b70,
1798 1798 0x33000, 0x33028,
1799 1799 0x33030, 0x33048,
1800 1800 0x33060, 0x33068,
1801 1801 0x33070, 0x3309c,
1802 1802 0x330f0, 0x33128,
1803 1803 0x33130, 0x33148,
1804 1804 0x33160, 0x33168,
1805 1805 0x33170, 0x3319c,
1806 1806 0x331f0, 0x33238,
1807 1807 0x33240, 0x33240,
1808 1808 0x33248, 0x33250,
1809 1809 0x3325c, 0x33264,
1810 1810 0x33270, 0x332b8,
1811 1811 0x332c0, 0x332e4,
1812 1812 0x332f8, 0x33338,
1813 1813 0x33340, 0x33340,
1814 1814 0x33348, 0x33350,
1815 1815 0x3335c, 0x33364,
1816 1816 0x33370, 0x333b8,
1817 1817 0x333c0, 0x333e4,
1818 1818 0x333f8, 0x33428,
1819 1819 0x33430, 0x33448,
1820 1820 0x33460, 0x33468,
1821 1821 0x33470, 0x3349c,
1822 1822 0x334f0, 0x33528,
1823 1823 0x33530, 0x33548,
1824 1824 0x33560, 0x33568,
1825 1825 0x33570, 0x3359c,
1826 1826 0x335f0, 0x33638,
1827 1827 0x33640, 0x33640,
1828 1828 0x33648, 0x33650,
1829 1829 0x3365c, 0x33664,
1830 1830 0x33670, 0x336b8,
1831 1831 0x336c0, 0x336e4,
1832 1832 0x336f8, 0x33738,
1833 1833 0x33740, 0x33740,
1834 1834 0x33748, 0x33750,
1835 1835 0x3375c, 0x33764,
1836 1836 0x33770, 0x337b8,
1837 1837 0x337c0, 0x337e4,
1838 1838 0x337f8, 0x337fc,
1839 1839 0x33814, 0x33814,
1840 1840 0x3382c, 0x3382c,
1841 1841 0x33880, 0x3388c,
1842 1842 0x338e8, 0x338ec,
1843 1843 0x33900, 0x33928,
1844 1844 0x33930, 0x33948,
1845 1845 0x33960, 0x33968,
1846 1846 0x33970, 0x3399c,
1847 1847 0x339f0, 0x33a38,
1848 1848 0x33a40, 0x33a40,
1849 1849 0x33a48, 0x33a50,
1850 1850 0x33a5c, 0x33a64,
1851 1851 0x33a70, 0x33ab8,
1852 1852 0x33ac0, 0x33ae4,
1853 1853 0x33af8, 0x33b10,
1854 1854 0x33b28, 0x33b28,
1855 1855 0x33b3c, 0x33b50,
1856 1856 0x33bf0, 0x33c10,
1857 1857 0x33c28, 0x33c28,
1858 1858 0x33c3c, 0x33c50,
1859 1859 0x33cf0, 0x33cfc,
1860 1860 0x34000, 0x34030,
1861 1861 0x34100, 0x34144,
1862 1862 0x34190, 0x341a0,
1863 1863 0x341a8, 0x341b8,
1864 1864 0x341c4, 0x341c8,
1865 1865 0x341d0, 0x341d0,
1866 1866 0x34200, 0x34318,
1867 1867 0x34400, 0x344b4,
1868 1868 0x344c0, 0x3452c,
1869 1869 0x34540, 0x3461c,
1870 1870 0x34800, 0x34828,
1871 1871 0x34834, 0x34834,
1872 1872 0x348c0, 0x34908,
1873 1873 0x34910, 0x349ac,
1874 1874 0x34a00, 0x34a14,
1875 1875 0x34a1c, 0x34a2c,
1876 1876 0x34a44, 0x34a50,
1877 1877 0x34a74, 0x34a74,
1878 1878 0x34a7c, 0x34afc,
1879 1879 0x34b08, 0x34c24,
1880 1880 0x34d00, 0x34d00,
1881 1881 0x34d08, 0x34d14,
1882 1882 0x34d1c, 0x34d20,
1883 1883 0x34d3c, 0x34d3c,
1884 1884 0x34d48, 0x34d50,
1885 1885 0x35200, 0x3520c,
1886 1886 0x35220, 0x35220,
1887 1887 0x35240, 0x35240,
1888 1888 0x35600, 0x3560c,
1889 1889 0x35a00, 0x35a1c,
1890 1890 0x35e00, 0x35e20,
1891 1891 0x35e38, 0x35e3c,
1892 1892 0x35e80, 0x35e80,
1893 1893 0x35e88, 0x35ea8,
1894 1894 0x35eb0, 0x35eb4,
1895 1895 0x35ec8, 0x35ed4,
1896 1896 0x35fb8, 0x36004,
1897 1897 0x36200, 0x36200,
1898 1898 0x36208, 0x36240,
1899 1899 0x36248, 0x36280,
1900 1900 0x36288, 0x362c0,
1901 1901 0x362c8, 0x362fc,
1902 1902 0x36600, 0x36630,
1903 1903 0x36a00, 0x36abc,
1904 1904 0x36b00, 0x36b10,
1905 1905 0x36b20, 0x36b30,
1906 1906 0x36b40, 0x36b50,
1907 1907 0x36b60, 0x36b70,
1908 1908 0x37000, 0x37028,
1909 1909 0x37030, 0x37048,
1910 1910 0x37060, 0x37068,
1911 1911 0x37070, 0x3709c,
1912 1912 0x370f0, 0x37128,
1913 1913 0x37130, 0x37148,
1914 1914 0x37160, 0x37168,
1915 1915 0x37170, 0x3719c,
1916 1916 0x371f0, 0x37238,
1917 1917 0x37240, 0x37240,
1918 1918 0x37248, 0x37250,
1919 1919 0x3725c, 0x37264,
1920 1920 0x37270, 0x372b8,
1921 1921 0x372c0, 0x372e4,
1922 1922 0x372f8, 0x37338,
1923 1923 0x37340, 0x37340,
1924 1924 0x37348, 0x37350,
1925 1925 0x3735c, 0x37364,
1926 1926 0x37370, 0x373b8,
1927 1927 0x373c0, 0x373e4,
1928 1928 0x373f8, 0x37428,
1929 1929 0x37430, 0x37448,
1930 1930 0x37460, 0x37468,
1931 1931 0x37470, 0x3749c,
1932 1932 0x374f0, 0x37528,
1933 1933 0x37530, 0x37548,
1934 1934 0x37560, 0x37568,
1935 1935 0x37570, 0x3759c,
1936 1936 0x375f0, 0x37638,
1937 1937 0x37640, 0x37640,
1938 1938 0x37648, 0x37650,
1939 1939 0x3765c, 0x37664,
1940 1940 0x37670, 0x376b8,
1941 1941 0x376c0, 0x376e4,
1942 1942 0x376f8, 0x37738,
1943 1943 0x37740, 0x37740,
1944 1944 0x37748, 0x37750,
1945 1945 0x3775c, 0x37764,
1946 1946 0x37770, 0x377b8,
1947 1947 0x377c0, 0x377e4,
1948 1948 0x377f8, 0x377fc,
1949 1949 0x37814, 0x37814,
1950 1950 0x3782c, 0x3782c,
1951 1951 0x37880, 0x3788c,
1952 1952 0x378e8, 0x378ec,
1953 1953 0x37900, 0x37928,
1954 1954 0x37930, 0x37948,
1955 1955 0x37960, 0x37968,
1956 1956 0x37970, 0x3799c,
1957 1957 0x379f0, 0x37a38,
1958 1958 0x37a40, 0x37a40,
1959 1959 0x37a48, 0x37a50,
1960 1960 0x37a5c, 0x37a64,
1961 1961 0x37a70, 0x37ab8,
1962 1962 0x37ac0, 0x37ae4,
1963 1963 0x37af8, 0x37b10,
1964 1964 0x37b28, 0x37b28,
1965 1965 0x37b3c, 0x37b50,
1966 1966 0x37bf0, 0x37c10,
1967 1967 0x37c28, 0x37c28,
1968 1968 0x37c3c, 0x37c50,
1969 1969 0x37cf0, 0x37cfc,
1970 1970 0x38000, 0x38030,
1971 1971 0x38100, 0x38144,
1972 1972 0x38190, 0x381a0,
1973 1973 0x381a8, 0x381b8,
1974 1974 0x381c4, 0x381c8,
1975 1975 0x381d0, 0x381d0,
1976 1976 0x38200, 0x38318,
1977 1977 0x38400, 0x384b4,
1978 1978 0x384c0, 0x3852c,
1979 1979 0x38540, 0x3861c,
1980 1980 0x38800, 0x38828,
1981 1981 0x38834, 0x38834,
1982 1982 0x388c0, 0x38908,
1983 1983 0x38910, 0x389ac,
1984 1984 0x38a00, 0x38a14,
1985 1985 0x38a1c, 0x38a2c,
1986 1986 0x38a44, 0x38a50,
1987 1987 0x38a74, 0x38a74,
1988 1988 0x38a7c, 0x38afc,
1989 1989 0x38b08, 0x38c24,
1990 1990 0x38d00, 0x38d00,
1991 1991 0x38d08, 0x38d14,
1992 1992 0x38d1c, 0x38d20,
1993 1993 0x38d3c, 0x38d3c,
1994 1994 0x38d48, 0x38d50,
1995 1995 0x39200, 0x3920c,
1996 1996 0x39220, 0x39220,
1997 1997 0x39240, 0x39240,
1998 1998 0x39600, 0x3960c,
1999 1999 0x39a00, 0x39a1c,
2000 2000 0x39e00, 0x39e20,
2001 2001 0x39e38, 0x39e3c,
2002 2002 0x39e80, 0x39e80,
2003 2003 0x39e88, 0x39ea8,
2004 2004 0x39eb0, 0x39eb4,
2005 2005 0x39ec8, 0x39ed4,
2006 2006 0x39fb8, 0x3a004,
2007 2007 0x3a200, 0x3a200,
2008 2008 0x3a208, 0x3a240,
2009 2009 0x3a248, 0x3a280,
2010 2010 0x3a288, 0x3a2c0,
2011 2011 0x3a2c8, 0x3a2fc,
2012 2012 0x3a600, 0x3a630,
2013 2013 0x3aa00, 0x3aabc,
2014 2014 0x3ab00, 0x3ab10,
2015 2015 0x3ab20, 0x3ab30,
2016 2016 0x3ab40, 0x3ab50,
2017 2017 0x3ab60, 0x3ab70,
2018 2018 0x3b000, 0x3b028,
2019 2019 0x3b030, 0x3b048,
2020 2020 0x3b060, 0x3b068,
2021 2021 0x3b070, 0x3b09c,
2022 2022 0x3b0f0, 0x3b128,
2023 2023 0x3b130, 0x3b148,
2024 2024 0x3b160, 0x3b168,
2025 2025 0x3b170, 0x3b19c,
2026 2026 0x3b1f0, 0x3b238,
2027 2027 0x3b240, 0x3b240,
2028 2028 0x3b248, 0x3b250,
2029 2029 0x3b25c, 0x3b264,
2030 2030 0x3b270, 0x3b2b8,
2031 2031 0x3b2c0, 0x3b2e4,
2032 2032 0x3b2f8, 0x3b338,
2033 2033 0x3b340, 0x3b340,
2034 2034 0x3b348, 0x3b350,
2035 2035 0x3b35c, 0x3b364,
2036 2036 0x3b370, 0x3b3b8,
2037 2037 0x3b3c0, 0x3b3e4,
2038 2038 0x3b3f8, 0x3b428,
2039 2039 0x3b430, 0x3b448,
2040 2040 0x3b460, 0x3b468,
2041 2041 0x3b470, 0x3b49c,
2042 2042 0x3b4f0, 0x3b528,
2043 2043 0x3b530, 0x3b548,
2044 2044 0x3b560, 0x3b568,
2045 2045 0x3b570, 0x3b59c,
2046 2046 0x3b5f0, 0x3b638,
2047 2047 0x3b640, 0x3b640,
2048 2048 0x3b648, 0x3b650,
2049 2049 0x3b65c, 0x3b664,
2050 2050 0x3b670, 0x3b6b8,
2051 2051 0x3b6c0, 0x3b6e4,
2052 2052 0x3b6f8, 0x3b738,
2053 2053 0x3b740, 0x3b740,
2054 2054 0x3b748, 0x3b750,
2055 2055 0x3b75c, 0x3b764,
2056 2056 0x3b770, 0x3b7b8,
2057 2057 0x3b7c0, 0x3b7e4,
2058 2058 0x3b7f8, 0x3b7fc,
2059 2059 0x3b814, 0x3b814,
2060 2060 0x3b82c, 0x3b82c,
2061 2061 0x3b880, 0x3b88c,
2062 2062 0x3b8e8, 0x3b8ec,
2063 2063 0x3b900, 0x3b928,
2064 2064 0x3b930, 0x3b948,
2065 2065 0x3b960, 0x3b968,
2066 2066 0x3b970, 0x3b99c,
2067 2067 0x3b9f0, 0x3ba38,
2068 2068 0x3ba40, 0x3ba40,
2069 2069 0x3ba48, 0x3ba50,
2070 2070 0x3ba5c, 0x3ba64,
2071 2071 0x3ba70, 0x3bab8,
2072 2072 0x3bac0, 0x3bae4,
2073 2073 0x3baf8, 0x3bb10,
2074 2074 0x3bb28, 0x3bb28,
2075 2075 0x3bb3c, 0x3bb50,
2076 2076 0x3bbf0, 0x3bc10,
2077 2077 0x3bc28, 0x3bc28,
2078 2078 0x3bc3c, 0x3bc50,
2079 2079 0x3bcf0, 0x3bcfc,
2080 2080 0x3c000, 0x3c030,
2081 2081 0x3c100, 0x3c144,
2082 2082 0x3c190, 0x3c1a0,
2083 2083 0x3c1a8, 0x3c1b8,
2084 2084 0x3c1c4, 0x3c1c8,
2085 2085 0x3c1d0, 0x3c1d0,
2086 2086 0x3c200, 0x3c318,
2087 2087 0x3c400, 0x3c4b4,
2088 2088 0x3c4c0, 0x3c52c,
2089 2089 0x3c540, 0x3c61c,
2090 2090 0x3c800, 0x3c828,
2091 2091 0x3c834, 0x3c834,
2092 2092 0x3c8c0, 0x3c908,
2093 2093 0x3c910, 0x3c9ac,
2094 2094 0x3ca00, 0x3ca14,
2095 2095 0x3ca1c, 0x3ca2c,
2096 2096 0x3ca44, 0x3ca50,
2097 2097 0x3ca74, 0x3ca74,
2098 2098 0x3ca7c, 0x3cafc,
2099 2099 0x3cb08, 0x3cc24,
2100 2100 0x3cd00, 0x3cd00,
2101 2101 0x3cd08, 0x3cd14,
2102 2102 0x3cd1c, 0x3cd20,
2103 2103 0x3cd3c, 0x3cd3c,
2104 2104 0x3cd48, 0x3cd50,
2105 2105 0x3d200, 0x3d20c,
2106 2106 0x3d220, 0x3d220,
2107 2107 0x3d240, 0x3d240,
2108 2108 0x3d600, 0x3d60c,
2109 2109 0x3da00, 0x3da1c,
2110 2110 0x3de00, 0x3de20,
2111 2111 0x3de38, 0x3de3c,
2112 2112 0x3de80, 0x3de80,
2113 2113 0x3de88, 0x3dea8,
2114 2114 0x3deb0, 0x3deb4,
2115 2115 0x3dec8, 0x3ded4,
2116 2116 0x3dfb8, 0x3e004,
2117 2117 0x3e200, 0x3e200,
2118 2118 0x3e208, 0x3e240,
2119 2119 0x3e248, 0x3e280,
2120 2120 0x3e288, 0x3e2c0,
2121 2121 0x3e2c8, 0x3e2fc,
2122 2122 0x3e600, 0x3e630,
2123 2123 0x3ea00, 0x3eabc,
2124 2124 0x3eb00, 0x3eb10,
2125 2125 0x3eb20, 0x3eb30,
2126 2126 0x3eb40, 0x3eb50,
2127 2127 0x3eb60, 0x3eb70,
2128 2128 0x3f000, 0x3f028,
2129 2129 0x3f030, 0x3f048,
2130 2130 0x3f060, 0x3f068,
2131 2131 0x3f070, 0x3f09c,
2132 2132 0x3f0f0, 0x3f128,
2133 2133 0x3f130, 0x3f148,
2134 2134 0x3f160, 0x3f168,
2135 2135 0x3f170, 0x3f19c,
2136 2136 0x3f1f0, 0x3f238,
2137 2137 0x3f240, 0x3f240,
2138 2138 0x3f248, 0x3f250,
2139 2139 0x3f25c, 0x3f264,
2140 2140 0x3f270, 0x3f2b8,
2141 2141 0x3f2c0, 0x3f2e4,
2142 2142 0x3f2f8, 0x3f338,
2143 2143 0x3f340, 0x3f340,
2144 2144 0x3f348, 0x3f350,
2145 2145 0x3f35c, 0x3f364,
2146 2146 0x3f370, 0x3f3b8,
2147 2147 0x3f3c0, 0x3f3e4,
2148 2148 0x3f3f8, 0x3f428,
2149 2149 0x3f430, 0x3f448,
2150 2150 0x3f460, 0x3f468,
2151 2151 0x3f470, 0x3f49c,
2152 2152 0x3f4f0, 0x3f528,
2153 2153 0x3f530, 0x3f548,
2154 2154 0x3f560, 0x3f568,
2155 2155 0x3f570, 0x3f59c,
2156 2156 0x3f5f0, 0x3f638,
2157 2157 0x3f640, 0x3f640,
2158 2158 0x3f648, 0x3f650,
2159 2159 0x3f65c, 0x3f664,
2160 2160 0x3f670, 0x3f6b8,
2161 2161 0x3f6c0, 0x3f6e4,
2162 2162 0x3f6f8, 0x3f738,
2163 2163 0x3f740, 0x3f740,
2164 2164 0x3f748, 0x3f750,
2165 2165 0x3f75c, 0x3f764,
2166 2166 0x3f770, 0x3f7b8,
2167 2167 0x3f7c0, 0x3f7e4,
2168 2168 0x3f7f8, 0x3f7fc,
2169 2169 0x3f814, 0x3f814,
2170 2170 0x3f82c, 0x3f82c,
2171 2171 0x3f880, 0x3f88c,
2172 2172 0x3f8e8, 0x3f8ec,
2173 2173 0x3f900, 0x3f928,
2174 2174 0x3f930, 0x3f948,
2175 2175 0x3f960, 0x3f968,
2176 2176 0x3f970, 0x3f99c,
2177 2177 0x3f9f0, 0x3fa38,
2178 2178 0x3fa40, 0x3fa40,
2179 2179 0x3fa48, 0x3fa50,
2180 2180 0x3fa5c, 0x3fa64,
2181 2181 0x3fa70, 0x3fab8,
2182 2182 0x3fac0, 0x3fae4,
2183 2183 0x3faf8, 0x3fb10,
2184 2184 0x3fb28, 0x3fb28,
2185 2185 0x3fb3c, 0x3fb50,
2186 2186 0x3fbf0, 0x3fc10,
2187 2187 0x3fc28, 0x3fc28,
2188 2188 0x3fc3c, 0x3fc50,
2189 2189 0x3fcf0, 0x3fcfc,
2190 2190 0x40000, 0x4000c,
2191 2191 0x40040, 0x40050,
2192 2192 0x40060, 0x40068,
2193 2193 0x4007c, 0x4008c,
2194 2194 0x40094, 0x400b0,
2195 2195 0x400c0, 0x40144,
2196 2196 0x40180, 0x4018c,
2197 2197 0x40200, 0x40254,
2198 2198 0x40260, 0x40264,
2199 2199 0x40270, 0x40288,
2200 2200 0x40290, 0x40298,
2201 2201 0x402ac, 0x402c8,
2202 2202 0x402d0, 0x402e0,
2203 2203 0x402f0, 0x402f0,
2204 2204 0x40300, 0x4033c,
2205 2205 0x403f8, 0x403fc,
2206 2206 0x41304, 0x413c4,
2207 2207 0x41400, 0x4140c,
2208 2208 0x41414, 0x4141c,
2209 2209 0x41480, 0x414d0,
2210 2210 0x44000, 0x44054,
2211 2211 0x4405c, 0x44078,
2212 2212 0x440c0, 0x44174,
2213 2213 0x44180, 0x441ac,
2214 2214 0x441b4, 0x441b8,
2215 2215 0x441c0, 0x44254,
2216 2216 0x4425c, 0x44278,
2217 2217 0x442c0, 0x44374,
2218 2218 0x44380, 0x443ac,
2219 2219 0x443b4, 0x443b8,
2220 2220 0x443c0, 0x44454,
2221 2221 0x4445c, 0x44478,
2222 2222 0x444c0, 0x44574,
2223 2223 0x44580, 0x445ac,
2224 2224 0x445b4, 0x445b8,
2225 2225 0x445c0, 0x44654,
2226 2226 0x4465c, 0x44678,
2227 2227 0x446c0, 0x44774,
2228 2228 0x44780, 0x447ac,
2229 2229 0x447b4, 0x447b8,
2230 2230 0x447c0, 0x44854,
2231 2231 0x4485c, 0x44878,
2232 2232 0x448c0, 0x44974,
2233 2233 0x44980, 0x449ac,
2234 2234 0x449b4, 0x449b8,
2235 2235 0x449c0, 0x449fc,
2236 2236 0x45000, 0x45004,
2237 2237 0x45010, 0x45030,
2238 2238 0x45040, 0x45060,
2239 2239 0x45068, 0x45068,
2240 2240 0x45080, 0x45084,
2241 2241 0x450a0, 0x450b0,
2242 2242 0x45200, 0x45204,
2243 2243 0x45210, 0x45230,
2244 2244 0x45240, 0x45260,
2245 2245 0x45268, 0x45268,
2246 2246 0x45280, 0x45284,
2247 2247 0x452a0, 0x452b0,
2248 2248 0x460c0, 0x460e4,
2249 2249 0x47000, 0x4703c,
2250 2250 0x47044, 0x4708c,
2251 2251 0x47200, 0x47250,
2252 2252 0x47400, 0x47408,
2253 2253 0x47414, 0x47420,
2254 2254 0x47600, 0x47618,
2255 2255 0x47800, 0x47814,
2256 2256 0x48000, 0x4800c,
2257 2257 0x48040, 0x48050,
2258 2258 0x48060, 0x48068,
2259 2259 0x4807c, 0x4808c,
2260 2260 0x48094, 0x480b0,
2261 2261 0x480c0, 0x48144,
2262 2262 0x48180, 0x4818c,
2263 2263 0x48200, 0x48254,
2264 2264 0x48260, 0x48264,
2265 2265 0x48270, 0x48288,
2266 2266 0x48290, 0x48298,
2267 2267 0x482ac, 0x482c8,
2268 2268 0x482d0, 0x482e0,
2269 2269 0x482f0, 0x482f0,
2270 2270 0x48300, 0x4833c,
2271 2271 0x483f8, 0x483fc,
2272 2272 0x49304, 0x493c4,
2273 2273 0x49400, 0x4940c,
2274 2274 0x49414, 0x4941c,
2275 2275 0x49480, 0x494d0,
2276 2276 0x4c000, 0x4c054,
2277 2277 0x4c05c, 0x4c078,
2278 2278 0x4c0c0, 0x4c174,
2279 2279 0x4c180, 0x4c1ac,
2280 2280 0x4c1b4, 0x4c1b8,
2281 2281 0x4c1c0, 0x4c254,
2282 2282 0x4c25c, 0x4c278,
2283 2283 0x4c2c0, 0x4c374,
2284 2284 0x4c380, 0x4c3ac,
2285 2285 0x4c3b4, 0x4c3b8,
2286 2286 0x4c3c0, 0x4c454,
2287 2287 0x4c45c, 0x4c478,
2288 2288 0x4c4c0, 0x4c574,
2289 2289 0x4c580, 0x4c5ac,
2290 2290 0x4c5b4, 0x4c5b8,
2291 2291 0x4c5c0, 0x4c654,
2292 2292 0x4c65c, 0x4c678,
2293 2293 0x4c6c0, 0x4c774,
2294 2294 0x4c780, 0x4c7ac,
2295 2295 0x4c7b4, 0x4c7b8,
2296 2296 0x4c7c0, 0x4c854,
2297 2297 0x4c85c, 0x4c878,
2298 2298 0x4c8c0, 0x4c974,
2299 2299 0x4c980, 0x4c9ac,
2300 2300 0x4c9b4, 0x4c9b8,
2301 2301 0x4c9c0, 0x4c9fc,
2302 2302 0x4d000, 0x4d004,
2303 2303 0x4d010, 0x4d030,
2304 2304 0x4d040, 0x4d060,
2305 2305 0x4d068, 0x4d068,
2306 2306 0x4d080, 0x4d084,
2307 2307 0x4d0a0, 0x4d0b0,
2308 2308 0x4d200, 0x4d204,
2309 2309 0x4d210, 0x4d230,
2310 2310 0x4d240, 0x4d260,
2311 2311 0x4d268, 0x4d268,
2312 2312 0x4d280, 0x4d284,
2313 2313 0x4d2a0, 0x4d2b0,
2314 2314 0x4e0c0, 0x4e0e4,
2315 2315 0x4f000, 0x4f03c,
2316 2316 0x4f044, 0x4f08c,
2317 2317 0x4f200, 0x4f250,
2318 2318 0x4f400, 0x4f408,
2319 2319 0x4f414, 0x4f420,
2320 2320 0x4f600, 0x4f618,
2321 2321 0x4f800, 0x4f814,
2322 2322 0x50000, 0x50084,
2323 2323 0x50090, 0x500cc,
2324 2324 0x50400, 0x50400,
2325 2325 0x50800, 0x50884,
2326 2326 0x50890, 0x508cc,
2327 2327 0x50c00, 0x50c00,
2328 2328 0x51000, 0x5101c,
2329 2329 0x51300, 0x51308,
2330 2330 };
2331 2331
2332 2332 static const unsigned int t6_reg_ranges[] = {
2333 2333 0x1008, 0x101c,
2334 2334 0x1024, 0x10a8,
2335 2335 0x10b4, 0x10f8,
2336 2336 0x1100, 0x1114,
2337 2337 0x111c, 0x112c,
2338 2338 0x1138, 0x113c,
2339 2339 0x1144, 0x114c,
2340 2340 0x1180, 0x1184,
2341 2341 0x1190, 0x1194,
2342 2342 0x11a0, 0x11a4,
2343 2343 0x11b0, 0x11b4,
2344 2344 0x11fc, 0x1274,
2345 2345 0x1280, 0x133c,
2346 2346 0x1800, 0x18fc,
2347 2347 0x3000, 0x302c,
2348 2348 0x3060, 0x30b0,
2349 2349 0x30b8, 0x30d8,
2350 2350 0x30e0, 0x30fc,
2351 2351 0x3140, 0x357c,
2352 2352 0x35a8, 0x35cc,
2353 2353 0x35ec, 0x35ec,
2354 2354 0x3600, 0x5624,
2355 2355 0x56cc, 0x56ec,
2356 2356 0x56f4, 0x5720,
2357 2357 0x5728, 0x575c,
2358 2358 0x580c, 0x5814,
2359 2359 0x5890, 0x589c,
2360 2360 0x58a4, 0x58ac,
2361 2361 0x58b8, 0x58bc,
2362 2362 0x5940, 0x595c,
2363 2363 0x5980, 0x598c,
2364 2364 0x59b0, 0x59c8,
2365 2365 0x59d0, 0x59dc,
2366 2366 0x59fc, 0x5a18,
2367 2367 0x5a60, 0x5a6c,
2368 2368 0x5a80, 0x5a8c,
2369 2369 0x5a94, 0x5a9c,
2370 2370 0x5b94, 0x5bfc,
2371 2371 0x5c10, 0x5e48,
2372 2372 0x5e50, 0x5e94,
2373 2373 0x5ea0, 0x5eb0,
2374 2374 0x5ec0, 0x5ec0,
2375 2375 0x5ec8, 0x5ed0,
2376 2376 0x5ee0, 0x5ee0,
2377 2377 0x5ef0, 0x5ef0,
2378 2378 0x5f00, 0x5f00,
2379 2379 0x6000, 0x6020,
2380 2380 0x6028, 0x6040,
2381 2381 0x6058, 0x609c,
2382 2382 0x60a8, 0x619c,
2383 2383 0x7700, 0x7798,
2384 2384 0x77c0, 0x7880,
2385 2385 0x78cc, 0x78fc,
2386 2386 0x7b00, 0x7b58,
2387 2387 0x7b60, 0x7b84,
2388 2388 0x7b8c, 0x7c54,
2389 2389 0x7d00, 0x7d38,
2390 2390 0x7d40, 0x7d84,
2391 2391 0x7d8c, 0x7ddc,
2392 2392 0x7de4, 0x7e04,
2393 2393 0x7e10, 0x7e1c,
2394 2394 0x7e24, 0x7e38,
2395 2395 0x7e40, 0x7e44,
2396 2396 0x7e4c, 0x7e78,
2397 2397 0x7e80, 0x7edc,
2398 2398 0x7ee8, 0x7efc,
2399 2399 0x8dc0, 0x8de4,
2400 2400 0x8df8, 0x8e04,
2401 2401 0x8e10, 0x8e84,
2402 2402 0x8ea0, 0x8f88,
2403 2403 0x8fb8, 0x9058,
2404 2404 0x9060, 0x9060,
2405 2405 0x9068, 0x90f8,
2406 2406 0x9100, 0x9124,
2407 2407 0x9400, 0x9470,
2408 2408 0x9600, 0x9600,
2409 2409 0x9608, 0x9638,
2410 2410 0x9640, 0x9704,
2411 2411 0x9710, 0x971c,
2412 2412 0x9800, 0x9808,
2413 2413 0x9820, 0x983c,
2414 2414 0x9850, 0x9864,
2415 2415 0x9c00, 0x9c6c,
2416 2416 0x9c80, 0x9cec,
2417 2417 0x9d00, 0x9d6c,
2418 2418 0x9d80, 0x9dec,
2419 2419 0x9e00, 0x9e6c,
2420 2420 0x9e80, 0x9eec,
2421 2421 0x9f00, 0x9f6c,
2422 2422 0x9f80, 0xa020,
2423 2423 0xd004, 0xd03c,
2424 2424 0xd100, 0xd118,
2425 2425 0xd200, 0xd214,
2426 2426 0xd220, 0xd234,
2427 2427 0xd240, 0xd254,
2428 2428 0xd260, 0xd274,
2429 2429 0xd280, 0xd294,
2430 2430 0xd2a0, 0xd2b4,
2431 2431 0xd2c0, 0xd2d4,
2432 2432 0xd2e0, 0xd2f4,
2433 2433 0xd300, 0xd31c,
2434 2434 0xdfc0, 0xdfe0,
2435 2435 0xe000, 0xf008,
2436 2436 0xf010, 0xf018,
2437 2437 0xf020, 0xf028,
2438 2438 0x11000, 0x11014,
2439 2439 0x11048, 0x1106c,
2440 2440 0x11074, 0x11088,
2441 2441 0x11098, 0x11120,
2442 2442 0x1112c, 0x1117c,
2443 2443 0x11190, 0x112e0,
2444 2444 0x11300, 0x1130c,
2445 2445 0x12000, 0x1206c,
2446 2446 0x19040, 0x1906c,
2447 2447 0x19078, 0x19080,
2448 2448 0x1908c, 0x190e8,
2449 2449 0x190f0, 0x190f8,
2450 2450 0x19100, 0x19110,
2451 2451 0x19120, 0x19124,
2452 2452 0x19150, 0x19194,
2453 2453 0x1919c, 0x191b0,
2454 2454 0x191d0, 0x191e8,
2455 2455 0x19238, 0x19290,
2456 2456 0x192a4, 0x192b0,
2457 2457 0x192bc, 0x192bc,
2458 2458 0x19348, 0x1934c,
2459 2459 0x193f8, 0x19418,
2460 2460 0x19420, 0x19428,
2461 2461 0x19430, 0x19444,
2462 2462 0x1944c, 0x1946c,
2463 2463 0x19474, 0x19474,
2464 2464 0x19490, 0x194cc,
2465 2465 0x194f0, 0x194f8,
2466 2466 0x19c00, 0x19c48,
2467 2467 0x19c50, 0x19c80,
2468 2468 0x19c94, 0x19c98,
2469 2469 0x19ca0, 0x19cbc,
2470 2470 0x19ce4, 0x19ce4,
2471 2471 0x19cf0, 0x19cf8,
2472 2472 0x19d00, 0x19d28,
2473 2473 0x19d50, 0x19d78,
2474 2474 0x19d94, 0x19d98,
2475 2475 0x19da0, 0x19dc8,
2476 2476 0x19df0, 0x19e10,
2477 2477 0x19e50, 0x19e6c,
2478 2478 0x19ea0, 0x19ebc,
2479 2479 0x19ec4, 0x19ef4,
2480 2480 0x19f04, 0x19f2c,
2481 2481 0x19f34, 0x19f34,
2482 2482 0x19f40, 0x19f50,
2483 2483 0x19f90, 0x19fac,
2484 2484 0x19fc4, 0x19fc8,
2485 2485 0x19fd0, 0x19fe4,
2486 2486 0x1a000, 0x1a004,
2487 2487 0x1a010, 0x1a06c,
2488 2488 0x1a0b0, 0x1a0e4,
2489 2489 0x1a0ec, 0x1a0f8,
2490 2490 0x1a100, 0x1a108,
2491 2491 0x1a114, 0x1a120,
2492 2492 0x1a128, 0x1a130,
2493 2493 0x1a138, 0x1a138,
2494 2494 0x1a190, 0x1a1c4,
2495 2495 0x1a1fc, 0x1a1fc,
2496 2496 0x1e008, 0x1e00c,
2497 2497 0x1e040, 0x1e044,
2498 2498 0x1e04c, 0x1e04c,
2499 2499 0x1e284, 0x1e290,
2500 2500 0x1e2c0, 0x1e2c0,
2501 2501 0x1e2e0, 0x1e2e0,
2502 2502 0x1e300, 0x1e384,
2503 2503 0x1e3c0, 0x1e3c8,
2504 2504 0x1e408, 0x1e40c,
2505 2505 0x1e440, 0x1e444,
2506 2506 0x1e44c, 0x1e44c,
2507 2507 0x1e684, 0x1e690,
2508 2508 0x1e6c0, 0x1e6c0,
2509 2509 0x1e6e0, 0x1e6e0,
2510 2510 0x1e700, 0x1e784,
2511 2511 0x1e7c0, 0x1e7c8,
2512 2512 0x1e808, 0x1e80c,
2513 2513 0x1e840, 0x1e844,
2514 2514 0x1e84c, 0x1e84c,
2515 2515 0x1ea84, 0x1ea90,
2516 2516 0x1eac0, 0x1eac0,
2517 2517 0x1eae0, 0x1eae0,
2518 2518 0x1eb00, 0x1eb84,
2519 2519 0x1ebc0, 0x1ebc8,
2520 2520 0x1ec08, 0x1ec0c,
2521 2521 0x1ec40, 0x1ec44,
2522 2522 0x1ec4c, 0x1ec4c,
2523 2523 0x1ee84, 0x1ee90,
2524 2524 0x1eec0, 0x1eec0,
2525 2525 0x1eee0, 0x1eee0,
2526 2526 0x1ef00, 0x1ef84,
2527 2527 0x1efc0, 0x1efc8,
2528 2528 0x1f008, 0x1f00c,
2529 2529 0x1f040, 0x1f044,
2530 2530 0x1f04c, 0x1f04c,
2531 2531 0x1f284, 0x1f290,
2532 2532 0x1f2c0, 0x1f2c0,
2533 2533 0x1f2e0, 0x1f2e0,
2534 2534 0x1f300, 0x1f384,
2535 2535 0x1f3c0, 0x1f3c8,
2536 2536 0x1f408, 0x1f40c,
2537 2537 0x1f440, 0x1f444,
2538 2538 0x1f44c, 0x1f44c,
2539 2539 0x1f684, 0x1f690,
2540 2540 0x1f6c0, 0x1f6c0,
2541 2541 0x1f6e0, 0x1f6e0,
2542 2542 0x1f700, 0x1f784,
2543 2543 0x1f7c0, 0x1f7c8,
2544 2544 0x1f808, 0x1f80c,
2545 2545 0x1f840, 0x1f844,
2546 2546 0x1f84c, 0x1f84c,
2547 2547 0x1fa84, 0x1fa90,
2548 2548 0x1fac0, 0x1fac0,
2549 2549 0x1fae0, 0x1fae0,
2550 2550 0x1fb00, 0x1fb84,
2551 2551 0x1fbc0, 0x1fbc8,
2552 2552 0x1fc08, 0x1fc0c,
2553 2553 0x1fc40, 0x1fc44,
2554 2554 0x1fc4c, 0x1fc4c,
2555 2555 0x1fe84, 0x1fe90,
2556 2556 0x1fec0, 0x1fec0,
2557 2557 0x1fee0, 0x1fee0,
2558 2558 0x1ff00, 0x1ff84,
2559 2559 0x1ffc0, 0x1ffc8,
2560 2560 0x30000, 0x30030,
2561 2561 0x30100, 0x30168,
2562 2562 0x30190, 0x301a0,
2563 2563 0x301a8, 0x301b8,
2564 2564 0x301c4, 0x301c8,
2565 2565 0x301d0, 0x301d0,
2566 2566 0x30200, 0x30320,
2567 2567 0x30400, 0x304b4,
2568 2568 0x304c0, 0x3052c,
2569 2569 0x30540, 0x3061c,
2570 2570 0x30800, 0x308a0,
2571 2571 0x308c0, 0x30908,
2572 2572 0x30910, 0x309b8,
2573 2573 0x30a00, 0x30a04,
2574 2574 0x30a0c, 0x30a14,
2575 2575 0x30a1c, 0x30a2c,
2576 2576 0x30a44, 0x30a50,
2577 2577 0x30a74, 0x30a74,
2578 2578 0x30a7c, 0x30afc,
2579 2579 0x30b08, 0x30c24,
2580 2580 0x30d00, 0x30d14,
2581 2581 0x30d1c, 0x30d3c,
2582 2582 0x30d44, 0x30d4c,
2583 2583 0x30d54, 0x30d74,
2584 2584 0x30d7c, 0x30d7c,
2585 2585 0x30de0, 0x30de0,
2586 2586 0x30e00, 0x30ed4,
2587 2587 0x30f00, 0x30fa4,
2588 2588 0x30fc0, 0x30fc4,
2589 2589 0x31000, 0x31004,
2590 2590 0x31080, 0x310fc,
2591 2591 0x31208, 0x31220,
2592 2592 0x3123c, 0x31254,
2593 2593 0x31300, 0x31300,
2594 2594 0x31308, 0x3131c,
2595 2595 0x31338, 0x3133c,
2596 2596 0x31380, 0x31380,
2597 2597 0x31388, 0x313a8,
2598 2598 0x313b4, 0x313b4,
2599 2599 0x31400, 0x31420,
2600 2600 0x31438, 0x3143c,
2601 2601 0x31480, 0x31480,
2602 2602 0x314a8, 0x314a8,
2603 2603 0x314b0, 0x314b4,
2604 2604 0x314c8, 0x314d4,
2605 2605 0x31a40, 0x31a4c,
2606 2606 0x31af0, 0x31b20,
2607 2607 0x31b38, 0x31b3c,
2608 2608 0x31b80, 0x31b80,
2609 2609 0x31ba8, 0x31ba8,
2610 2610 0x31bb0, 0x31bb4,
2611 2611 0x31bc8, 0x31bd4,
2612 2612 0x32140, 0x3218c,
2613 2613 0x321f0, 0x321f4,
2614 2614 0x32200, 0x32200,
2615 2615 0x32218, 0x32218,
2616 2616 0x32400, 0x32400,
2617 2617 0x32408, 0x3241c,
2618 2618 0x32618, 0x32620,
2619 2619 0x32664, 0x32664,
2620 2620 0x326a8, 0x326a8,
2621 2621 0x326ec, 0x326ec,
2622 2622 0x32a00, 0x32abc,
2623 2623 0x32b00, 0x32b18,
2624 2624 0x32b20, 0x32b38,
2625 2625 0x32b40, 0x32b58,
2626 2626 0x32b60, 0x32b78,
2627 2627 0x32c00, 0x32c00,
2628 2628 0x32c08, 0x32c3c,
2629 2629 0x33000, 0x3302c,
2630 2630 0x33034, 0x33050,
2631 2631 0x33058, 0x33058,
2632 2632 0x33060, 0x3308c,
2633 2633 0x3309c, 0x330ac,
2634 2634 0x330c0, 0x330c0,
2635 2635 0x330c8, 0x330d0,
2636 2636 0x330d8, 0x330e0,
2637 2637 0x330ec, 0x3312c,
2638 2638 0x33134, 0x33150,
2639 2639 0x33158, 0x33158,
2640 2640 0x33160, 0x3318c,
2641 2641 0x3319c, 0x331ac,
2642 2642 0x331c0, 0x331c0,
2643 2643 0x331c8, 0x331d0,
2644 2644 0x331d8, 0x331e0,
2645 2645 0x331ec, 0x33290,
2646 2646 0x33298, 0x332c4,
2647 2647 0x332e4, 0x33390,
2648 2648 0x33398, 0x333c4,
2649 2649 0x333e4, 0x3342c,
2650 2650 0x33434, 0x33450,
2651 2651 0x33458, 0x33458,
2652 2652 0x33460, 0x3348c,
2653 2653 0x3349c, 0x334ac,
2654 2654 0x334c0, 0x334c0,
2655 2655 0x334c8, 0x334d0,
2656 2656 0x334d8, 0x334e0,
2657 2657 0x334ec, 0x3352c,
2658 2658 0x33534, 0x33550,
2659 2659 0x33558, 0x33558,
2660 2660 0x33560, 0x3358c,
2661 2661 0x3359c, 0x335ac,
2662 2662 0x335c0, 0x335c0,
2663 2663 0x335c8, 0x335d0,
2664 2664 0x335d8, 0x335e0,
2665 2665 0x335ec, 0x33690,
2666 2666 0x33698, 0x336c4,
2667 2667 0x336e4, 0x33790,
2668 2668 0x33798, 0x337c4,
2669 2669 0x337e4, 0x337fc,
2670 2670 0x33814, 0x33814,
2671 2671 0x33854, 0x33868,
2672 2672 0x33880, 0x3388c,
2673 2673 0x338c0, 0x338d0,
2674 2674 0x338e8, 0x338ec,
2675 2675 0x33900, 0x3392c,
2676 2676 0x33934, 0x33950,
2677 2677 0x33958, 0x33958,
2678 2678 0x33960, 0x3398c,
2679 2679 0x3399c, 0x339ac,
2680 2680 0x339c0, 0x339c0,
2681 2681 0x339c8, 0x339d0,
2682 2682 0x339d8, 0x339e0,
2683 2683 0x339ec, 0x33a90,
2684 2684 0x33a98, 0x33ac4,
2685 2685 0x33ae4, 0x33b10,
2686 2686 0x33b24, 0x33b28,
2687 2687 0x33b38, 0x33b50,
2688 2688 0x33bf0, 0x33c10,
2689 2689 0x33c24, 0x33c28,
2690 2690 0x33c38, 0x33c50,
2691 2691 0x33cf0, 0x33cfc,
2692 2692 0x34000, 0x34030,
2693 2693 0x34100, 0x34168,
2694 2694 0x34190, 0x341a0,
2695 2695 0x341a8, 0x341b8,
2696 2696 0x341c4, 0x341c8,
2697 2697 0x341d0, 0x341d0,
2698 2698 0x34200, 0x34320,
2699 2699 0x34400, 0x344b4,
2700 2700 0x344c0, 0x3452c,
2701 2701 0x34540, 0x3461c,
2702 2702 0x34800, 0x348a0,
2703 2703 0x348c0, 0x34908,
2704 2704 0x34910, 0x349b8,
2705 2705 0x34a00, 0x34a04,
2706 2706 0x34a0c, 0x34a14,
2707 2707 0x34a1c, 0x34a2c,
2708 2708 0x34a44, 0x34a50,
2709 2709 0x34a74, 0x34a74,
2710 2710 0x34a7c, 0x34afc,
2711 2711 0x34b08, 0x34c24,
2712 2712 0x34d00, 0x34d14,
2713 2713 0x34d1c, 0x34d3c,
2714 2714 0x34d44, 0x34d4c,
2715 2715 0x34d54, 0x34d74,
2716 2716 0x34d7c, 0x34d7c,
2717 2717 0x34de0, 0x34de0,
2718 2718 0x34e00, 0x34ed4,
2719 2719 0x34f00, 0x34fa4,
2720 2720 0x34fc0, 0x34fc4,
2721 2721 0x35000, 0x35004,
2722 2722 0x35080, 0x350fc,
2723 2723 0x35208, 0x35220,
2724 2724 0x3523c, 0x35254,
2725 2725 0x35300, 0x35300,
2726 2726 0x35308, 0x3531c,
2727 2727 0x35338, 0x3533c,
2728 2728 0x35380, 0x35380,
2729 2729 0x35388, 0x353a8,
2730 2730 0x353b4, 0x353b4,
2731 2731 0x35400, 0x35420,
2732 2732 0x35438, 0x3543c,
2733 2733 0x35480, 0x35480,
2734 2734 0x354a8, 0x354a8,
2735 2735 0x354b0, 0x354b4,
2736 2736 0x354c8, 0x354d4,
2737 2737 0x35a40, 0x35a4c,
2738 2738 0x35af0, 0x35b20,
2739 2739 0x35b38, 0x35b3c,
2740 2740 0x35b80, 0x35b80,
2741 2741 0x35ba8, 0x35ba8,
2742 2742 0x35bb0, 0x35bb4,
2743 2743 0x35bc8, 0x35bd4,
2744 2744 0x36140, 0x3618c,
2745 2745 0x361f0, 0x361f4,
2746 2746 0x36200, 0x36200,
2747 2747 0x36218, 0x36218,
2748 2748 0x36400, 0x36400,
2749 2749 0x36408, 0x3641c,
2750 2750 0x36618, 0x36620,
2751 2751 0x36664, 0x36664,
2752 2752 0x366a8, 0x366a8,
2753 2753 0x366ec, 0x366ec,
2754 2754 0x36a00, 0x36abc,
2755 2755 0x36b00, 0x36b18,
2756 2756 0x36b20, 0x36b38,
2757 2757 0x36b40, 0x36b58,
2758 2758 0x36b60, 0x36b78,
2759 2759 0x36c00, 0x36c00,
2760 2760 0x36c08, 0x36c3c,
2761 2761 0x37000, 0x3702c,
2762 2762 0x37034, 0x37050,
2763 2763 0x37058, 0x37058,
2764 2764 0x37060, 0x3708c,
2765 2765 0x3709c, 0x370ac,
2766 2766 0x370c0, 0x370c0,
2767 2767 0x370c8, 0x370d0,
2768 2768 0x370d8, 0x370e0,
2769 2769 0x370ec, 0x3712c,
2770 2770 0x37134, 0x37150,
2771 2771 0x37158, 0x37158,
2772 2772 0x37160, 0x3718c,
2773 2773 0x3719c, 0x371ac,
2774 2774 0x371c0, 0x371c0,
2775 2775 0x371c8, 0x371d0,
2776 2776 0x371d8, 0x371e0,
2777 2777 0x371ec, 0x37290,
2778 2778 0x37298, 0x372c4,
2779 2779 0x372e4, 0x37390,
2780 2780 0x37398, 0x373c4,
2781 2781 0x373e4, 0x3742c,
2782 2782 0x37434, 0x37450,
2783 2783 0x37458, 0x37458,
2784 2784 0x37460, 0x3748c,
2785 2785 0x3749c, 0x374ac,
2786 2786 0x374c0, 0x374c0,
2787 2787 0x374c8, 0x374d0,
2788 2788 0x374d8, 0x374e0,
2789 2789 0x374ec, 0x3752c,
2790 2790 0x37534, 0x37550,
2791 2791 0x37558, 0x37558,
2792 2792 0x37560, 0x3758c,
2793 2793 0x3759c, 0x375ac,
2794 2794 0x375c0, 0x375c0,
2795 2795 0x375c8, 0x375d0,
2796 2796 0x375d8, 0x375e0,
2797 2797 0x375ec, 0x37690,
2798 2798 0x37698, 0x376c4,
2799 2799 0x376e4, 0x37790,
2800 2800 0x37798, 0x377c4,
2801 2801 0x377e4, 0x377fc,
2802 2802 0x37814, 0x37814,
2803 2803 0x37854, 0x37868,
2804 2804 0x37880, 0x3788c,
2805 2805 0x378c0, 0x378d0,
2806 2806 0x378e8, 0x378ec,
2807 2807 0x37900, 0x3792c,
2808 2808 0x37934, 0x37950,
2809 2809 0x37958, 0x37958,
2810 2810 0x37960, 0x3798c,
2811 2811 0x3799c, 0x379ac,
2812 2812 0x379c0, 0x379c0,
2813 2813 0x379c8, 0x379d0,
2814 2814 0x379d8, 0x379e0,
2815 2815 0x379ec, 0x37a90,
2816 2816 0x37a98, 0x37ac4,
2817 2817 0x37ae4, 0x37b10,
2818 2818 0x37b24, 0x37b28,
2819 2819 0x37b38, 0x37b50,
2820 2820 0x37bf0, 0x37c10,
2821 2821 0x37c24, 0x37c28,
2822 2822 0x37c38, 0x37c50,
2823 2823 0x37cf0, 0x37cfc,
2824 2824 0x40040, 0x40040,
2825 2825 0x40080, 0x40084,
2826 2826 0x40100, 0x40100,
2827 2827 0x40140, 0x401bc,
2828 2828 0x40200, 0x40214,
2829 2829 0x40228, 0x40228,
2830 2830 0x40240, 0x40258,
2831 2831 0x40280, 0x40280,
2832 2832 0x40304, 0x40304,
2833 2833 0x40330, 0x4033c,
2834 2834 0x41304, 0x413c8,
2835 2835 0x413d0, 0x413dc,
2836 2836 0x413f0, 0x413f0,
2837 2837 0x41400, 0x4140c,
2838 2838 0x41414, 0x4141c,
2839 2839 0x41480, 0x414d0,
2840 2840 0x44000, 0x4407c,
2841 2841 0x440c0, 0x441ac,
2842 2842 0x441b4, 0x4427c,
2843 2843 0x442c0, 0x443ac,
2844 2844 0x443b4, 0x4447c,
2845 2845 0x444c0, 0x445ac,
2846 2846 0x445b4, 0x4467c,
2847 2847 0x446c0, 0x447ac,
2848 2848 0x447b4, 0x4487c,
2849 2849 0x448c0, 0x449ac,
2850 2850 0x449b4, 0x44a7c,
2851 2851 0x44ac0, 0x44bac,
2852 2852 0x44bb4, 0x44c7c,
2853 2853 0x44cc0, 0x44dac,
2854 2854 0x44db4, 0x44e7c,
2855 2855 0x44ec0, 0x44fac,
2856 2856 0x44fb4, 0x4507c,
2857 2857 0x450c0, 0x451ac,
2858 2858 0x451b4, 0x451fc,
2859 2859 0x45800, 0x45804,
2860 2860 0x45810, 0x45830,
2861 2861 0x45840, 0x45860,
2862 2862 0x45868, 0x45868,
2863 2863 0x45880, 0x45884,
2864 2864 0x458a0, 0x458b0,
2865 2865 0x45a00, 0x45a04,
2866 2866 0x45a10, 0x45a30,
2867 2867 0x45a40, 0x45a60,
2868 2868 0x45a68, 0x45a68,
2869 2869 0x45a80, 0x45a84,
2870 2870 0x45aa0, 0x45ab0,
2871 2871 0x460c0, 0x460e4,
2872 2872 0x47000, 0x4703c,
2873 2873 0x47044, 0x4708c,
2874 2874 0x47200, 0x47250,
2875 2875 0x47400, 0x47408,
2876 2876 0x47414, 0x47420,
2877 2877 0x47600, 0x47618,
2878 2878 0x47800, 0x47814,
2879 2879 0x47820, 0x4782c,
2880 2880 0x50000, 0x50084,
2881 2881 0x50090, 0x500cc,
2882 2882 0x50300, 0x50384,
2883 2883 0x50400, 0x50400,
2884 2884 0x50800, 0x50884,
2885 2885 0x50890, 0x508cc,
2886 2886 0x50b00, 0x50b84,
2887 2887 0x50c00, 0x50c00,
2888 2888 0x51000, 0x51020,
2889 2889 0x51028, 0x510b0,
2890 2890 0x51300, 0x51324,
2891 2891 };
2892 2892
2893 2893 u32 *buf_end = (u32 *)((char *)buf + buf_size);
2894 2894 const unsigned int *reg_ranges;
2895 2895 int reg_ranges_size, range;
2896 2896 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2897 2897
2898 2898 /* Select the right set of register ranges to dump depending on the
2899 2899 * adapter chip type.
2900 2900 */
2901 2901 switch (chip_version) {
2902 2902 case CHELSIO_T4:
2903 2903 reg_ranges = t4_reg_ranges;
2904 2904 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2905 2905 break;
2906 2906
2907 2907 case CHELSIO_T5:
2908 2908 reg_ranges = t5_reg_ranges;
2909 2909 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2910 2910 break;
2911 2911
2912 2912 case CHELSIO_T6:
2913 2913 reg_ranges = t6_reg_ranges;
2914 2914 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2915 2915 break;
2916 2916
2917 2917 default:
2918 2918 CH_ERR(adap,
2919 2919 "Unsupported chip version %d\n", chip_version);
2920 2920 return;
2921 2921 }
2922 2922
2923 2923 /* Clear the register buffer and insert the appropriate register
2924 2924 * values selected by the above register ranges.
2925 2925 */
2926 2926 memset(buf, 0, buf_size);
2927 2927 for (range = 0; range < reg_ranges_size; range += 2) {
2928 2928 unsigned int reg = reg_ranges[range];
2929 2929 unsigned int last_reg = reg_ranges[range + 1];
2930 2930 u32 *bufp = (u32 *)((char *)buf + reg);
2931 2931
2932 2932 /* Iterate across the register range filling in the register
2933 2933 * buffer but don't write past the end of the register buffer.
2934 2934 */
2935 2935 while (reg <= last_reg && bufp < buf_end) {
2936 2936 *bufp++ = t4_read_reg(adap, reg);
2937 2937 reg += sizeof(u32);
2938 2938 }
2939 2939 }
2940 2940 }
2941 2941
2942 2942 /*
2943 2943 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2944 2944 */
2945 2945 #define EEPROM_DELAY 10 // 10us per poll spin
2946 2946 #define EEPROM_MAX_POLL 5000 // x 5000 == 50ms
2947 2947
2948 2948 #define EEPROM_STAT_ADDR 0x7bfc
2949 2949 #define VPD_SIZE 0x800
2950 2950 #define VPD_BASE 0x400
2951 2951 #define VPD_BASE_OLD 0
2952 2952 #define VPD_LEN 1024
2953 2953 #define VPD_INFO_FLD_HDR_SIZE 3
2954 2954 #define CHELSIO_VPD_UNIQUE_ID 0x82
2955 2955
2956 2956 /*
2957 2957 * Small utility function to wait till any outstanding VPD Access is complete.
2958 2958 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2959 2959 * VPD Access in flight. This allows us to handle the problem of having a
2960 2960 * previous VPD Access time out and prevent an attempt to inject a new VPD
2961 2961 * Request before any in-flight VPD request has completed.
2962 2962 */
2963 2963 static int t4_seeprom_wait(struct adapter *adapter)
2964 2964 {
2965 2965 unsigned int base = adapter->params.pci.vpd_cap_addr;
2966 2966 int max_poll;
2967 2967
2968 2968 /*
2969 2969 * If no VPD Access is in flight, we can just return success right
2970 2970 * away.
2971 2971 */
2972 2972 if (!adapter->vpd_busy)
2973 2973 return 0;
2974 2974
2975 2975 /*
2976 2976 * Poll the VPD Capability Address/Flag register waiting for it
2977 2977 * to indicate that the operation is complete.
2978 2978 */
2979 2979 max_poll = EEPROM_MAX_POLL;
2980 2980 do {
2981 2981 u16 val;
2982 2982
2983 2983 udelay(EEPROM_DELAY);
2984 2984 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2985 2985
2986 2986 /*
2987 2987 * If the operation is complete, mark the VPD as no longer
2988 2988 * busy and return success.
2989 2989 */
2990 2990 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2991 2991 adapter->vpd_busy = 0;
2992 2992 return 0;
2993 2993 }
2994 2994 } while (--max_poll);
2995 2995
2996 2996 /*
2997 2997 * Failure! Note that we leave the VPD Busy status set in order to
2998 2998 * avoid pushing a new VPD Access request into the VPD Capability till
2999 2999 * the current operation eventually succeeds. It's a bug to issue a
3000 3000 * new request when an existing request is in flight and will result
3001 3001 * in corrupt hardware state.
3002 3002 */
3003 3003 return -ETIMEDOUT;
3004 3004 }
3005 3005
/**
 * t4_seeprom_read - read a serial EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
 * VPD capability. Note that this function must be called with a virtual
 * address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Read request, mark the VPD as being busy and wait
	 * for our request to complete.  If it doesn't complete, note the
	 * error and return it to our caller.  Note that we do not reset the
	 * VPD Busy status!
	 *
	 * Writing the address with the PCI_VPD_ADDR_F flag clear starts a
	 * read; the hardware signals completion by setting the flag, which
	 * is why vpd_flag is set to PCI_VPD_ADDR_F here (t4_seeprom_wait()
	 * polls until the flag matches vpd_flag).
	 */
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = PCI_VPD_ADDR_F;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Grab the returned data, swizzle it into our endianness and
	 * return success.
	 */
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}
3060 3060
/**
 * t4_seeprom_write - write a serial EEPROM location
 * @adapter: adapter to write
 * @addr: virtual EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
 * VPD capability. Note that this function must be called with a virtual
 * address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;
	u32 stats_reg;
	int max_poll;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Write request, mark the VPD as being busy and
	 * wait for our request to complete.  If it doesn't complete, note
	 * the error and return it to our caller.  Note that we do not reset
	 * the VPD Busy status!
	 *
	 * A write is started by setting the PCI_VPD_ADDR_F flag along with
	 * the address; the hardware signals completion by clearing the flag,
	 * which is why vpd_flag is 0 here (t4_seeprom_wait() polls until the
	 * flag matches vpd_flag).
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
			     cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
			     (u16)addr | PCI_VPD_ADDR_F);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = 0;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Reset PCI_VPD_DATA register after a transaction and wait for our
	 * request to complete.  If it doesn't complete, return error.
	 * The EEPROM status word at EEPROM_STAT_ADDR is polled until bit 0
	 * indicates the device has finished committing the write.
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
	max_poll = EEPROM_MAX_POLL;
	do {
		udelay(EEPROM_DELAY);
		ret = t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
		if (!ret && (stats_reg & 0x1))
			break;
	} while (--max_poll);
	if (!max_poll)
		return -ETIMEDOUT;

	/* Return success! */
	return 0;
}
3130 3130
3131 3131 /**
3132 3132 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
3133 3133 * @phys_addr: the physical EEPROM address
3134 3134 * @fn: the PCI function number
3135 3135 * @sz: size of function-specific area
3136 3136 *
3137 3137 * Translate a physical EEPROM address to virtual. The first 1K is
3138 3138 * accessed through virtual addresses starting at 31K, the rest is
3139 3139 * accessed through virtual addresses starting at 0.
3140 3140 *
3141 3141 * The mapping is as follows:
3142 3142 * [0..1K) -> [31K..32K)
3143 3143 * [1K..1K+A) -> [ES-A..ES)
3144 3144 * [1K+A..ES) -> [0..ES-A-1K)
3145 3145 *
3146 3146 * where A = @fn * @sz, and ES = EEPROM size.
3147 3147 */
3148 3148 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
3149 3149 {
3150 3150 fn *= sz;
3151 3151 if (phys_addr < 1024)
3152 3152 return phys_addr + (31 << 10);
3153 3153 if (phys_addr < 1024 + fn)
3154 3154 return EEPROMSIZE - fn + phys_addr - 1024;
3155 3155 if (phys_addr < EEPROMSIZE)
3156 3156 return phys_addr - 1024 - fn;
3157 3157 return -EINVAL;
3158 3158 }
3159 3159
3160 3160 /**
3161 3161 * t4_seeprom_wp - enable/disable EEPROM write protection
3162 3162 * @adapter: the adapter
3163 3163 * @enable: whether to enable or disable write protection
3164 3164 *
3165 3165 * Enables or disables write protection on the serial EEPROM.
3166 3166 */
3167 3167 int t4_seeprom_wp(struct adapter *adapter, int enable)
3168 3168 {
3169 3169 return t4_os_pci_write_seeprom(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
3170 3170 }
3171 3171
3172 3172 /**
3173 3173 * get_vpd_keyword_val - Locates an information field keyword in the VPD
3174 3174 * @v: Pointer to buffered vpd data structure
3175 3175 * @kw: The keyword to search for
3176 3176 *
3177 3177 * Returns the value of the information field keyword or
3178 3178 * -ENOENT otherwise.
3179 3179 */
3180 3180 int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
3181 3181 {
3182 3182 int i;
3183 3183 unsigned int offset , len;
3184 3184 const u8 *buf = (const u8 *)v;
3185 3185 const u8 *vpdr_len = &v->vpdr_len[0];
3186 3186 offset = sizeof(struct t4_vpd_hdr);
3187 3187 len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
3188 3188
3189 3189 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
3190 3190 return -ENOENT;
3191 3191 }
3192 3192
3193 3193 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
3194 3194 if(memcmp(buf + i , kw , 2) == 0){
3195 3195 i += VPD_INFO_FLD_HDR_SIZE;
3196 3196 return i;
3197 3197 }
3198 3198
3199 3199 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
3200 3200 }
3201 3201
3202 3202 return -ENOENT;
3203 3203 }
3204 3204
/*
 * str_strip
 * Removes trailing whitespaces from string "s"
 * Based on strstrip() implementation in string.c
 */
static void str_strip(char *s)
{
	size_t size;
	char *end;

	size = strlen(s);
	if (!size)
		return;

	end = s + size - 1;
	/*
	 * Cast through unsigned char: passing a plain char that holds a
	 * negative value to isspace() is undefined behavior (CERT STR37-C),
	 * and VPD bytes may be arbitrary 8-bit data.
	 */
	while (end >= s && isspace((unsigned char)*end))
		end--;
	*(end + 1) = '\0';
}
3224 3224
/**
 * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.  The ID, EC, SN, PN and NA
 * fields are extracted into @p with trailing whitespace stripped.
 * Returns 0 on success or a negative error.
 */
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret = 0, addr;
	int ec, sn, pn, na;		/* offsets of VPD keyword values */
	u8 *vpd, csum;
	const struct t4_vpd_hdr *v;

	vpd = (u8 *)t4_os_alloc(sizeof(u8) * VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	/* We have two VPD data structures stored in the adapter VPD area.
	 * By default, Linux calculates the size of the VPD area by traversing
	 * the first VPD area at offset 0x0, so we need to tell the OS what
	 * our real VPD size is.
	 */
	ret = t4_os_pci_set_vpd_size(adapter, VPD_SIZE);
	if (ret < 0)
		goto out;

	/* Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_os_pci_read_seeprom(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Pull the whole VPD area into the local buffer, 4 bytes at a time. */
	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_os_pci_read_seeprom(adapter, addr+i, (u32 *)(vpd+i));
		if (ret)
			goto out;
	}
	v = (const struct t4_vpd_hdr *)vpd;

/* Locate keyword @name in the VPD, storing its value offset in @var;
 * on failure log the missing keyword and bail out through "out".
 */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(v , name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
} while (0)

	/* The bytes from the start of the VPD up to and including the "RV"
	 * checksum byte must sum to zero (mod 256).
	 */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	/* Copy each field out, clamped to its destination size, and strip
	 * trailing whitespace.  The field's actual length lives in the
	 * third byte of its header (offset value - VPD_INFO_FLD_HDR_SIZE + 2).
	 */
	memcpy(p->id, v->id_data, ID_LEN);
	str_strip((char *)p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	str_strip((char *)p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	str_strip((char *)p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	str_strip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	str_strip((char *)p->na);

out:
	kmem_free(vpd, sizeof(u8) * VPD_LEN);
	return ret < 0 ? ret : 0;
}
3318 3318
3319 3319 /**
3320 3320 * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
3321 3321 * @adapter: adapter to read
3322 3322 * @p: where to store the parameters
3323 3323 *
3324 3324 * Reads card parameters stored in VPD EEPROM and retrieves the Core
3325 3325 * Clock. This can only be called after a connection to the firmware
3326 3326 * is established.
3327 3327 */
3328 3328 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
3329 3329 {
3330 3330 u32 cclk_param, cclk_val;
3331 3331 int ret;
3332 3332
3333 3333 /*
3334 3334 * Grab the raw VPD parameters.
3335 3335 */
3336 3336 ret = t4_get_raw_vpd_params(adapter, p);
3337 3337 if (ret)
3338 3338 return ret;
3339 3339
3340 3340 /*
3341 3341 * Ask firmware for the Core Clock since it knows how to translate the
3342 3342 * Reference Clock ('V2') VPD field into a Core Clock value ...
3343 3343 */
3344 3344 cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3345 3345 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
3346 3346 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3347 3347 1, &cclk_param, &cclk_val);
3348 3348
3349 3349 if (ret)
3350 3350 return ret;
3351 3351 p->cclk = cclk_val;
3352 3352
3353 3353 return 0;
3354 3354 }
3355 3355
/* serial flash and firmware constants and flash config file constants.
 * The opcodes below are issued as the first byte of each flash
 * transaction via sf1_write() (see sf1_read()/sf1_write() below).
 */
enum {
	SF_ATTEMPTS = 10,		/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,		/* program page */
	SF_WR_DISABLE = 4,		/* disable writes */
	SF_RD_STATUS = 5,		/* read status register */
	SF_WR_ENABLE = 6,		/* enable writes */
	SF_RD_DATA_FAST = 0xb,		/* read flash */
	SF_RD_ID = 0x9f,		/* read ID */
	SF_ERASE_SECTOR = 0xd8,		/* erase sector */
};
3369 3369
/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	/* The SF interface transfers 1-4 bytes per operation. */
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	/* Don't start a new operation while one is still in flight. */
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	/* Kick off the read and wait for BUSY to clear. */
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}
3398 3398
/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	/* The SF interface transfers 1-4 bytes per operation. */
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	/* Don't start a new operation while one is still in flight. */
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	/* Stage the data first, then kick off the write (V_OP(1)). */
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}
3423 3423
/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @ch_delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register:
 * the flash is busy while bit 0 of its status register is set.
 * Returns 0 on completion, -EAGAIN if @attempts polls are exhausted, or
 * a negative error from the underlying SF accesses.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int ch_delay)
{
	int ret;
	u32 status;

	while (1) {
		/* Issue RD_STATUS, then read back one status byte. */
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (ch_delay) {
#ifdef CONFIG_CUDBG
			/* In crash-dump context we can't sleep; spin. */
			if (adapter->flags & K_CRASH)
				mdelay(ch_delay);
			else
#endif
				msleep(ch_delay);
		}
	}
}
3455 3455
/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* The whole read must fit in flash and be 4-byte aligned. */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* Build the command word: byte-swapped address | FAST_READ opcode. */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/* Send the command+address, then one dummy byte required by
	 * the FAST_READ opcode, keeping the transaction chained.
	 */
	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* Chain all reads except the last; lock on the last one. */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}
3494 3494
/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 * If @byte_oriented is set the write data is stored as byte stream
 * (i.e. matches what on disk), otherwise in big-endian.
 */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
		   unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[64];				/* holds a full 256-byte page */
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* Write must stay within the flash and within one page. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	/* Enable writes, then send the PROG_PAGE command + address. */
	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the data out, up to 4 bytes per SF operation. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* data was advanced by n in the loop above, so data - n is the
	 * original start of the caller's buffer.
	 */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}
3560 3560
3561 3561 /**
3562 3562 * t4_get_fw_version - read the firmware version
3563 3563 * @adapter: the adapter
3564 3564 * @vers: where to place the version
3565 3565 *
3566 3566 * Reads the FW version from flash.
3567 3567 */
3568 3568 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3569 3569 {
3570 3570 return t4_read_flash(adapter, FLASH_FW_START +
3571 3571 offsetof(struct fw_hdr, fw_ver), 1,
3572 3572 vers, 0);
3573 3573 }
3574 3574
3575 3575 /**
3576 3576 * t4_get_bs_version - read the firmware bootstrap version
3577 3577 * @adapter: the adapter
3578 3578 * @vers: where to place the version
3579 3579 *
3580 3580 * Reads the FW Bootstrap version from flash.
3581 3581 */
3582 3582 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3583 3583 {
3584 3584 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3585 3585 offsetof(struct fw_hdr, fw_ver), 1,
3586 3586 vers, 0);
3587 3587 }
3588 3588
3589 3589 /**
3590 3590 * t4_get_tp_version - read the TP microcode version
3591 3591 * @adapter: the adapter
3592 3592 * @vers: where to place the version
3593 3593 *
3594 3594 * Reads the TP microcode version from flash.
3595 3595 */
3596 3596 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3597 3597 {
3598 3598 return t4_read_flash(adapter, FLASH_FW_START +
3599 3599 offsetof(struct fw_hdr, tp_microcode_ver),
3600 3600 1, vers, 0);
3601 3601 }
3602 3602
3603 3603 /**
3604 3604 * t4_get_exprom_version - return the Expansion ROM version (if any)
3605 3605 * @adapter: the adapter
3606 3606 * @vers: where to place the version
3607 3607 *
3608 3608 * Reads the Expansion ROM header from FLASH and returns the version
3609 3609 * number (if present) through the @vers return value pointer. We return
3610 3610 * this in the Firmware Version Format since it's convenient. Return
3611 3611 * 0 on success, -ENOENT if no Expansion ROM is present.
3612 3612 */
3613 3613 int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
3614 3614 {
3615 3615 struct exprom_header {
3616 3616 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3617 3617 unsigned char hdr_ver[4]; /* Expansion ROM version */
3618 3618 } *hdr;
3619 3619 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3620 3620 sizeof(u32))];
3621 3621 int ret;
3622 3622
3623 3623 ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
3624 3624 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3625 3625 0);
3626 3626 if (ret)
3627 3627 return ret;
3628 3628
3629 3629 hdr = (struct exprom_header *)exprom_header_buf;
3630 3630 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3631 3631 return -ENOENT;
3632 3632
3633 3633 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3634 3634 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3635 3635 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3636 3636 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3637 3637 return 0;
3638 3638 }
3639 3639
3640 3640 /**
3641 3641 * t4_get_scfg_version - return the Serial Configuration version
3642 3642 * @adapter: the adapter
3643 3643 * @vers: where to place the version
3644 3644 *
3645 3645 * Reads the Serial Configuration Version via the Firmware interface
3646 3646 * (thus this can only be called once we're ready to issue Firmware
3647 3647 * commands). The format of the Serial Configuration version is
3648 3648 * adapter specific. Returns 0 on success, an error on failure.
3649 3649 *
3650 3650 * Note that early versions of the Firmware didn't include the ability
3651 3651 * to retrieve the Serial Configuration version, so we zero-out the
3652 3652 * return-value parameter in that case to avoid leaving it with
3653 3653 * garbage in it.
3654 3654 *
3655 3655 * Also note that the Firmware will return its cached copy of the Serial
3656 3656 * Initialization Revision ID, not the actual Revision ID as written in
3657 3657 * the Serial EEPROM. This is only an issue if a new VPD has been written
3658 3658 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3659 3659 * it's best to defer calling this routine till after a FW_RESET_CMD has
3660 3660 * been issued if the Host Driver will be performing a full adapter
3661 3661 * initialization.
3662 3662 */
3663 3663 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3664 3664 {
3665 3665 u32 scfgrev_param;
3666 3666 int ret;
3667 3667
3668 3668 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3669 3669 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3670 3670 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3671 3671 1, &scfgrev_param, vers);
3672 3672 if (ret)
3673 3673 *vers = 0;
3674 3674 return ret;
3675 3675 }
3676 3676
3677 3677 /**
3678 3678 * t4_get_vpd_version - return the VPD version
3679 3679 * @adapter: the adapter
3680 3680 * @vers: where to place the version
3681 3681 *
3682 3682 * Reads the VPD via the Firmware interface (thus this can only be called
3683 3683 * once we're ready to issue Firmware commands). The format of the
3684 3684 * VPD version is adapter specific. Returns 0 on success, an error on
3685 3685 * failure.
3686 3686 *
3687 3687 * Note that early versions of the Firmware didn't include the ability
3688 3688 * to retrieve the VPD version, so we zero-out the return-value parameter
3689 3689 * in that case to avoid leaving it with garbage in it.
3690 3690 *
3691 3691 * Also note that the Firmware will return its cached copy of the VPD
3692 3692 * Revision ID, not the actual Revision ID as written in the Serial
3693 3693 * EEPROM. This is only an issue if a new VPD has been written and the
3694 3694 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3695 3695 * to defer calling this routine till after a FW_RESET_CMD has been issued
3696 3696 * if the Host Driver will be performing a full adapter initialization.
3697 3697 */
3698 3698 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3699 3699 {
3700 3700 u32 vpdrev_param;
3701 3701 int ret;
3702 3702
3703 3703 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3704 3704 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3705 3705 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3706 3706 1, &vpdrev_param, vers);
3707 3707 if (ret)
3708 3708 *vers = 0;
3709 3709 return ret;
3710 3710 }
3711 3711
3712 3712 /**
3713 3713 * t4_get_version_info - extract various chip/firmware version information
3714 3714 * @adapter: the adapter
3715 3715 *
3716 3716 * Reads various chip/firmware version numbers and stores them into the
3717 3717 * adapter Adapter Parameters structure. If any of the efforts fails
3718 3718 * the first failure will be returned, but all of the version numbers
3719 3719 * will be read.
3720 3720 */
3721 3721 int t4_get_version_info(struct adapter *adapter)
3722 3722 {
3723 3723 int ret = 0;
3724 3724
3725 3725 #define FIRST_RET(__getvinfo) \
3726 3726 do { \
3727 3727 int __ret = __getvinfo; \
3728 3728 if (__ret && !ret) \
3729 3729 ret = __ret; \
3730 3730 } while (0)
3731 3731
3732 3732 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3733 3733 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3734 3734 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3735 3735 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3736 3736 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3737 3737 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3738 3738
3739 3739 #undef FIRST_RET
3740 3740
3741 3741 return ret;
3742 3742 }
3743 3743
/**
 * t4_dump_version_info - dump all of the adapter configuration IDs
 * @adapter: the adapter
 *
 * Dumps all of the various bits of adapter configuration version/revision
 * IDs information.  This is typically called at some point after
 * t4_get_version_info() has been called.  A zero version value is taken
 * to mean the corresponding component is absent/not loaded.
 */
void t4_dump_version_info(struct adapter *adapter)
{
	/*
	 * Device information.
	 */
	CH_INFO(adapter, "Chelsio %s rev %d\n",
		adapter->params.vpd.id,
		CHELSIO_CHIP_RELEASE(adapter->params.chip));
	CH_INFO(adapter, "S/N: %s, P/N: %s\n",
		adapter->params.vpd.sn,
		adapter->params.vpd.pn);

	/*
	 * Firmware Version.
	 */
	if (!adapter->params.fw_vers)
		CH_WARN(adapter, "No firmware loaded\n");
	else
		CH_INFO(adapter, "Firmware version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));

	/*
	 * Bootstrap Firmware Version. (Some adapters don't have Bootstrap
	 * Firmware, so dev_info() is more appropriate here.)
	 */
	if (!adapter->params.bs_vers)
		CH_INFO(adapter, "No bootstrap loaded\n");
	else
		CH_INFO(adapter, "Bootstrap version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));

	/*
	 * TP Microcode Version.
	 */
	if (!adapter->params.tp_vers)
		CH_WARN(adapter, "No TP Microcode loaded\n");
	else
		CH_INFO(adapter, "TP Microcode version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));

	/*
	 * Expansion ROM version.
	 */
	if (!adapter->params.er_vers)
		CH_INFO(adapter, "No Expansion ROM loaded\n");
	else
		CH_INFO(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));


	/*
	 * Serial Configuration version.
	 */
	CH_INFO(adapter, "Serial Configuration version: %x\n",
		adapter->params.scfg_vers);

	/*
	 * VPD version.
	 */
	CH_INFO(adapter, "VPD version: %x\n",
		adapter->params.vpd_vers);
}
3826 3826
3827 3827 /**
3828 3828 * t4_check_fw_version - check if the FW is supported with this driver
3829 3829 * @adap: the adapter
3830 3830 *
3831 3831 * Checks if an adapter's FW is compatible with the driver. Returns 0
3832 3832 * if there's exact match, a negative error if the version could not be
3833 3833 * read or there's a major version mismatch
3834 3834 */
3835 3835 int t4_check_fw_version(struct adapter *adap)
3836 3836 {
3837 3837 int ret, major, minor, micro;
3838 3838 int exp_major, exp_minor, exp_micro;
3839 3839 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3840 3840
3841 3841 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3842 3842 if (ret)
3843 3843 return ret;
3844 3844
3845 3845 major = G_FW_HDR_FW_VER_MAJOR(adap->params.fw_vers);
3846 3846 minor = G_FW_HDR_FW_VER_MINOR(adap->params.fw_vers);
3847 3847 micro = G_FW_HDR_FW_VER_MICRO(adap->params.fw_vers);
3848 3848
3849 3849 switch (chip_version) {
3850 3850 case CHELSIO_T4:
3851 3851 exp_major = T4FW_MIN_VERSION_MAJOR;
3852 3852 exp_minor = T4FW_MIN_VERSION_MINOR;
3853 3853 exp_micro = T4FW_MIN_VERSION_MICRO;
3854 3854 break;
3855 3855 case CHELSIO_T5:
3856 3856 exp_major = T5FW_MIN_VERSION_MAJOR;
3857 3857 exp_minor = T5FW_MIN_VERSION_MINOR;
3858 3858 exp_micro = T5FW_MIN_VERSION_MICRO;
3859 3859 break;
3860 3860 case CHELSIO_T6:
3861 3861 exp_major = T6FW_MIN_VERSION_MAJOR;
3862 3862 exp_minor = T6FW_MIN_VERSION_MINOR;
3863 3863 exp_micro = T6FW_MIN_VERSION_MICRO;
3864 3864 break;
3865 3865 default:
3866 3866 CH_ERR(adap, "Unsupported chip type, %x\n",
3867 3867 adap->params.chip);
3868 3868 return -EINVAL;
3869 3869 }
3870 3870
3871 3871 if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3872 3872 (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3873 3873 CH_ERR(adap, "Card has firmware version %u.%u.%u, minimum "
3874 3874 "supported firmware is %u.%u.%u.\n", major, minor,
3875 3875 micro, exp_major, exp_minor, exp_micro);
3876 3876 return -EFAULT;
3877 3877 }
3878 3878 return 0;
3879 3879 }
3880 3880
/* Is the given firmware API compatible with the one the driver was compiled
 * with?  Returns 1 when compatible, 0 otherwise.
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{

	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

	/*
	 * XXX: Is this too conservative?  Perhaps I should limit this to the
	 * features that are supported in the driver.
	 */
	/* Same chip and identical interface versions for every ULP type. */
#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}
3904 3904
3905 3905 /* The firmware in the filesystem is usable, but should it be installed?
3906 3906 * This routine explains itself in detail if it indicates the filesystem
3907 3907 * firmware should be installed.
3908 3908 */
3909 3909 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
3910 3910 int k, int c, int t4_fw_install)
3911 3911 {
3912 3912 const char *reason;
3913 3913
3914 3914 if (!card_fw_usable) {
3915 3915 reason = "incompatible or unusable";
3916 3916 goto install;
3917 3917 }
3918 3918
3919 3919 if (k > c) {
3920 3920 reason = "older than the version bundled with this driver";
3921 3921 goto install;
3922 3922 }
3923 3923
3924 3924 if (t4_fw_install == 2 && k != c) {
3925 3925 reason = "different than the version bundled with this driver";
3926 3926 goto install;
3927 3927 }
3928 3928
3929 3929 return 0;
3930 3930
3931 3931 install:
3932 3932 if (t4_fw_install == 0) {
3933 3933 CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
3934 3934 "but the driver is prohibited from installing a "
3935 3935 "different firmware on the card.\n",
3936 3936 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3937 3937 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
3938 3938 reason);
3939 3939
3940 3940 return (0);
3941 3941 }
3942 3942
3943 3943 CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
3944 3944 "installing firmware %u.%u.%u.%u on card.\n",
3945 3945 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3946 3946 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
3947 3947 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3948 3948 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3949 3949
3950 3950 return 1;
3951 3951 }
3952 3952
/*
 * t4_prep_fw - pick a usable firmware, upgrading the card if appropriate
 * @adap: the adapter
 * @fw_info: driver firmware info; fw_info->fw_hdr is the header of the
 *	firmware this driver was compiled against
 * @fw_data: firmware image from the filesystem, or NULL if none available
 * @fw_size: size of @fw_data in bytes
 * @card_fw: caller-supplied buffer; receives the header of the firmware
 *	currently in flash (and is updated if a new image is installed)
 * @t4_fw_install: install policy (0 = never install; 2 = install whenever
 *	the filesystem version differs; otherwise install only if newer —
 *	see should_install_fs_fw())
 * @state: current device state; upgrades only happen in DEV_STATE_UNINIT
 * @reset: in/out; cleared when a firmware upgrade already reset the card
 *
 * Returns 0 on success.  On failure, returns a nonzero value: positive
 * EINVAL when no usable firmware can be found, or the (negated) error
 * from the flash read / upgrade path — NOTE(review): callers presumably
 * only test for nonzero; confirm sign conventions before relying on them.
 */
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, const int t4_fw_install,
	       enum dev_state state, int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
	    sizeof(*card_fw) / sizeof(uint32_t),
	    (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		CH_ERR(adap,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	/* Check whether a filesystem firmware image was supplied and is
	 * API-compatible with the driver. */
	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.  Note that t4_fw_install = 2
		 * is ignored here -- use cxgbtool loadfw if you want to
		 * reinstall the same firmware as the one on the card.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver),
					t4_fw_install)) {
		/* Flash the filesystem image onto the card. */
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			CH_ERR(adap,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update cached information */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		(void)t4_init_devlog_params(adap, 1);
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		/* Driver / card / filesystem versions, for the error report. */
		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		CH_ERR(adap, "Cannot find a usable firmware: "
			"fw_install %d, chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			t4_fw_install, state,
			G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
			G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
			G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
			G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
			G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
			G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;

}
4043 4043
4044 4044 /**
4045 4045 * t4_flash_erase_sectors - erase a range of flash sectors
4046 4046 * @adapter: the adapter
4047 4047 * @start: the first sector to erase
4048 4048 * @end: the last sector to erase
4049 4049 *
4050 4050 * Erases the sectors in the given inclusive range.
4051 4051 */
4052 4052 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
4053 4053 {
4054 4054 int ret = 0;
4055 4055
4056 4056 if (end >= adapter->params.sf_nsec)
4057 4057 return -EINVAL;
4058 4058
4059 4059 while (start <= end) {
4060 4060 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
4061 4061 (ret = sf1_write(adapter, 4, 0, 1,
4062 4062 SF_ERASE_SECTOR | (start << 8))) != 0 ||
4063 4063 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
4064 4064 CH_ERR(adapter,
4065 4065 "erase of flash sector %d failed, error %d\n",
4066 4066 start, ret);
4067 4067 break;
4068 4068 }
4069 4069 start++;
4070 4070 }
4071 4071 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
4072 4072 return ret;
4073 4073 }
4074 4074
4075 4075 /**
4076 4076 * t4_flash_cfg_addr - return the address of the flash configuration file
4077 4077 * @adapter: the adapter
4078 4078 *
4079 4079 * Return the address within the flash where the Firmware Configuration
4080 4080 * File is stored, or an error if the device FLASH is too small to contain
4081 4081 * a Firmware Configuration File.
4082 4082 */
4083 4083 int t4_flash_cfg_addr(struct adapter *adapter)
4084 4084 {
4085 4085 /*
4086 4086 * If the device FLASH isn't large enough to hold a Firmware
4087 4087 * Configuration File, return an error.
4088 4088 */
4089 4089 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
4090 4090 return -ENOSPC;
4091 4091
4092 4092 return FLASH_CFG_START;
4093 4093 }
4094 4094
4095 4095 /* Return TRUE if the specified firmware matches the adapter. I.e. T4
4096 4096 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
4097 4097 * and emit an error message for mismatched firmware to save our caller the
4098 4098 * effort ...
4099 4099 */
4100 4100 static int t4_fw_matches_chip(const struct adapter *adap,
4101 4101 const struct fw_hdr *hdr)
4102 4102 {
4103 4103 /*
4104 4104 * The expression below will return FALSE for any unsupported adapter
4105 4105 * which will keep us "honest" in the future ...
4106 4106 */
4107 4107 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
4108 4108 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
4109 4109 (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
4110 4110 return 1;
4111 4111
4112 4112 CH_ERR(adap,
4113 4113 "FW image (%d) is not suitable for this adapter (%d)\n",
4114 4114 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
4115 4115 return 0;
4116 4116 }
4117 4117
/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size in bytes; must be nonzero and a multiple of 512
 *	@bootstrap: indicates if the binary is a bootstrap fw
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The image is validated (size, header length field, checksum, chip
 *	match) before any sector is erased.  To make a failed write
 *	detectable, the first page is initially written with an invalid
 *	fw_ver and the real version is written last.  Returns 0 on success
 *	or a negative errno-style value on failure.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size,
	       unsigned int bootstrap)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	/* Bootstrap firmware lives in its own flash region. */
	if (bootstrap) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}

	/* Validate the image before touching the flash. */
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
			fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* Word-sum of a valid image is 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	/* Write the remaining pages.  NOTE(review): this loop assumes size
	 * is a multiple of SF_PAGE_SIZE (guaranteed above only for 512-byte
	 * multiples) -- confirm SF_PAGE_SIZE divides 512. */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Finally write the real fw_ver into the header on flash. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
			ret);
	else {
		/* Re-read the version so cached params reflect the flash. */
		if (bootstrap)
			ret = t4_get_bs_version(adap, &adap->params.bs_vers);
		else
			ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	}
	return ret;
}
4222 4222
4223 4223 /**
4224 4224 * t4_phy_fw_ver - return current PHY firmware version
4225 4225 * @adap: the adapter
4226 4226 * @phy_fw_ver: return value buffer for PHY firmware version
4227 4227 *
4228 4228 * Returns the current version of external PHY firmware on the
4229 4229 * adapter.
4230 4230 */
4231 4231 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
4232 4232 {
4233 4233 u32 param, val;
4234 4234 int ret;
4235 4235
4236 4236 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4237 4237 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4238 4238 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4239 4239 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
4240 4240 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4241 4241 ¶m, &val);
4242 4242 if (ret < 0)
4243 4243 return ret;
4244 4244 *phy_fw_ver = val;
4245 4245 return 0;
4246 4246 }
4247 4247
4248 4248 /**
4249 4249 * t4_load_phy_fw - download port PHY firmware
4250 4250 * @adap: the adapter
4251 4251 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
4252 4252 * @lock: the lock to use to guard the memory copy
4253 4253 * @phy_fw_version: function to check PHY firmware versions
4254 4254 * @phy_fw_data: the PHY firmware image to write
4255 4255 * @phy_fw_size: image size
4256 4256 *
4257 4257 * Transfer the specified PHY firmware to the adapter. If a non-NULL
4258 4258 * @phy_fw_version is supplied, then it will be used to determine if
4259 4259 * it's necessary to perform the transfer by comparing the version
4260 4260 * of any existing adapter PHY firmware with that of the passed in
4261 4261 * PHY firmware image. If @lock is non-NULL then it will be used
4262 4262 * around the call to t4_memory_rw() which transfers the PHY firmware
4263 4263 * to the adapter.
4264 4264 *
4265 4265 * A negative error number will be returned if an error occurs. If
4266 4266 * version number support is available and there's no need to upgrade
4267 4267 * the firmware, 0 will be returned. If firmware is successfully
4268 4268 * transferred to the adapter, 1 will be retured.
4269 4269 *
4270 4270 * NOTE: some adapters only have local RAM to store the PHY firmware. As
4271 4271 * a result, a RESET of the adapter would cause that RAM to lose its
4272 4272 * contents. Thus, loading PHY firmware on such adapters must happen after any
4273 4273 * FW_RESET_CMDs ...
4274 4274 */
4275 4275 int t4_load_phy_fw(struct adapter *adap,
4276 4276 int win, t4_os_lock_t *lock,
4277 4277 int (*phy_fw_version)(const u8 *, size_t),
4278 4278 const u8 *phy_fw_data, size_t phy_fw_size)
4279 4279 {
4280 4280 unsigned long mtype = 0, maddr = 0;
4281 4281 u32 param, val;
4282 4282 int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
4283 4283 int ret;
4284 4284
4285 4285 /*
4286 4286 * If we have version number support, then check to see if the adapter
4287 4287 * already has up-to-date PHY firmware loaded.
4288 4288 */
4289 4289 if (phy_fw_version) {
4290 4290 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
4291 4291 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4292 4292 if (ret < 0)
4293 4293 return ret;;
4294 4294
4295 4295 if (cur_phy_fw_ver >= new_phy_fw_vers) {
4296 4296 CH_WARN(adap, "PHY Firmware already up-to-date, "
4297 4297 "version %#x\n", cur_phy_fw_ver);
4298 4298 return 0;
4299 4299 }
4300 4300 }
4301 4301
4302 4302 /*
4303 4303 * Ask the firmware where it wants us to copy the PHY firmware image.
4304 4304 * The size of the file requires a special version of the READ coommand
4305 4305 * which will pass the file size via the values field in PARAMS_CMD and
4306 4306 * retreive the return value from firmware and place it in the same
4307 4307 * buffer values
4308 4308 */
4309 4309 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4310 4310 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4311 4311 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4312 4312 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
4313 4313 val = phy_fw_size;
4314 4314 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
4315 4315 ¶m, &val, 1, true);
4316 4316 if (ret < 0)
4317 4317 return ret;
4318 4318 mtype = val >> 8;
4319 4319 maddr = (val & 0xff) << 16;
4320 4320
4321 4321 /*
4322 4322 * Copy the supplied PHY Firmware image to the adapter memory location
4323 4323 * allocated by the adapter firmware.
4324 4324 */
4325 4325 if (lock)
4326 4326 t4_os_lock(lock);
4327 4327 ret = t4_memory_rw(adap, win, mtype, maddr,
4328 4328 phy_fw_size, (__be32*)phy_fw_data,
4329 4329 T4_MEMORY_WRITE);
4330 4330 if (lock)
4331 4331 t4_os_unlock(lock);
4332 4332 if (ret)
4333 4333 return ret;
4334 4334
4335 4335 /*
4336 4336 * Tell the firmware that the PHY firmware image has been written to
4337 4337 * RAM and it can now start copying it over to the PHYs. The chip
4338 4338 * firmware will RESET the affected PHYs as part of this operation
4339 4339 * leaving them running the new PHY firmware image.
4340 4340 */
4341 4341 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4342 4342 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4343 4343 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4344 4344 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
4345 4345 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
4346 4346 ¶m, &val, 30000);
4347 4347
4348 4348 /*
4349 4349 * If we have version number support, then check to see that the new
4350 4350 * firmware got loaded properly.
4351 4351 */
4352 4352 if (phy_fw_version) {
4353 4353 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4354 4354 if (ret < 0)
4355 4355 return ret;
4356 4356
4357 4357 if (cur_phy_fw_ver != new_phy_fw_vers) {
4358 4358 CH_WARN(adap, "PHY Firmware did not update: "
4359 4359 "version on adapter %#x, "
4360 4360 "version flashed %#x\n",
4361 4361 cur_phy_fw_ver, new_phy_fw_vers);
4362 4362 return -ENXIO;
4363 4363 }
4364 4364 }
4365 4365
4366 4366 return 1;
4367 4367 }
4368 4368
4369 4369 /**
4370 4370 * t4_fwcache - firmware cache operation
4371 4371 * @adap: the adapter
4372 4372 * @op : the operation (flush or flush and invalidate)
4373 4373 */
4374 4374 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
4375 4375 {
4376 4376 struct fw_params_cmd c;
4377 4377
4378 4378 memset(&c, 0, sizeof(c));
4379 4379 c.op_to_vfn =
4380 4380 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
4381 4381 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4382 4382 V_FW_PARAMS_CMD_PFN(adap->pf) |
4383 4383 V_FW_PARAMS_CMD_VFN(0));
4384 4384 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4385 4385 c.param[0].mnem =
4386 4386 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4387 4387 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
4388 4388 c.param[0].val = (__force __be32)op;
4389 4389
4390 4390 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
4391 4391 }
4392 4392
/*
 * t4_cim_read_pif_la - dump the CIM PIF logic-analyzer buffers
 * @adap: the adapter
 * @pif_req: receives CIM_PIFLA_SIZE * 6 words of outbound (request) LA data
 * @pif_rsp: receives CIM_PIFLA_SIZE * 6 words of inbound (response) LA data
 * @pif_req_wrptr: if non-NULL, receives the request LA write pointer
 * @pif_rsp_wrptr: if non-NULL, receives the response LA write pointer
 *
 * Capture (F_LADBGEN) is turned off while reading and the original debug
 * configuration is restored before returning.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Disable LA capture if it is currently enabled. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	/* Current write pointers for both directions. */
	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	/* Read 6 data words per entry; NOTE(review): the +2 skip between
	 * entries presumably steps over non-data slots -- confirm against
	 * the CIM LA layout documentation. */
	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	/* Restore the original debug configuration. */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
4426 4426
/*
 * t4_cim_read_ma_la - dump the CIM MA logic-analyzer buffers
 * @adap: the adapter
 * @ma_req: receives CIM_MALA_SIZE * 5 words of outbound (request) LA data
 * @ma_rsp: receives CIM_MALA_SIZE * 5 words of inbound (response) LA data
 *
 * Capture (F_LADBGEN) is turned off while reading and the original debug
 * configuration is restored before returning.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	/* Disable LA capture if it is currently enabled. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	/* Read 5 of every 8 slots per entry; NOTE(review): slots 5-7 are
	 * skipped -- presumably unused, confirm against the MA LA layout. */
	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	/* Restore the original debug configuration. */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
4447 4447
/*
 * t4_ulprx_read_la - dump the ULP_RX logic-analyzer contents
 * @adap: the adapter
 * @la_buf: receives 8 * ULPRX_LA_SIZE words; the 8 LA streams are
 *	interleaved (stream i occupies la_buf[i], la_buf[i+8], ...)
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		/* Select stream i, then rewind its read pointer to the
		 * current write pointer before draining the data. */
		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}
4462 4462
4463 4463 #define ADVERT_MASK (V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) | \
4464 4464 FW_PORT_CAP_ANEG)
4465 4465
4466 4466 /* Translate Firmware Port Capabilities Pause specification to Common Code */
4467 4467 static inline unsigned int fwcap_to_cc_pause(unsigned int fw_pause)
4468 4468 {
4469 4469 unsigned int cc_pause = 0;
4470 4470
4471 4471 if (fw_pause & FW_PORT_CAP_FC_RX)
4472 4472 cc_pause |= PAUSE_RX;
4473 4473 if (fw_pause & FW_PORT_CAP_FC_TX)
4474 4474 cc_pause |= PAUSE_TX;
4475 4475
4476 4476 return cc_pause;
4477 4477 }
4478 4478
4479 4479 /* Translate Common Code Pause specification into Firmware Port Capabilities */
4480 4480 static inline unsigned int cc_to_fwcap_pause(unsigned int cc_pause)
4481 4481 {
4482 4482 unsigned int fw_pause = 0;
4483 4483
4484 4484 if (cc_pause & PAUSE_RX)
4485 4485 fw_pause |= FW_PORT_CAP_FC_RX;
4486 4486 if (cc_pause & PAUSE_TX)
4487 4487 fw_pause |= FW_PORT_CAP_FC_TX;
4488 4488
4489 4489 return fw_pause;
4490 4490 }
4491 4491
4492 4492 /* Translate Firmware Forward Error Correction specification to Common Code */
4493 4493 static inline unsigned int fwcap_to_cc_fec(unsigned int fw_fec)
4494 4494 {
4495 4495 unsigned int cc_fec = 0;
4496 4496
4497 4497 if (fw_fec & FW_PORT_CAP_FEC_RS)
4498 4498 cc_fec |= FEC_RS;
4499 4499 if (fw_fec & FW_PORT_CAP_FEC_BASER_RS)
4500 4500 cc_fec |= FEC_BASER_RS;
4501 4501
4502 4502 return cc_fec;
4503 4503 }
4504 4504
4505 4505 /* Translate Common Code Forward Error Correction specification to Firmware */
4506 4506 static inline unsigned int cc_to_fwcap_fec(unsigned int cc_fec)
4507 4507 {
4508 4508 unsigned int fw_fec = 0;
4509 4509
4510 4510 if (cc_fec & FEC_RS)
4511 4511 fw_fec |= FW_PORT_CAP_FEC_RS;
4512 4512 if (cc_fec & FEC_BASER_RS)
4513 4513 fw_fec |= FW_PORT_CAP_FEC_BASER_RS;
4514 4514
4515 4515 return fw_fec;
4516 4516 }
4517 4517
/**
 *	t4_link_l1cfg - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: the Firmware Mailbox to use
 *	@port: the Port ID to configure
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 *
 *	Issues an L1_CFG PORT command via the mailbox and returns its result.
 *	Side effects: clears lc->link_ok and, when autoneg is unavailable or
 *	disabled, updates lc->fc and lc->fec to the applied settings.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fw_mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
	unsigned int fw_fc, cc_fec, fw_fec;

	lc->link_ok = 0;

	/*
	 * Convert driver coding of Pause Frame Flow Control settings into the
	 * Firmware's API.
	 */
	fw_fc = cc_to_fwcap_pause(lc->requested_fc);

	/*
	 * Convert Common Code Forward Error Control settings into the
	 * Firmware's API.  If the current Requested FEC has "Automatic"
	 * (IEEE 802.3) specified, then we use whatever the Firmware
	 * sent us as part of its IEEE 802.3-based interpretation of
	 * the Transceiver Module EPROM FEC parameters.  Otherwise we
	 * use whatever is in the current Requested FEC settings.
	 */
	if (lc->requested_fec & FEC_AUTO)
		cc_fec = lc->auto_fec;
	else
		cc_fec = lc->requested_fec;
	fw_fec = cc_to_fwcap_fec(cc_fec);

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			    FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/* Port cannot autonegotiate: advertise only supported caps. */
		c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
					     fw_fc | fw_fec);
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		/* Autoneg explicitly disabled: force the requested speed. */
		c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed |
					     fw_fc | fw_fec | fw_mdi);
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else
		/* Autonegotiating: advertise the configured capabilities. */
		c.u.l1cfg.rcap = cpu_to_be32(lc->advertising |
					     fw_fc | fw_fec | fw_mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
4584 4584
4585 4585 /**
4586 4586 * t4_restart_aneg - restart autonegotiation
4587 4587 * @adap: the adapter
4588 4588 * @mbox: mbox to use for the FW command
4589 4589 * @port: the port id
4590 4590 *
4591 4591 * Restarts autonegotiation for the selected port.
4592 4592 */
4593 4593 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4594 4594 {
4595 4595 struct fw_port_cmd c;
4596 4596
4597 4597 memset(&c, 0, sizeof(c));
4598 4598 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
4599 4599 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4600 4600 V_FW_PORT_CMD_PORTID(port));
4601 4601 c.action_to_len16 =
4602 4602 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
4603 4603 FW_LEN16(c));
4604 4604 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
4605 4605 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4606 4606 }
4607 4607
/* Platform-specific handler invoked when a matched interrupt cause is seen. */
typedef void (*int_handler_t)(struct adapter *adap);

/*
 * One entry of a table-driven interrupt-status decode; tables are
 * terminated by an entry with mask == 0.
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};
4617 4617
4618 4618 /**
4619 4619 * t4_handle_intr_status - table driven interrupt handler
4620 4620 * @adapter: the adapter that generated the interrupt
4621 4621 * @reg: the interrupt status register to process
4622 4622 * @acts: table of interrupt actions
4623 4623 *
4624 4624 * A table driven interrupt handler that applies a set of masks to an
4625 4625 * interrupt status word and performs the corresponding actions if the
4626 4626 * interrupts described by the mask have occurred. The actions include
4627 4627 * optionally emitting a warning or alert message. The table is terminated
4628 4628 * by an entry specifying mask 0. Returns the number of fatal interrupt
4629 4629 * conditions.
4630 4630 */
4631 4631 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4632 4632 const struct intr_info *acts)
4633 4633 {
4634 4634 int fatal = 0;
4635 4635 unsigned int mask = 0;
4636 4636 unsigned int status = t4_read_reg(adapter, reg);
4637 4637
4638 4638 for ( ; acts->mask; ++acts) {
4639 4639 if (!(status & acts->mask))
4640 4640 continue;
4641 4641 if (acts->fatal) {
4642 4642 fatal++;
4643 4643 CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
4644 4644 status & acts->mask);
4645 4645 } else if (acts->msg)
4646 4646 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
4647 4647 status & acts->mask);
4648 4648 if (acts->int_handler)
4649 4649 acts->int_handler(adapter);
4650 4650 mask |= acts->mask;
4651 4651 }
4652 4652 status &= mask;
4653 4653 if (status) /* clear processed interrupts */
4654 4654 t4_write_reg(adapter, reg, status);
4655 4655 return fatal;
4656 4656 }
4657 4657
4658 4658 /*
4659 4659 * Interrupt handler for the PCIE module.
4660 4660 */
4661 4661 static void pcie_intr_handler(struct adapter *adapter)
4662 4662 {
4663 4663 static const struct intr_info sysbus_intr_info[] = {
4664 4664 { F_RNPP, "RXNP array parity error", -1, 1 },
4665 4665 { F_RPCP, "RXPC array parity error", -1, 1 },
4666 4666 { F_RCIP, "RXCIF array parity error", -1, 1 },
4667 4667 { F_RCCP, "Rx completions control array parity error", -1, 1 },
4668 4668 { F_RFTP, "RXFT array parity error", -1, 1 },
4669 4669 { 0 }
4670 4670 };
4671 4671 static const struct intr_info pcie_port_intr_info[] = {
4672 4672 { F_TPCP, "TXPC array parity error", -1, 1 },
4673 4673 { F_TNPP, "TXNP array parity error", -1, 1 },
4674 4674 { F_TFTP, "TXFT array parity error", -1, 1 },
4675 4675 { F_TCAP, "TXCA array parity error", -1, 1 },
4676 4676 { F_TCIP, "TXCIF array parity error", -1, 1 },
4677 4677 { F_RCAP, "RXCA array parity error", -1, 1 },
4678 4678 { F_OTDD, "outbound request TLP discarded", -1, 1 },
4679 4679 { F_RDPE, "Rx data parity error", -1, 1 },
4680 4680 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
4681 4681 { 0 }
4682 4682 };
4683 4683 static const struct intr_info pcie_intr_info[] = {
4684 4684 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
4685 4685 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
4686 4686 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
4687 4687 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
4688 4688 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
4689 4689 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
4690 4690 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
4691 4691 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
4692 4692 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
4693 4693 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
4694 4694 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
4695 4695 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
4696 4696 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
4697 4697 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
4698 4698 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
4699 4699 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
4700 4700 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
4701 4701 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
4702 4702 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
4703 4703 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
4704 4704 { F_FIDPERR, "PCI FID parity error", -1, 1 },
4705 4705 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
4706 4706 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
4707 4707 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
4708 4708 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
4709 4709 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
4710 4710 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
4711 4711 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
4712 4712 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
4713 4713 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
4714 4714 0 },
4715 4715 { 0 }
4716 4716 };
4717 4717
4718 4718 static struct intr_info t5_pcie_intr_info[] = {
4719 4719 { F_MSTGRPPERR, "Master Response Read Queue parity error",
4720 4720 -1, 1 },
4721 4721 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
4722 4722 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
4723 4723 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
4724 4724 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
4725 4725 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
4726 4726 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
4727 4727 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
4728 4728 -1, 1 },
4729 4729 { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
4730 4730 -1, 1 },
4731 4731 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
4732 4732 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
4733 4733 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
4734 4734 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
4735 4735 { F_DREQWRPERR, "PCI DMA channel write request parity error",
4736 4736 -1, 1 },
4737 4737 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
4738 4738 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
4739 4739 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
4740 4740 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
4741 4741 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
4742 4742 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
4743 4743 { F_FIDPERR, "PCI FID parity error", -1, 1 },
4744 4744 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
4745 4745 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
4746 4746 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
4747 4747 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
4748 4748 -1, 1 },
4749 4749 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
4750 4750 -1, 1 },
4751 4751 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
4752 4752 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
4753 4753 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4754 4754 { F_READRSPERR, "Outbound read error", -1,
4755 4755 0 },
4756 4756 { 0 }
4757 4757 };
4758 4758
4759 4759 int fat;
4760 4760
4761 4761 if (is_t4(adapter->params.chip))
4762 4762 fat = t4_handle_intr_status(adapter,
4763 4763 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4764 4764 sysbus_intr_info) +
4765 4765 t4_handle_intr_status(adapter,
4766 4766 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4767 4767 pcie_port_intr_info) +
4768 4768 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
4769 4769 pcie_intr_info);
4770 4770 else
4771 4771 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
4772 4772 t5_pcie_intr_info);
4773 4773 if (fat)
4774 4774 t4_fatal_err(adapter);
4775 4775 }
4776 4776
4777 4777 /*
4778 4778 * TP interrupt handler.
4779 4779 */
4780 4780 static void tp_intr_handler(struct adapter *adapter)
4781 4781 {
4782 4782 static const struct intr_info tp_intr_info[] = {
4783 4783 { 0x3fffffff, "TP parity error", -1, 1 },
4784 4784 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
4785 4785 { 0 }
4786 4786 };
4787 4787
4788 4788 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
4789 4789 t4_fatal_err(adapter);
4790 4790 }
4791 4791
4792 4792 /*
4793 4793 * SGE interrupt handler.
4794 4794 */
4795 4795 static void sge_intr_handler(struct adapter *adapter)
4796 4796 {
4797 4797 u64 v;
4798 4798 u32 err;
4799 4799
4800 4800 static const struct intr_info sge_intr_info[] = {
4801 4801 { F_ERR_CPL_EXCEED_IQE_SIZE,
4802 4802 "SGE received CPL exceeding IQE size", -1, 1 },
4803 4803 { F_ERR_INVALID_CIDX_INC,
4804 4804 "SGE GTS CIDX increment too large", -1, 0 },
4805 4805 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
4806 4806 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
4807 4807 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
4808 4808 "SGE IQID > 1023 received CPL for FL", -1, 0 },
4809 4809 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
4810 4810 0 },
4811 4811 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
4812 4812 0 },
4813 4813 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
4814 4814 0 },
4815 4815 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
4816 4816 0 },
4817 4817 { F_ERR_ING_CTXT_PRIO,
4818 4818 "SGE too many priority ingress contexts", -1, 0 },
4819 4819 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
4820 4820 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
4821 4821 { F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
4822 4822 F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
4823 4823 "SGE PCIe error for a DBP thread", -1, 0 },
4824 4824 { 0 }
4825 4825 };
4826 4826
4827 4827 static struct intr_info t4t5_sge_intr_info[] = {
4828 4828 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
4829 4829 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
4830 4830 { F_ERR_EGR_CTXT_PRIO,
4831 4831 "SGE too many priority egress contexts", -1, 0 },
4832 4832 { 0 }
4833 4833 };
4834 4834
4835 4835 /*
4836 4836 * For now, treat below interrupts as fatal so that we disable SGE and
4837 4837 * get better debug */
4838 4838 static struct intr_info t6_sge_intr_info[] = {
4839 4839 { F_FATAL_WRE_LEN,
4840 4840 "SGE Actual WRE packet is less than advertized length",
4841 4841 -1, 1 },
4842 4842 { 0 }
4843 4843 };
4844 4844
4845 4845 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
4846 4846 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
4847 4847 if (v) {
4848 4848 CH_ALERT(adapter, "SGE parity error (%#llx)\n",
4849 4849 (unsigned long long)v);
4850 4850 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
4851 4851 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
4852 4852 }
4853 4853
4854 4854 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
4855 4855 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4856 4856 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
4857 4857 t4t5_sge_intr_info);
4858 4858 else
4859 4859 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
4860 4860 t6_sge_intr_info);
4861 4861
4862 4862 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
4863 4863 if (err & F_ERROR_QID_VALID) {
4864 4864 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
4865 4865 if (err & F_UNCAPTURED_ERROR)
4866 4866 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
4867 4867 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
4868 4868 F_UNCAPTURED_ERROR);
4869 4869 }
4870 4870
4871 4871 if (v != 0)
4872 4872 t4_fatal_err(adapter);
4873 4873 }
4874 4874
/*
 * Aggregate parity-error masks for the CIM outbound (OBQ) and inbound (IBQ)
 * queues, one bit per queue.
 */
#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
4879 4879
4880 4880 /*
4881 4881 * CIM interrupt handler.
4882 4882 */
4883 4883 static void cim_intr_handler(struct adapter *adapter)
4884 4884 {
4885 4885 static const struct intr_info cim_intr_info[] = {
4886 4886 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
4887 4887 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4888 4888 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4889 4889 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
4890 4890 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
4891 4891 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
4892 4892 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
4893 4893 { 0 }
4894 4894 };
4895 4895 static const struct intr_info cim_upintr_info[] = {
4896 4896 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
4897 4897 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
4898 4898 { F_ILLWRINT, "CIM illegal write", -1, 1 },
4899 4899 { F_ILLRDINT, "CIM illegal read", -1, 1 },
4900 4900 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
4901 4901 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
4902 4902 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
4903 4903 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
4904 4904 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
4905 4905 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
4906 4906 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
4907 4907 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
4908 4908 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
4909 4909 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
4910 4910 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
4911 4911 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
4912 4912 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
4913 4913 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
4914 4914 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
4915 4915 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
4916 4916 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
4917 4917 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
4918 4918 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
4919 4919 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
4920 4920 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
4921 4921 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
4922 4922 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
4923 4923 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
4924 4924 { 0 }
4925 4925 };
4926 4926 int fat;
4927 4927
4928 4928 if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
4929 4929 t4_report_fw_error(adapter);
4930 4930
4931 4931 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
4932 4932 cim_intr_info) +
4933 4933 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
4934 4934 cim_upintr_info);
4935 4935 if (fat)
4936 4936 t4_fatal_err(adapter);
4937 4937 }
4938 4938
4939 4939 /*
4940 4940 * ULP RX interrupt handler.
4941 4941 */
4942 4942 static void ulprx_intr_handler(struct adapter *adapter)
4943 4943 {
4944 4944 static const struct intr_info ulprx_intr_info[] = {
4945 4945 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
4946 4946 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
4947 4947 { 0x7fffff, "ULPRX parity error", -1, 1 },
4948 4948 { 0 }
4949 4949 };
4950 4950
4951 4951 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
4952 4952 t4_fatal_err(adapter);
4953 4953 }
4954 4954
4955 4955 /*
4956 4956 * ULP TX interrupt handler.
4957 4957 */
4958 4958 static void ulptx_intr_handler(struct adapter *adapter)
4959 4959 {
4960 4960 static const struct intr_info ulptx_intr_info[] = {
4961 4961 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
4962 4962 0 },
4963 4963 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
4964 4964 0 },
4965 4965 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
4966 4966 0 },
4967 4967 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
4968 4968 0 },
4969 4969 { 0xfffffff, "ULPTX parity error", -1, 1 },
4970 4970 { 0 }
4971 4971 };
4972 4972
4973 4973 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
4974 4974 t4_fatal_err(adapter);
4975 4975 }
4976 4976
4977 4977 /*
4978 4978 * PM TX interrupt handler.
4979 4979 */
4980 4980 static void pmtx_intr_handler(struct adapter *adapter)
4981 4981 {
4982 4982 static const struct intr_info pmtx_intr_info[] = {
4983 4983 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
4984 4984 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
4985 4985 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
4986 4986 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
4987 4987 { 0xffffff0, "PMTX framing error", -1, 1 },
4988 4988 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
4989 4989 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
4990 4990 1 },
4991 4991 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
4992 4992 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
4993 4993 { 0 }
4994 4994 };
4995 4995
4996 4996 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
4997 4997 t4_fatal_err(adapter);
4998 4998 }
4999 4999
5000 5000 /*
5001 5001 * PM RX interrupt handler.
5002 5002 */
5003 5003 static void pmrx_intr_handler(struct adapter *adapter)
5004 5004 {
5005 5005 static const struct intr_info pmrx_intr_info[] = {
5006 5006 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
5007 5007 { 0x3ffff0, "PMRX framing error", -1, 1 },
5008 5008 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
5009 5009 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
5010 5010 1 },
5011 5011 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
5012 5012 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
5013 5013 { 0 }
5014 5014 };
5015 5015
5016 5016 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
5017 5017 t4_fatal_err(adapter);
5018 5018 }
5019 5019
5020 5020 /*
5021 5021 * CPL switch interrupt handler.
5022 5022 */
5023 5023 static void cplsw_intr_handler(struct adapter *adapter)
5024 5024 {
5025 5025 static const struct intr_info cplsw_intr_info[] = {
5026 5026 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
5027 5027 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
5028 5028 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
5029 5029 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
5030 5030 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
5031 5031 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
5032 5032 { 0 }
5033 5033 };
5034 5034
5035 5035 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
5036 5036 t4_fatal_err(adapter);
5037 5037 }
5038 5038
5039 5039 /*
5040 5040 * LE interrupt handler.
5041 5041 */
5042 5042 static void le_intr_handler(struct adapter *adap)
5043 5043 {
5044 5044 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
5045 5045 static const struct intr_info le_intr_info[] = {
5046 5046 { F_LIPMISS, "LE LIP miss", -1, 0 },
5047 5047 { F_LIP0, "LE 0 LIP error", -1, 0 },
5048 5048 { F_PARITYERR, "LE parity error", -1, 1 },
5049 5049 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
5050 5050 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
5051 5051 { 0 }
5052 5052 };
5053 5053
5054 5054 static struct intr_info t6_le_intr_info[] = {
5055 5055 { F_T6_LIPMISS, "LE LIP miss", -1, 0 },
5056 5056 { F_T6_LIP0, "LE 0 LIP error", -1, 0 },
5057 5057 { F_TCAMINTPERR, "LE parity error", -1, 1 },
5058 5058 { F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
5059 5059 { F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
5060 5060 { 0 }
5061 5061 };
5062 5062
5063 5063 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
5064 5064 (chip_ver <= CHELSIO_T5) ?
5065 5065 le_intr_info : t6_le_intr_info))
5066 5066 t4_fatal_err(adap);
5067 5067 }
5068 5068
5069 5069 /*
5070 5070 * MPS interrupt handler.
5071 5071 */
5072 5072 static void mps_intr_handler(struct adapter *adapter)
5073 5073 {
5074 5074 static const struct intr_info mps_rx_intr_info[] = {
5075 5075 { 0xffffff, "MPS Rx parity error", -1, 1 },
5076 5076 { 0 }
5077 5077 };
5078 5078 static const struct intr_info mps_tx_intr_info[] = {
5079 5079 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
5080 5080 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
5081 5081 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
5082 5082 -1, 1 },
5083 5083 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
5084 5084 -1, 1 },
5085 5085 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
5086 5086 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
5087 5087 { F_FRMERR, "MPS Tx framing error", -1, 1 },
5088 5088 { 0 }
5089 5089 };
5090 5090 static const struct intr_info mps_trc_intr_info[] = {
5091 5091 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
5092 5092 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
5093 5093 1 },
5094 5094 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
5095 5095 { 0 }
5096 5096 };
5097 5097 static const struct intr_info mps_stat_sram_intr_info[] = {
5098 5098 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
5099 5099 { 0 }
5100 5100 };
5101 5101 static const struct intr_info mps_stat_tx_intr_info[] = {
5102 5102 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
5103 5103 { 0 }
5104 5104 };
5105 5105 static const struct intr_info mps_stat_rx_intr_info[] = {
5106 5106 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
5107 5107 { 0 }
5108 5108 };
5109 5109 static const struct intr_info mps_cls_intr_info[] = {
5110 5110 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
5111 5111 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
5112 5112 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
5113 5113 { 0 }
5114 5114 };
5115 5115
5116 5116 int fat;
5117 5117
5118 5118 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
5119 5119 mps_rx_intr_info) +
5120 5120 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
5121 5121 mps_tx_intr_info) +
5122 5122 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
5123 5123 mps_trc_intr_info) +
5124 5124 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
5125 5125 mps_stat_sram_intr_info) +
5126 5126 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
5127 5127 mps_stat_tx_intr_info) +
5128 5128 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
5129 5129 mps_stat_rx_intr_info) +
5130 5130 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
5131 5131 mps_cls_intr_info);
5132 5132
5133 5133 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
5134 5134 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
5135 5135 if (fat)
5136 5136 t4_fatal_err(adapter);
5137 5137 }
5138 5138
/* Memory-controller causes we service: parity, correctable and
 * uncorrectable ECC errors. */
#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
		      F_ECC_UE_INT_CAUSE)
5141 5141
5142 5142 /*
5143 5143 * EDC/MC interrupt handler.
5144 5144 */
5145 5145 static void mem_intr_handler(struct adapter *adapter, int idx)
5146 5146 {
5147 5147 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
5148 5148
5149 5149 unsigned int addr, cnt_addr, v;
5150 5150
5151 5151 if (idx <= MEM_EDC1) {
5152 5152 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
5153 5153 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
5154 5154 } else if (idx == MEM_MC) {
5155 5155 if (is_t4(adapter->params.chip)) {
5156 5156 addr = A_MC_INT_CAUSE;
5157 5157 cnt_addr = A_MC_ECC_STATUS;
5158 5158 } else {
5159 5159 addr = A_MC_P_INT_CAUSE;
5160 5160 cnt_addr = A_MC_P_ECC_STATUS;
5161 5161 }
5162 5162 } else {
5163 5163 addr = MC_REG(A_MC_P_INT_CAUSE, 1);
5164 5164 cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
5165 5165 }
5166 5166
5167 5167 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
5168 5168 if (v & F_PERR_INT_CAUSE)
5169 5169 CH_ALERT(adapter, "%s FIFO parity error\n",
5170 5170 name[idx]);
5171 5171 if (v & F_ECC_CE_INT_CAUSE) {
5172 5172 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
5173 5173
5174 5174 if (idx <= MEM_EDC1)
5175 5175 t4_edc_err_read(adapter, idx);
5176 5176
5177 5177 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
5178 5178 CH_WARN_RATELIMIT(adapter,
5179 5179 "%u %s correctable ECC data error%s\n",
5180 5180 cnt, name[idx], cnt > 1 ? "s" : "");
5181 5181 }
5182 5182 if (v & F_ECC_UE_INT_CAUSE)
5183 5183 CH_ALERT(adapter,
5184 5184 "%s uncorrectable ECC data error\n", name[idx]);
5185 5185
5186 5186 t4_write_reg(adapter, addr, v);
5187 5187 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
5188 5188 t4_fatal_err(adapter);
5189 5189 }
5190 5190
5191 5191 /*
5192 5192 * MA interrupt handler.
5193 5193 */
5194 5194 static void ma_intr_handler(struct adapter *adapter)
5195 5195 {
5196 5196 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
5197 5197
5198 5198 if (status & F_MEM_PERR_INT_CAUSE) {
5199 5199 CH_ALERT(adapter,
5200 5200 "MA parity error, parity status %#x\n",
5201 5201 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
5202 5202 if (is_t5(adapter->params.chip))
5203 5203 CH_ALERT(adapter,
5204 5204 "MA parity error, parity status %#x\n",
5205 5205 t4_read_reg(adapter,
5206 5206 A_MA_PARITY_ERROR_STATUS2));
5207 5207 }
5208 5208 if (status & F_MEM_WRAP_INT_CAUSE) {
5209 5209 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
5210 5210 CH_ALERT(adapter, "MA address wrap-around error by "
5211 5211 "client %u to address %#x\n",
5212 5212 G_MEM_WRAP_CLIENT_NUM(v),
5213 5213 G_MEM_WRAP_ADDRESS(v) << 4);
5214 5214 }
5215 5215 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
5216 5216 t4_fatal_err(adapter);
5217 5217 }
5218 5218
5219 5219 /*
5220 5220 * SMB interrupt handler.
5221 5221 */
5222 5222 static void smb_intr_handler(struct adapter *adap)
5223 5223 {
5224 5224 static const struct intr_info smb_intr_info[] = {
5225 5225 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
5226 5226 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
5227 5227 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
5228 5228 { 0 }
5229 5229 };
5230 5230
5231 5231 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
5232 5232 t4_fatal_err(adap);
5233 5233 }
5234 5234
5235 5235 /*
5236 5236 * NC-SI interrupt handler.
5237 5237 */
5238 5238 static void ncsi_intr_handler(struct adapter *adap)
5239 5239 {
5240 5240 static const struct intr_info ncsi_intr_info[] = {
5241 5241 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
5242 5242 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
5243 5243 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
5244 5244 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
5245 5245 { 0 }
5246 5246 };
5247 5247
5248 5248 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
5249 5249 t4_fatal_err(adap);
5250 5250 }
5251 5251
5252 5252 /*
5253 5253 * XGMAC interrupt handler.
5254 5254 */
5255 5255 static void xgmac_intr_handler(struct adapter *adap, int port)
5256 5256 {
5257 5257 u32 v, int_cause_reg;
5258 5258
5259 5259 if (is_t4(adap->params.chip))
5260 5260 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
5261 5261 else
5262 5262 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
5263 5263
5264 5264 v = t4_read_reg(adap, int_cause_reg);
5265 5265
5266 5266 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
5267 5267 if (!v)
5268 5268 return;
5269 5269
5270 5270 if (v & F_TXFIFO_PRTY_ERR)
5271 5271 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
5272 5272 port);
5273 5273 if (v & F_RXFIFO_PRTY_ERR)
5274 5274 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
5275 5275 port);
5276 5276 t4_write_reg(adap, int_cause_reg, v);
5277 5277 t4_fatal_err(adap);
5278 5278 }
5279 5279
5280 5280 /*
5281 5281 * PL interrupt handler.
5282 5282 */
5283 5283 static void pl_intr_handler(struct adapter *adap)
5284 5284 {
5285 5285 static const struct intr_info pl_intr_info[] = {
5286 5286 { F_FATALPERR, "Fatal parity error", -1, 1 },
5287 5287 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
5288 5288 { 0 }
5289 5289 };
5290 5290
5291 5291 static struct intr_info t5_pl_intr_info[] = {
5292 5292 { F_FATALPERR, "Fatal parity error", -1, 1 },
5293 5293 { 0 }
5294 5294 };
5295 5295
5296 5296 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
5297 5297 is_t4(adap->params.chip) ?
5298 5298 pl_intr_info : t5_pl_intr_info))
5299 5299 t4_fatal_err(adap);
5300 5300 }
5301 5301
/* Per-PF interrupt sources we enable: PF software and PF CIM. */
#define PF_INTR_MASK (F_PFSW | F_PFCIM)
5303 5303
5304 5304 /**
5305 5305 * t4_slow_intr_handler - control path interrupt handler
5306 5306 * @adapter: the adapter
5307 5307 *
5308 5308 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
5309 5309 * The designation 'slow' is because it involves register reads, while
5310 5310 * data interrupts typically don't involve any MMIOs.
5311 5311 */
5312 5312 int t4_slow_intr_handler(struct adapter *adapter)
5313 5313 {
5314 5314 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
5315 5315
5316 5316 if (!(cause & GLBL_INTR_MASK))
5317 5317 return 0;
5318 5318 if (cause & F_CIM)
5319 5319 cim_intr_handler(adapter);
5320 5320 if (cause & F_MPS)
5321 5321 mps_intr_handler(adapter);
5322 5322 if (cause & F_NCSI)
5323 5323 ncsi_intr_handler(adapter);
5324 5324 if (cause & F_PL)
5325 5325 pl_intr_handler(adapter);
5326 5326 if (cause & F_SMB)
5327 5327 smb_intr_handler(adapter);
5328 5328 if (cause & F_MAC0)
5329 5329 xgmac_intr_handler(adapter, 0);
5330 5330 if (cause & F_MAC1)
5331 5331 xgmac_intr_handler(adapter, 1);
5332 5332 if (cause & F_MAC2)
5333 5333 xgmac_intr_handler(adapter, 2);
5334 5334 if (cause & F_MAC3)
5335 5335 xgmac_intr_handler(adapter, 3);
5336 5336 if (cause & F_PCIE)
5337 5337 pcie_intr_handler(adapter);
5338 5338 if (cause & F_MC0)
5339 5339 mem_intr_handler(adapter, MEM_MC);
5340 5340 if (is_t5(adapter->params.chip) && (cause & F_MC1))
5341 5341 mem_intr_handler(adapter, MEM_MC1);
5342 5342 if (cause & F_EDC0)
5343 5343 mem_intr_handler(adapter, MEM_EDC0);
5344 5344 if (cause & F_EDC1)
5345 5345 mem_intr_handler(adapter, MEM_EDC1);
5346 5346 if (cause & F_LE)
5347 5347 le_intr_handler(adapter);
5348 5348 if (cause & F_TP)
5349 5349 tp_intr_handler(adapter);
5350 5350 if (cause & F_MA)
5351 5351 ma_intr_handler(adapter);
5352 5352 if (cause & F_PM_TX)
5353 5353 pmtx_intr_handler(adapter);
5354 5354 if (cause & F_PM_RX)
5355 5355 pmrx_intr_handler(adapter);
5356 5356 if (cause & F_ULP_RX)
5357 5357 ulprx_intr_handler(adapter);
5358 5358 if (cause & F_CPL_SWITCH)
5359 5359 cplsw_intr_handler(adapter);
5360 5360 if (cause & F_SGE)
5361 5361 sge_intr_handler(adapter);
5362 5362 if (cause & F_ULP_TX)
5363 5363 ulptx_intr_handler(adapter);
5364 5364
5365 5365 /* Clear the interrupts just processed for which we are the master. */
5366 5366 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
5367 5367 (void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
5368 5368 return 1;
5369 5369 }
5370 5370
5371 5371 /**
5372 5372 * t4_intr_enable - enable interrupts
5373 5373 * @adapter: the adapter whose interrupts should be enabled
5374 5374 *
5375 5375 * Enable PF-specific interrupts for the calling function and the top-level
5376 5376 * interrupt concentrator for global interrupts. Interrupts are already
5377 5377 * enabled at each module, here we just enable the roots of the interrupt
5378 5378 * hierarchies.
5379 5379 *
5380 5380 * Note: this function should be called only when the driver manages
5381 5381 * non PF-specific interrupts from the various HW modules. Only one PCI
5382 5382 * function at a time should be doing this.
5383 5383 */
5384 5384 void t4_intr_enable(struct adapter *adapter)
5385 5385 {
5386 5386 u32 val = 0;
5387 5387 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
5388 5388 u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
5389 5389 ? G_SOURCEPF(whoami)
5390 5390 : G_T6_SOURCEPF(whoami));
5391 5391
5392 5392 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
5393 5393 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
5394 5394 else
5395 5395 val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
5396 5396 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
5397 5397 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
5398 5398 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
5399 5399 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
5400 5400 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
5401 5401 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
5402 5402 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
5403 5403 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
5404 5404 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
5405 5405 }
5406 5406
5407 5407 /**
5408 5408 * t4_intr_disable - disable interrupts
5409 5409 * @adapter: the adapter whose interrupts should be disabled
5410 5410 *
5411 5411 * Disable interrupts. We only disable the top-level interrupt
5412 5412 * concentrators. The caller must be a PCI function managing global
5413 5413 * interrupts.
5414 5414 */
5415 5415 void t4_intr_disable(struct adapter *adapter)
5416 5416 {
5417 5417 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
5418 5418 u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
5419 5419 ? G_SOURCEPF(whoami)
5420 5420 : G_T6_SOURCEPF(whoami));
5421 5421
5422 5422 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
5423 5423 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
5424 5424 }
5425 5425
5426 5426 /**
5427 5427 * t4_config_rss_range - configure a portion of the RSS mapping table
5428 5428 * @adapter: the adapter
5429 5429 * @mbox: mbox to use for the FW command
5430 5430 * @viid: virtual interface whose RSS subtable is to be written
5431 5431 * @start: start entry in the table to write
5432 5432 * @n: how many table entries to write
5433 5433 * @rspq: values for the "response queue" (Ingress Queue) lookup table
5434 5434 * @nrspq: number of values in @rspq
5435 5435 *
5436 5436 * Programs the selected part of the VI's RSS mapping table with the
5437 5437 * provided values. If @nrspq < @n the supplied values are used repeatedly
5438 5438 * until the full table range is populated.
5439 5439 *
5440 5440 * The caller must ensure the values in @rspq are in the range allowed for
5441 5441 * @viid.
5442 5442 */
5443 5443 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
5444 5444 int start, int n, const u16 *rspq, unsigned int nrspq)
5445 5445 {
5446 5446 int ret;
5447 5447 const u16 *rsp = rspq;
5448 5448 const u16 *rsp_end = rspq + nrspq;
5449 5449 struct fw_rss_ind_tbl_cmd cmd;
5450 5450
5451 5451 memset(&cmd, 0, sizeof(cmd));
5452 5452 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
5453 5453 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5454 5454 V_FW_RSS_IND_TBL_CMD_VIID(viid));
5455 5455 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5456 5456
5457 5457 /* Each firmware RSS command can accommodate up to 32 RSS Ingress
5458 5458 * Queue Identifiers. These Ingress Queue IDs are packed three to
5459 5459 * a 32-bit word as 10-bit values with the upper remaining 2 bits
5460 5460 * reserved.
5461 5461 */
5462 5462 while (n > 0) {
5463 5463 int nq = min(n, 32);
5464 5464 int nq_packed = 0;
5465 5465 __be32 *qp = &cmd.iq0_to_iq2;
5466 5466
5467 5467 /* Set up the firmware RSS command header to send the next
5468 5468 * "nq" Ingress Queue IDs to the firmware.
5469 5469 */
5470 5470 cmd.niqid = cpu_to_be16(nq);
5471 5471 cmd.startidx = cpu_to_be16(start);
5472 5472
5473 5473 /* "nq" more done for the start of the next loop.
5474 5474 */
5475 5475 start += nq;
5476 5476 n -= nq;
5477 5477
5478 5478 /* While there are still Ingress Queue IDs to stuff into the
5479 5479 * current firmware RSS command, retrieve them from the
5480 5480 * Ingress Queue ID array and insert them into the command.
5481 5481 */
5482 5482 while (nq > 0) {
5483 5483 /* Grab up to the next 3 Ingress Queue IDs (wrapping
5484 5484 * around the Ingress Queue ID array if necessary) and
5485 5485 * insert them into the firmware RSS command at the
5486 5486 * current 3-tuple position within the commad.
5487 5487 */
5488 5488 u16 qbuf[3];
5489 5489 u16 *qbp = qbuf;
5490 5490 int nqbuf = min(3, nq);
5491 5491
5492 5492 nq -= nqbuf;
5493 5493 qbuf[0] = qbuf[1] = qbuf[2] = 0;
5494 5494 while (nqbuf && nq_packed < 32) {
5495 5495 nqbuf--;
5496 5496 nq_packed++;
5497 5497 *qbp++ = *rsp++;
5498 5498 if (rsp >= rsp_end)
5499 5499 rsp = rspq;
5500 5500 }
5501 5501 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
5502 5502 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
5503 5503 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
5504 5504 }
5505 5505
5506 5506 /* Send this portion of the RRS table update to the firmware;
5507 5507 * bail out on any errors.
5508 5508 */
5509 5509 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
5510 5510 if (ret)
5511 5511 return ret;
5512 5512 }
5513 5513 return 0;
5514 5514 }
5515 5515
5516 5516 /**
5517 5517 * t4_config_glbl_rss - configure the global RSS mode
5518 5518 * @adapter: the adapter
5519 5519 * @mbox: mbox to use for the FW command
5520 5520 * @mode: global RSS mode
5521 5521 * @flags: mode-specific flags
5522 5522 *
5523 5523 * Sets the global RSS mode.
5524 5524 */
5525 5525 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5526 5526 unsigned int flags)
5527 5527 {
5528 5528 struct fw_rss_glb_config_cmd c;
5529 5529
5530 5530 memset(&c, 0, sizeof(c));
5531 5531 c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
5532 5532 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5533 5533 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5534 5534 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5535 5535 c.u.manual.mode_pkd =
5536 5536 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5537 5537 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5538 5538 c.u.basicvirtual.mode_keymode =
5539 5539 cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5540 5540 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5541 5541 } else
5542 5542 return -EINVAL;
5543 5543 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5544 5544 }
5545 5545
5546 5546 /**
5547 5547 * t4_config_vi_rss - configure per VI RSS settings
5548 5548 * @adapter: the adapter
5549 5549 * @mbox: mbox to use for the FW command
5550 5550 * @viid: the VI id
5551 5551 * @flags: RSS flags
5552 5552 * @defq: id of the default RSS queue for the VI.
5553 5553 * @skeyidx: RSS secret key table index for non-global mode
5554 5554 * @skey: RSS vf_scramble key for VI.
5555 5555 *
5556 5556 * Configures VI-specific RSS properties.
5557 5557 */
5558 5558 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5559 5559 unsigned int flags, unsigned int defq, unsigned int skeyidx,
5560 5560 unsigned int skey)
5561 5561 {
5562 5562 struct fw_rss_vi_config_cmd c;
5563 5563
5564 5564 memset(&c, 0, sizeof(c));
5565 5565 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5566 5566 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5567 5567 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
5568 5568 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5569 5569 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5570 5570 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
5571 5571 c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
5572 5572 V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
5573 5573 c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
5574 5574
5575 5575 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5576 5576 }
5577 5577
/* Read an RSS table row: request row @row via the lookup-table register,
 * then poll (up to 5 attempts) for the row-valid bit and return the row
 * contents in *val.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}
5585 5585
5586 5586 /**
5587 5587 * t4_read_rss - read the contents of the RSS mapping table
5588 5588 * @adapter: the adapter
5589 5589 * @map: holds the contents of the RSS mapping table
5590 5590 *
5591 5591 * Reads the contents of the RSS hash->queue mapping table.
5592 5592 */
5593 5593 int t4_read_rss(struct adapter *adapter, u16 *map)
5594 5594 {
5595 5595 u32 val;
5596 5596 int i, ret;
5597 5597
5598 5598 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
5599 5599 ret = rd_rss_row(adapter, i, &val);
5600 5600 if (ret)
5601 5601 return ret;
5602 5602 *map++ = G_LKPTBLQUEUE0(val);
5603 5603 *map++ = G_LKPTBLQUEUE1(val);
5604 5604 }
5605 5605 return 0;
5606 5606 }
5607 5607
/**
 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
 * @adap: the adapter
 * @cmd: TP fw ldst address space type
 * @vals: where the indirect register values are stored/written
 * @nregs: how many indirect registers to read/write
 * @start_index: index of first indirect register to read/write
 * @rw: Read (1) or Write (0)
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Access TP indirect registers through LDST, issuing one firmware LDST
 * command per register.
 **/
static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
			    unsigned int nregs, unsigned int start_index,
			    unsigned int rw, bool sleep_ok)
{
	int ret = 0;
	unsigned int i;
	struct fw_ldst_cmd c;

	for (i = 0; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						F_FW_CMD_REQUEST |
						(rw ? F_FW_CMD_READ :
						      F_FW_CMD_WRITE) |
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		c.u.addrval.addr = cpu_to_be32(start_index + i);
		/* A write carries the value; a read sends zero. */
		c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
				      sleep_ok);
		if (ret)
			return ret;

		/* The command is reused as the reply buffer, so on a read
		 * the returned value is in c.u.addrval.val.
		 */
		if (rw)
			vals[i] = be32_to_cpu(c.u.addrval.val);
	}
	return 0;
}
5649 5649
/**
 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
 * @adap: the adapter
 * @reg_addr: Address Register
 * @reg_data: Data register
 * @buff: where the indirect register values are stored/written
 * @nregs: how many indirect registers to read/write
 * @start_index: index of first indirect register to read/write
 * @rw: READ(1) or WRITE(0)
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read/Write TP indirect registers through LDST if possible.
 * Else, use backdoor access
 **/
static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
			      u32 *buff, u32 nregs, u32 start_index, int rw,
			      bool sleep_ok)
{
	int rc = -EINVAL;	/* stays non-zero unless LDST succeeds */
	int cmd;

	/* Map the indirect address register to its firmware LDST address
	 * space; unrecognized registers fall straight through to backdoor
	 * access.
	 */
	switch (reg_addr) {
	case A_TP_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_PIO;
		break;
	case A_TP_TM_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
		break;
	case A_TP_MIB_INDEX:
		cmd = FW_LDST_ADDRSPC_TP_MIB;
		break;
	default:
		goto indirect_access;
	}

	if (t4_use_ldst(adap))
		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
				      sleep_ok);

indirect_access:

	/* Fall back to direct indirect-register access when LDST is not
	 * in use or the LDST attempt failed.
	 */
	if (rc) {
		if (rw)
			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
					 start_index);
		else
			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
					  start_index);
	}
}
5700 5700
/**
 * t4_tp_pio_read - Read TP PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect register to read
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read TP PIO Registers
 **/
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	/* rw = 1: read */
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}
5717 5717
/**
 * t4_tp_pio_write - Write TP PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are stored
 * @nregs: how many indirect registers to write
 * @start_index: index of first indirect register to write
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Write TP PIO Registers
 **/
void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
		     u32 start_index, bool sleep_ok)
{
	/* rw = 0: write */
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 0, sleep_ok);
}
5734 5734
/**
 * t4_tp_tm_pio_read - Read TP TM PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect register to read
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read TP TM PIO Registers
 **/
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok)
{
	/* rw = 1: read */
	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
			  nregs, start_index, 1, sleep_ok);
}
5751 5751
/**
 * t4_tp_mib_read - Read TP MIB registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect register to read
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read TP MIB Registers
 **/
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
		    bool sleep_ok)
{
	/* rw = 1: read */
	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}
5768 5768
/**
 * t4_read_rss_key - read the global RSS key
 * @adap: the adapter
 * @key: 10-entry array holding the 320-bit RSS key
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Reads the global 320-bit RSS key.
 */
void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
{
	/* The key occupies 10 consecutive TP PIO registers starting at
	 * A_TP_RSS_SECRET_KEY0.
	 */
	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
}
5781 5781
5782 5782 /**
5783 5783 * t4_write_rss_key - program one of the RSS keys
5784 5784 * @adap: the adapter
5785 5785 * @key: 10-entry array holding the 320-bit RSS key
5786 5786 * @idx: which RSS key to write
5787 5787 * @sleep_ok: if true we may sleep while awaiting command completion
5788 5788 *
5789 5789 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5790 5790 * 0..15 the corresponding entry in the RSS key table is written,
5791 5791 * otherwise the global RSS key is written.
5792 5792 */
5793 5793 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5794 5794 bool sleep_ok)
5795 5795 {
5796 5796 u8 rss_key_addr_cnt = 16;
5797 5797 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
5798 5798
5799 5799 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5800 5800 * allows access to key addresses 16-63 by using KeyWrAddrX
5801 5801 * as index[5:4](upper 2) into key table
5802 5802 */
5803 5803 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5804 5804 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
5805 5805 rss_key_addr_cnt = 32;
5806 5806
5807 5807 t4_tp_pio_write(adap, (void *)key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5808 5808
5809 5809 if (idx >= 0 && idx < rss_key_addr_cnt) {
5810 5810 if (rss_key_addr_cnt > 16)
5811 5811 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5812 5812 vrt | V_KEYWRADDRX(idx >> 4) |
5813 5813 V_T6_VFWRADDR(idx) | F_KEYWREN);
5814 5814 else
5815 5815 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5816 5816 vrt| V_KEYWRADDR(idx) | F_KEYWREN);
5817 5817 }
5818 5818 }
5819 5819
/**
 * t4_read_rss_pf_config - read PF RSS Configuration Table
 * @adapter: the adapter
 * @index: the entry in the PF RSS table to read
 * @valp: where to store the returned value
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Reads the PF RSS Configuration Table at the specified index and returns
 * the value found there.
 */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp, bool sleep_ok)
{
	/* PF config entries are consecutive TP PIO registers. */
	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
}
5835 5835
/**
 * t4_write_rss_pf_config - write PF RSS Configuration Table
 * @adapter: the adapter
 * @index: the entry in the PF RSS table to write
 * @val: the value to store
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Writes the PF RSS Configuration Table at the specified index with the
 * specified value.
 */
void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
			    u32 val, bool sleep_ok)
{
	/* PF config entries are consecutive TP PIO registers. */
	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
			sleep_ok);
}
5852 5852
5853 5853 /**
5854 5854 * t4_read_rss_vf_config - read VF RSS Configuration Table
5855 5855 * @adapter: the adapter
5856 5856 * @index: the entry in the VF RSS table to read
5857 5857 * @vfl: where to store the returned VFL
5858 5858 * @vfh: where to store the returned VFH
5859 5859 * @sleep_ok: if true we may sleep while awaiting command completion
5860 5860 *
5861 5861 * Reads the VF RSS Configuration Table at the specified index and returns
5862 5862 * the (VFL, VFH) values found there.
5863 5863 */
5864 5864 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5865 5865 u32 *vfl, u32 *vfh, bool sleep_ok)
5866 5866 {
5867 5867 u32 vrt, mask, data;
5868 5868
5869 5869 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5870 5870 mask = V_VFWRADDR(M_VFWRADDR);
5871 5871 data = V_VFWRADDR(index);
5872 5872 } else {
5873 5873 mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
5874 5874 data = V_T6_VFWRADDR(index);
5875 5875 }
5876 5876 /*
5877 5877 * Request that the index'th VF Table values be read into VFL/VFH.
5878 5878 */
5879 5879 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5880 5880 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5881 5881 vrt |= data | F_VFRDEN;
5882 5882 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5883 5883
5884 5884 /*
5885 5885 * Grab the VFL/VFH values ...
5886 5886 */
5887 5887 t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5888 5888 t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5889 5889 }
5890 5890
5891 5891 /**
5892 5892 * t4_read_rss_pf_map - read PF RSS Map
5893 5893 * @adapter: the adapter
5894 5894 * @sleep_ok: if true we may sleep while awaiting command completion
5895 5895 *
5896 5896 * Reads the PF RSS Map register and returns its value.
5897 5897 */
5898 5898 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5899 5899 {
5900 5900 u32 pfmap;
5901 5901
5902 5902 t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5903 5903
5904 5904 return pfmap;
5905 5905 }
5906 5906
5907 5907 /**
5908 5908 * t4_read_rss_pf_mask - read PF RSS Mask
5909 5909 * @adapter: the adapter
5910 5910 * @sleep_ok: if true we may sleep while awaiting command completion
5911 5911 *
5912 5912 * Reads the PF RSS Mask register and returns its value.
5913 5913 */
5914 5914 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5915 5915 {
5916 5916 u32 pfmask;
5917 5917
5918 5918 t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5919 5919
5920 5920 return pfmask;
5921 5921 }
5922 5922
/**
 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
 * @adap: the adapter
 * @v4: holds the TCP/IP counter values
 * @v6: holds the TCP/IPv6 counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok)
{
	/* Buffer sized to cover the whole OUT_RST .. RXT_SEG_LO counter
	 * range in one MIB read.
	 */
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

	/* Offset of counter x within val[], relative to OUT_RST.  The v6
	 * read below reuses the same offsets relative to V6OUT_RST, which
	 * presumes the v6 counters mirror the v4 layout.
	 */
#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x) val[STAT_IDX(x)]
#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
5962 5962
/**
 * t4_tp_get_err_stats - read TP's error MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Returns the values of TP's error counters.
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
			 bool sleep_ok)
{
	/* Each per-channel counter group occupies nchan consecutive MIB
	 * registers starting at its channel-0 address.
	 */
	int nchan = adap->params.arch.nchan;

	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);

	/* Reads two consecutive counters starting at OFD_ARP_DROP into
	 * &st->ofld_no_neigh and the struct field that follows it --
	 * relies on those fields being adjacent in tp_err_stats.
	 */
	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
		       sleep_ok);
}
6003 6003
/**
 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Returns the values of TP's CPL counters.
 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->params.arch.nchan;

	/* One CPL request counter and one CPL response counter per channel. */
	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);

	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
}
6021 6021
/**
 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Returns the values of TP's RDMA counters.
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	/* Reads two consecutive counters starting at RQE_DFR_PKT into
	 * &st->rqe_dfr_pkt and the struct field that follows it -- relies
	 * on those fields being adjacent in tp_rdma_stats.
	 */
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
		       sleep_ok);
}
6035 6035
6036 6036 /**
6037 6037 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
6038 6038 * @adap: the adapter
6039 6039 * @idx: the port index
6040 6040 * @st: holds the counter values
6041 6041 * @sleep_ok: if true we may sleep while awaiting command completion
6042 6042 *
6043 6043 * Returns the values of TP's FCoE counters for the selected port.
6044 6044 */
6045 6045 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
6046 6046 struct tp_fcoe_stats *st, bool sleep_ok)
6047 6047 {
6048 6048 u32 val[2];
6049 6049
6050 6050 t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
6051 6051 sleep_ok);
6052 6052
6053 6053 t4_tp_mib_read(adap, &st->frames_drop, 1,
6054 6054 A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);
6055 6055
6056 6056 t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
6057 6057 sleep_ok);
6058 6058
6059 6059 st->octets_ddp = ((u64)val[0] << 32) | val[1];
6060 6060 }
6061 6061
6062 6062 /**
6063 6063 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
6064 6064 * @adap: the adapter
6065 6065 * @st: holds the counter values
6066 6066 * @sleep_ok: if true we may sleep while awaiting command completion
6067 6067 *
6068 6068 * Returns the values of TP's counters for non-TCP directly-placed packets.
6069 6069 */
6070 6070 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
6071 6071 bool sleep_ok)
6072 6072 {
6073 6073 u32 val[4];
6074 6074
6075 6075 t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
6076 6076
6077 6077 st->frames = val[0];
6078 6078 st->drops = val[1];
6079 6079 st->octets = ((u64)val[2] << 32) | val[3];
6080 6080 }
6081 6081
6082 6082 /**
6083 6083 * t4_read_mtu_tbl - returns the values in the HW path MTU table
6084 6084 * @adap: the adapter
6085 6085 * @mtus: where to store the MTU values
6086 6086 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
6087 6087 *
6088 6088 * Reads the HW path MTU table.
6089 6089 */
6090 6090 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
6091 6091 {
6092 6092 u32 v;
6093 6093 int i;
6094 6094
6095 6095 for (i = 0; i < NMTUS; ++i) {
6096 6096 t4_write_reg(adap, A_TP_MTU_TABLE,
6097 6097 V_MTUINDEX(0xffU) | V_MTUVALUE(i));
6098 6098 v = t4_read_reg(adap, A_TP_MTU_TABLE);
6099 6099 mtus[i] = G_MTUVALUE(v);
6100 6100 if (mtu_log)
6101 6101 mtu_log[i] = G_MTUWIDTH(v);
6102 6102 }
6103 6103 }
6104 6104
6105 6105 /**
6106 6106 * t4_read_cong_tbl - reads the congestion control table
6107 6107 * @adap: the adapter
6108 6108 * @incr: where to store the alpha values
6109 6109 *
6110 6110 * Reads the additive increments programmed into the HW congestion
6111 6111 * control table.
6112 6112 */
6113 6113 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
6114 6114 {
6115 6115 unsigned int mtu, w;
6116 6116
6117 6117 for (mtu = 0; mtu < NMTUS; ++mtu)
6118 6118 for (w = 0; w < NCCTRL_WIN; ++w) {
6119 6119 t4_write_reg(adap, A_TP_CCTRL_TABLE,
6120 6120 V_ROWINDEX(0xffffU) | (mtu << 5) | w);
6121 6121 incr[mtu][w] = (u16)t4_read_reg(adap,
6122 6122 A_TP_CCTRL_TABLE) & 0x1fff;
6123 6123 }
6124 6124 }
6125 6125
6126 6126 /**
6127 6127 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
6128 6128 * @adap: the adapter
6129 6129 * @addr: the indirect TP register address
6130 6130 * @mask: specifies the field within the register to modify
6131 6131 * @val: new value for the field
6132 6132 *
6133 6133 * Sets a field of an indirect TP register to the given value.
6134 6134 */
6135 6135 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
6136 6136 unsigned int mask, unsigned int val)
6137 6137 {
6138 6138 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
6139 6139 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
6140 6140 t4_write_reg(adap, A_TP_PIO_DATA, val);
6141 6141 }
6142 6142
/**
 * init_cong_ctrl - initialize congestion control parameters
 * @a: the alpha values for congestion control
 * @b: the beta values for congestion control
 *
 * Initialize the congestion control parameters by copying the default
 * per-window alpha and beta tables into the caller's arrays.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	static const unsigned short dflt_alpha[32] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short dflt_beta[32] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	int i;

	for (i = 0; i < 32; i++) {
		a[i] = dflt_alpha[i];
		b[i] = dflt_beta[i];
	}
}
6186 6186
6187 6187 /* The minimum additive increment value for the congestion control table */
6188 6188 #define CC_MIN_INCR 2U
6189 6189
/**
 * t4_load_mtus - write the MTU and congestion control HW tables
 * @adap: the adapter
 * @mtus: the values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
 * @beta: the values for the congestion control beta parameter
 *
 * Write the HW MTU table with the supplied MTUs and the high-speed
 * congestion control table with the supplied alpha, beta, and MTUs.
 * We write the two tables together because the additive increments
 * depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Average packet counts per congestion control window, used to
	 * scale the per-window additive increment below.
	 */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/* Round the base-2 log down when the bit two below the MSB
		 * is clear.
		 */
		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* Additive increment: alpha scaled by (MTU - 40
			 * bytes of headers) over the window's average
			 * packet count, clamped to the minimum increment.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			/* Pack MTU index, window, beta and increment into
			 * one congestion control table entry.
			 */
			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
6233 6233
/*
 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
 * clocks.  The formula is
 *
 *	bytes/s = bytes256 * 256 * ClkFreq / 4096
 *
 * which is equivalent to
 *
 *	bytes/s = 62.5 * bytes256 * ClkFreq_ms
 */
static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
{
	u64 v = bytes256 * adap->params.vpd.cclk;

	/* 62.5 * v in integer arithmetic: 62*v + v/2 */
	return v * 62 + v / 2;
}
6250 6250
6251 6251 /**
6252 6252 * t4_get_chan_txrate - get the current per channel Tx rates
6253 6253 * @adap: the adapter
6254 6254 * @nic_rate: rates for NIC traffic
6255 6255 * @ofld_rate: rates for offloaded traffic
6256 6256 *
6257 6257 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
6258 6258 * for each channel.
6259 6259 */
6260 6260 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
6261 6261 {
6262 6262 u32 v;
6263 6263
6264 6264 v = t4_read_reg(adap, A_TP_TX_TRATE);
6265 6265 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
6266 6266 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
6267 6267 if (adap->params.arch.nchan == NCHAN) {
6268 6268 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
6269 6269 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
6270 6270 }
6271 6271
6272 6272 v = t4_read_reg(adap, A_TP_TX_ORATE);
6273 6273 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
6274 6274 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
6275 6275 if (adap->params.arch.nchan == NCHAN) {
6276 6276 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
6277 6277 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
6278 6278 }
6279 6279 }
6280 6280
/**
 * t4_set_trace_filter - configure one of the tracing filters
 * @adap: the adapter
 * @tp: the desired trace filter parameters
 * @idx: which filter to configure
 * @enable: whether to enable or disable the filter
 *
 * Configures one of the tracing filters available in HW.  If @enable is
 * %0 @tp is not examined and may be %NULL.  The user is responsible to
 * set the single/multiple trace mode by writing to A_MPS_TRC_CFG register
 * by using "cxgbtool iface reg reg_addr=val" command.  See t4_sniffer/
 * docs/readme.txt for a complete description of how to setup tracing on
 * T4.
 */
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
			int enable)
{
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg, cfg;

	/* Disabling only requires clearing the filter's match control. */
	if (!enable) {
		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
		return 0;
	}

	/*
	 * TODO - After T4 data book is updated, specify the exact
	 * section below.
	 *
	 * See T4 data book - MPS section for a complete description
	 * of the below if..else handling of A_MPS_TRC_CFG register
	 * value.
	 */
	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
	if (cfg & F_TRCMULTIFILTER) {
		/*
		 * If multiple tracers are enabled, then maximum
		 * capture size is 2.5KB (FIFO size of a single channel)
		 * minus 2 flits for CPL_TRACE_PKT header.
		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
			return -EINVAL;
	}
	else {
		/*
		 * If multiple tracers are disabled, to avoid deadlocks
		 * maximum packet capture size of 9600 bytes is recommended.
		 * Also in this mode, only trace0 can be enabled and running.
		 */
		if (tp->snap_len > 9600 || idx)
			return -EINVAL;
	}

	/* Validate the remaining parameters against the hardware field
	 * widths.
	 */
	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
	    tp->min_len > M_TFMINPKTSIZE)
		return -EINVAL;

	/* stop the tracer we'll be changing */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);

	/* Match/don't-care register banks are spaced by the per-filter
	 * stride.
	 */
	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;

	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		t4_write_reg(adap, data_reg, tp->data[i]);
		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
	}
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
		     V_TFCAPTUREMAX(tp->snap_len) |
		     V_TFMINPKTSIZE(tp->min_len));
	/* T4 and T5+ lay out the enable/port/invert fields differently. */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
		     (is_t4(adap->params.chip) ?
		      V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert) :
		      V_T5_TFPORT(tp->port) | F_T5_TFEN |
		      V_T5_TFINVERTMATCH(tp->invert)));

	return 0;
}
6362 6362
6363 6363 /**
6364 6364 * t4_get_trace_filter - query one of the tracing filters
6365 6365 * @adap: the adapter
6366 6366 * @tp: the current trace filter parameters
6367 6367 * @idx: which trace filter to query
6368 6368 * @enabled: non-zero if the filter is enabled
6369 6369 *
6370 6370 * Returns the current settings of one of the HW tracing filters.
6371 6371 */
6372 6372 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
6373 6373 int *enabled)
6374 6374 {
6375 6375 u32 ctla, ctlb;
6376 6376 int i, ofst = idx * 4;
6377 6377 u32 data_reg, mask_reg;
6378 6378
6379 6379 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
6380 6380 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
6381 6381
6382 6382 if (is_t4(adap->params.chip)) {
6383 6383 *enabled = !!(ctla & F_TFEN);
6384 6384 tp->port = G_TFPORT(ctla);
6385 6385 tp->invert = !!(ctla & F_TFINVERTMATCH);
6386 6386 } else {
6387 6387 *enabled = !!(ctla & F_T5_TFEN);
6388 6388 tp->port = G_T5_TFPORT(ctla);
6389 6389 tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
6390 6390 }
6391 6391 tp->snap_len = G_TFCAPTUREMAX(ctlb);
6392 6392 tp->min_len = G_TFMINPKTSIZE(ctlb);
6393 6393 tp->skip_ofst = G_TFOFFSET(ctla);
6394 6394 tp->skip_len = G_TFLENGTH(ctla);
6395 6395
6396 6396 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
6397 6397 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
6398 6398 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
6399 6399
6400 6400 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6401 6401 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
6402 6402 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
6403 6403 }
6404 6404 }
6405 6405
6406 6406 /**
6407 6407 * t4_read_tcb - read a hardware TCP Control Block structure
6408 6408 * @adap: the adapter
6409 6409 * @win: PCI-E Memory Window to use
6410 6410 * @tid: the TCB ID
6411 6411 * @tcb: the buffer to return the TCB in
6412 6412 *
6413 6413 * Reads the indicated hardware TCP Control Block and returns it in
6414 6414 * the supplied buffer. Returns 0 on success.
6415 6415 */
6416 6416 int t4_read_tcb(struct adapter *adap, int win, int tid, u32 tcb[TCB_SIZE/4])
6417 6417 {
6418 6418 u32 tcb_base = t4_read_reg(adap, A_TP_CMM_TCB_BASE);
6419 6419 u32 tcb_addr = tcb_base + tid * TCB_SIZE;
6420 6420 __be32 raw_tcb[TCB_SIZE/4];
6421 6421 int ret, word;
6422 6422
6423 6423 ret = t4_memory_rw_addr(adap, win,
6424 6424 tcb_addr, sizeof raw_tcb, raw_tcb,
6425 6425 T4_MEMORY_READ);
6426 6426 if (ret)
6427 6427 return ret;
6428 6428
6429 6429 for (word = 0; word < 32; word++)
6430 6430 tcb[word] = be32_to_cpu(raw_tcb[word]);
6431 6431 return 0;
6432 6432 }
6433 6433
6434 6434 /**
6435 6435 * t4_pmtx_get_stats - returns the HW stats from PMTX
6436 6436 * @adap: the adapter
6437 6437 * @cnt: where to store the count statistics
6438 6438 * @cycles: where to store the cycle statistics
6439 6439 *
6440 6440 * Returns performance statistics from PMTX.
6441 6441 */
6442 6442 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6443 6443 {
6444 6444 int i;
6445 6445 u32 data[2];
6446 6446
6447 6447 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6448 6448 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
6449 6449 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
6450 6450 if (is_t4(adap->params.chip)) {
6451 6451 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
6452 6452 } else {
6453 6453 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
6454 6454 A_PM_TX_DBG_DATA, data, 2,
6455 6455 A_PM_TX_DBG_STAT_MSB);
6456 6456 cycles[i] = (((u64)data[0] << 32) | data[1]);
6457 6457 }
6458 6458 }
6459 6459 }
6460 6460
6461 6461 /**
6462 6462 * t4_pmrx_get_stats - returns the HW stats from PMRX
6463 6463 * @adap: the adapter
6464 6464 * @cnt: where to store the count statistics
6465 6465 * @cycles: where to store the cycle statistics
6466 6466 *
6467 6467 * Returns performance statistics from PMRX.
6468 6468 */
6469 6469 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6470 6470 {
6471 6471 int i;
6472 6472 u32 data[2];
6473 6473
6474 6474 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6475 6475 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
6476 6476 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
6477 6477 if (is_t4(adap->params.chip)) {
6478 6478 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
6479 6479 } else {
6480 6480 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
6481 6481 A_PM_RX_DBG_DATA, data, 2,
6482 6482 A_PM_RX_DBG_STAT_MSB);
6483 6483 cycles[i] = (((u64)data[0] << 32) | data[1]);
6484 6484 }
6485 6485 }
6486 6486 }
6487 6487
6488 6488 /**
6489 6489 * t4_get_mps_bg_map - return the buffer groups associated with a port
6490 6490 * @adapter: the adapter
6491 6491 * @pidx: the port index
6492 6492 *
6493 6493 * Returns a bitmap indicating which MPS buffer groups are associated
6494 6494 * with the given Port. Bit i is set if buffer group i is used by the
6495 6495 * Port.
6496 6496 */
6497 6497 unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
6498 6498 {
6499 6499 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6500 6500 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6501 6501 u32 param, val;
6502 6502 int ret;
6503 6503
6504 6504 if (pidx >= nports) {
6505 6505 CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n", pidx, nports);
6506 6506 return 0;
6507 6507 }
6508 6508
6509 6509 /* FW version >= 1.16.34.0 can determine buffergroup map using
6510 6510 * FW_PARAMS_PARAM_DEV_MPSBGMAP API. We will initially try to
6511 6511 * use this API. If it fails, revert back to old hardcoded way.
6512 6512 * The value obtained from FW is encoded in below format
6513 6513 * val = (( MPSBGMAP[Port 3] << 24 ) |
6514 6514 * ( MPSBGMAP[Port 2] << 16 ) |
6515 6515 * ( MPSBGMAP[Port 1] << 8 ) |
6516 6516 * ( MPSBGMAP[Port 0] << 0 ))
6517 6517 */
6518 6518 if (adapter->flags & FW_OK) {
6519 6519 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6520 6520 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MPSBGMAP));
6521 6521 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6522 6522 0, 1, ¶m, &val);
6523 6523 if (!ret)
6524 6524 return (val >> (8 * pidx)) & 0xff;
6525 6525 }
6526 6526
6527 6527 /* FW_PARAMS_PARAM_DEV_MPSBGMAP API has failed. Falling back to driver
6528 6528 * to determine bgmap.
6529 6529 */
6530 6530 switch (chip_version) {
6531 6531 case CHELSIO_T4:
6532 6532 case CHELSIO_T5:
6533 6533 switch (nports) {
6534 6534 case 1: return 0xf;
6535 6535 case 2: return 3 << (2 * pidx);
6536 6536 case 4: return 1 << pidx;
6537 6537 }
6538 6538 break;
6539 6539
6540 6540 case CHELSIO_T6:
6541 6541 switch (nports) {
6542 6542 case 2: return 1 << (2 * pidx);
6543 6543 }
6544 6544 break;
6545 6545 }
6546 6546
6547 6547 CH_ERR(adapter, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
6548 6548 chip_version, nports);
6549 6549 return 0;
6550 6550 }
6551 6551
6552 6552 /**
6553 6553 * t4_get_tp_e2c_map - return the E2C channel map associated with a port
6554 6554 * @adapter: the adapter
6555 6555 * @pidx: the port index
6556 6556 */
6557 6557 unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
6558 6558 {
6559 6559 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6560 6560 u32 param, val = 0;
6561 6561 int ret;
6562 6562
6563 6563 if (pidx >= nports) {
6564 6564 CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n", pidx, nports);
6565 6565 return 0;
6566 6566 }
6567 6567
6568 6568 /* FW version >= 1.16.44.0 can determine E2C channel map using
6569 6569 * FW_PARAMS_PARAM_DEV_TPCHMAP API.
6570 6570 */
6571 6571 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6572 6572 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPCHMAP));
6573 6573 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6574 6574 0, 1, ¶m, &val);
6575 6575 if (!ret)
6576 6576 return (val >> (8*pidx)) & 0xff;
6577 6577
6578 6578 return 0;
6579 6579 }
6580 6580
6581 6581 /**
6582 6582 * t4_get_tp_ch_map - return TP ingress channels associated with a port
6583 6583 * @adapter: the adapter
6584 6584 * @pidx: the port index
6585 6585 *
6586 6586 * Returns a bitmap indicating which TP Ingress Channels are associated with
6587 6587 * a given Port. Bit i is set if TP Ingress Channel i is used by the Port.
6588 6588 */
6589 6589 unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx)
6590 6590 {
6591 6591 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6592 6592 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6593 6593
6594 6594 if (pidx >= nports) {
6595 6595 CH_WARN(adapter, "TP Port Index %d >= Nports %d\n", pidx, nports);
6596 6596 return 0;
6597 6597 }
6598 6598
6599 6599 switch (chip_version) {
6600 6600 case CHELSIO_T4:
6601 6601 case CHELSIO_T5:
6602 6602 /*
6603 6603 * Note that this happens to be the same values as the MPS
6604 6604 * Buffer Group Map for these Chips. But we replicate the code
6605 6605 * here because they're really separate concepts.
6606 6606 */
6607 6607 switch (nports) {
6608 6608 case 1: return 0xf;
6609 6609 case 2: return 3 << (2 * pidx);
6610 6610 case 4: return 1 << pidx;
6611 6611 }
6612 6612 break;
6613 6613
6614 6614 case CHELSIO_T6:
6615 6615 switch (nports) {
6616 6616 case 2: return 1 << pidx;
6617 6617 }
6618 6618 break;
6619 6619 }
6620 6620
6621 6621 CH_ERR(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
6622 6622 chip_version, nports);
6623 6623 return 0;
6624 6624 }
6625 6625
6626 6626 /**
6627 6627 * t4_get_port_type_description - return Port Type string description
6628 6628 * @port_type: firmware Port Type enumeration
6629 6629 */
6630 6630 const char *t4_get_port_type_description(enum fw_port_type port_type)
6631 6631 {
6632 6632 static const char *const port_type_description[] = {
6633 6633 "Fiber_XFI",
6634 6634 "Fiber_XAUI",
6635 6635 "BT_SGMII",
6636 6636 "BT_XFI",
6637 6637 "BT_XAUI",
6638 6638 "KX4",
6639 6639 "CX4",
6640 6640 "KX",
6641 6641 "KR",
6642 6642 "SFP",
6643 6643 "BP_AP",
6644 6644 "BP4_AP",
6645 6645 "QSFP_10G",
6646 6646 "QSA",
6647 6647 "QSFP",
6648 6648 "BP40_BA",
6649 6649 "KR4_100G",
6650 6650 "CR4_QSFP",
6651 6651 "CR_QSFP",
6652 6652 "CR2_QSFP",
6653 6653 "SFP28",
6654 6654 "KR_SFP28",
6655 6655 };
6656 6656
6657 6657 if (port_type < ARRAY_SIZE(port_type_description))
6658 6658 return port_type_description[port_type];
6659 6659 return "UNKNOWN";
6660 6660 }
6661 6661
6662 6662 /**
6663 6663 * t4_get_port_stats_offset - collect port stats relative to a previous
6664 6664 * snapshot
6665 6665 * @adap: The adapter
6666 6666 * @idx: The port
6667 6667 * @stats: Current stats to fill
6668 6668 * @offset: Previous stats snapshot
6669 6669 */
6670 6670 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6671 6671 struct port_stats *stats,
6672 6672 struct port_stats *offset)
6673 6673 {
6674 6674 u64 *s, *o;
6675 6675 int i;
6676 6676
6677 6677 t4_get_port_stats(adap, idx, stats);
6678 6678 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
6679 6679 i < (sizeof(struct port_stats)/sizeof(u64)) ;
6680 6680 i++, s++, o++)
6681 6681 *s -= *o;
6682 6682 }
6683 6683
/**
 *	t4_get_port_stats - collect port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *	@p: the stats structure to fill
 *
 *	Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

/*
 * Each counter is a 64-bit value read from its _L (low) register address;
 * the per-port register base differs between T4 and T5+ chips.
 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop		= GET_STAT(TX_PORT_DROP);
	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);

	/*
	 * On T5+, the hardware may be configured to include pause frames in
	 * the TX frame/byte/mcast counters; back them out so the reported
	 * numbers exclude pause traffic.  (Pause frames are 64 bytes each.)
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATTX) {
			p->tx_frames -= p->tx_pause;
			p->tx_octets -= p->tx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCTX)
			p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);

	/* Same pause-frame adjustment as above, for the RX direction. */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATRX) {
			p->rx_frames -= p->rx_pause;
			p->rx_octets -= p->rx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCRX)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/*
	 * Drop/truncate counters are per buffer group; only read the ones
	 * for buffer groups this port actually owns (per the MPS bg map).
	 */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6785 6785
/**
 *	t4_get_lb_stats - collect loopback port statistics
 *	@adap: the adapter
 *	@idx: the loopback port index
 *	@p: the stats structure to fill
 *
 *	Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);

/*
 * Loopback counters are 64-bit values read via their _L (low) register
 * address; the register base differs between T4 and T5+ chips.
 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? \
	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets		= GET_STAT(BYTES);
	p->frames		= GET_STAT(FRAMES);
	p->bcast_frames		= GET_STAT(BCAST);
	p->mcast_frames		= GET_STAT(MCAST);
	p->ucast_frames		= GET_STAT(UCAST);
	p->error_frames		= GET_STAT(ERROR);

	p->frames_64		= GET_STAT(64B);
	p->frames_65_127	= GET_STAT(65B_127B);
	p->frames_128_255	= GET_STAT(128B_255B);
	p->frames_256_511	= GET_STAT(256B_511B);
	p->frames_512_1023	= GET_STAT(512B_1023B);
	p->frames_1024_1518	= GET_STAT(1024B_1518B);
	p->frames_1519_max	= GET_STAT(1519B_MAX);
	p->drop			= GET_STAT(DROP_FRAMES);

	/*
	 * Drop/truncate counters are per buffer group; only read the ones
	 * for buffer groups this port actually owns (per the MPS bg map).
	 */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6833 6833
/* t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @rqtype: the filter Request Type: 0 => IPv4, 1 => IPv6
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid
 * is negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr,
		     int rqtype, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	/* WR length is expressed in 16-byte units. */
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	/* NOREPLY is set when no notification queue was supplied. */
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_RQTYPE(rqtype) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
			cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}
6857 6857
/*
 * Initialize the common header of firmware command @cmd in @var: set the
 * opcode, the REQUEST flag and the READ/WRITE flag, and fill in the
 * command length (in 16-byte units) via FW_LEN16.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
6864 6864
/**
 *	t4_fwaddrspace_write - write to a register in the firmware address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the register address
 *	@val: the value to write
 *
 *	Issues a FW_LDST command (addrspace FW_LDST_ADDRSPC_FIRMWARE) asking
 *	the firmware to perform the register write on our behalf.  Returns
 *	the mailbox command result.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST |
					F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
6883 6883
/**
 *	t4_mdio_rd - read a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to read
 *	@valp: where to store the value
 *
 *	Issues a FW command through the given mailbox to read a PHY register.
 *	Returns 0 on success; on success *@valp holds the 16-bit register
 *	value.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int *valp)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		/* The firmware returns the register value in mdio.rval. */
		*valp = be16_to_cpu(c.u.mdio.rval);
	return ret;
}
6917 6917
/**
 *	t4_mdio_wr - write a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to write
 *	@val: value to write
 *
 *	Issues a FW command through the given mailbox to write a PHY register.
 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);
	/* Only the low 16 bits of @val are written. */
	c.u.mdio.rval = cpu_to_be16(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
6948 6948
/**
 *	t4_sge_decode_idma_state - decode the idma state
 *	@adapter: the adapter
 *	@state: the state idma is stuck in
 *
 *	Logs a human-readable name for the given SGE IDMA state (the
 *	state-name tables differ per chip generation) and then dumps a
 *	small set of SGE debug registers for diagnosis.
 */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
	/* T4 IDMA state names, indexed by hardware state number. */
	static const char * const t4_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"Not used",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	};
	/* T5 IDMA state names, indexed by hardware state number. */
	static const char * const t5_decode[] = {
		"IDMA_IDLE",
		"IDMA_ALMOST_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* T6 IDMA state names, indexed by hardware state number. */
	static const char * const t6_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* SGE debug registers dumped alongside the decoded state. */
	static const u32 sge_regs[] = {
		A_SGE_DEBUG_DATA_LOW_INDEX_2,
		A_SGE_DEBUG_DATA_LOW_INDEX_3,
		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
	};
	const char **sge_idma_decode;
	int sge_idma_decode_nstates;
	int i;
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);

	/* Select the right set of decode strings to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
	case CHELSIO_T4:
		sge_idma_decode = (const char **)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		break;

	case CHELSIO_T5:
		sge_idma_decode = (const char **)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
		break;

	case CHELSIO_T6:
		sge_idma_decode = (const char **)t6_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
		break;

	default:
		CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
		return;
	}

	/* Unknown/out-of-range states are reported numerically. */
	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
	else
		CH_WARN(adapter, "idma state %d unknown\n", state);

	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}
7105 7105
7106 7106 /**
7107 7107 * t4_sge_ctxt_flush - flush the SGE context cache
7108 7108 * @adap: the adapter
7109 7109 * @mbox: mailbox to use for the FW command
7110 7110 *
7111 7111 * Issues a FW command through the given mailbox to flush the
7112 7112 * SGE context cache.
7113 7113 */
7114 7114 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
7115 7115 {
7116 7116 int ret;
7117 7117 u32 ldst_addrspace;
7118 7118 struct fw_ldst_cmd c;
7119 7119
7120 7120 memset(&c, 0, sizeof(c));
7121 7121 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
7122 7122 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7123 7123 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7124 7124 ldst_addrspace);
7125 7125 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7126 7126 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
7127 7127
7128 7128 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7129 7129 return ret;
7130 7130 }
7131 7131
/**
 * t4_fw_hello - establish communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @evt_mbox: mailbox to receive async FW events
 * @master: specifies the caller's willingness to be the device master
 * @state: returns the current device state (if non-NULL)
 *
 * Issues a command to establish communication with FW.  Returns either
 * an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	/*
	 * If we MUST be master we both disable other masters and force our
	 * own mailbox; otherwise we let the firmware pick (MBMASTER field
	 * set to its "any" value M_FW_HELLO_CMD_MBMASTER).
	 */
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			/* Poll in 50ms increments against the budget. */
			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If the HELLO reply didn't name a Master PF but the
			 * PCIE_FW register now advertises a valid one, grab
			 * its identity for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}
7261 7261
7262 7262 /**
7263 7263 * t4_fw_bye - end communication with FW
7264 7264 * @adap: the adapter
7265 7265 * @mbox: mailbox to use for the FW command
7266 7266 *
7267 7267 * Issues a command to terminate communication with FW.
7268 7268 */
7269 7269 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
7270 7270 {
7271 7271 struct fw_bye_cmd c;
7272 7272
7273 7273 memset(&c, 0, sizeof(c));
7274 7274 INIT_CMD(c, BYE, WRITE);
7275 7275 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7276 7276 }
7277 7277
7278 7278 /**
7279 7279 * t4_fw_reset - issue a reset to FW
7280 7280 * @adap: the adapter
7281 7281 * @mbox: mailbox to use for the FW command
7282 7282 * @reset: specifies the type of reset to perform
7283 7283 *
7284 7284 * Issues a reset command of the specified type to FW.
7285 7285 */
7286 7286 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7287 7287 {
7288 7288 struct fw_reset_cmd c;
7289 7289
7290 7290 memset(&c, 0, sizeof(c));
7291 7291 INIT_CMD(c, RESET, WRITE);
7292 7292 c.val = cpu_to_be32(reset);
7293 7293 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7294 7294 }
7295 7295
/**
 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state.  The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * M_PCIE_FW_MASTER).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing.  The only way out of this state is to RESTART the firmware
 * ...
 */
static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ... We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}
7355 7355
/**
 * t4_fw_restart - restart the firmware by taking the uP out of RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if valid)
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by t4_fw_halt().  On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 * 1. If we're dealing with newer firmware we'll simply want to take
 *    the chip's microprocessor out of RESET.  This will cause the
 *    firmware to start up from its start vector.  And then we'll loop
 *    until the firmware indicates it's started again (PCIE_FW.HALT
 *    reset to 0) or we timeout.
 *
 * 2. If we're dealing with older firmware then we'll need to RESET
 *    the chip since older firmware won't recognize the PCIE_FW.HALT
 *    flag and automatically RESET itself on startup.
 */
static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= M_PCIE_FW_MASTER) {
			/* Release the uP first so it can process the RESET. */
			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					F_PIORST | F_PIORSTMODE) == 0)
				return 0;
		}

		/* The hammer: a full PIO reset, then wait for it to settle. */
		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		/*
		 * Take the uP out of RESET and poll PCIE_FW.HALT until the
		 * firmware announces it has restarted, or we time out.
		 */
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
				return FW_SUCCESS;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
7418 7418
/**
 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if firmware doesn't cooperate
 *
 * Perform all of the steps necessary for upgrading an adapter's
 * firmware image.  Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state.  On unsuccessful return the adapter may be completely hosed ...
 * positive errno indicates that the adapter is ~probably~ intact, a
 * negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	/* A bootstrap image is flashed without halting/restarting the FW. */
	unsigned int bootstrap =
	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
	int reset, ret;

	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	/* Disable FW_OK flag so that mbox commands with FW_OK flag set
	 * won't be sent while we are flashing the FW.
	 */
	adap->flags &= ~FW_OK;

	if (!bootstrap) {
		ret = t4_fw_halt(adap, mbox, force);
		if (ret < 0 && !force)
			goto out;
	}

	ret = t4_load_fw(adap, fw_data, size, bootstrap);
	if (ret < 0 || bootstrap)
		goto out;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	ret = t4_fw_restart(adap, mbox, reset);
out:
	/* Re-enable mailbox traffic regardless of outcome. */
	adap->flags |= FW_OK;
	return ret;
}
7480 7480
/**
 * t4_fl_pkt_align - return the fl packet alignment
 * @adap: the adapter
 * @is_packed: true when the driver uses packed FLM mode
 *
 * T4 has a single field to specify the packing and padding boundary.
 * T5 onwards has separate fields for this and hence the alignment for
 * next packet offset is maximum of these two.
 */
int t4_fl_pkt_align(struct adapter *adap, bool is_packed)
{
	u32 sge_control, sge_control2;
	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;

	sge_control = t4_read_reg(adap, A_SGE_CONTROL);

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately.  The actual Ingress Packet Data alignment boundary
	 * within Packed Buffer Mode is the maximum of these two
	 * specifications.  (Note that it makes no real practical sense to
	 * have the Padding Boundary be larger than the Packing Boundary but
	 * you could set the chip up that way and, in fact, legacy T4 code
	 * would end up doing this because it would initialize the Padding
	 * Boundary and leave the Packing Boundary initialized to 0 (16
	 * bytes).)
	 * Padding Boundary values in T6 starts from 8B,
	 * where as it is 32B for T4 and T5.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		ingpad_shift = X_INGPADBOUNDARY_SHIFT;
	else
		ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;

	ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);

	fl_align = ingpadboundary;
	if (!is_t4(adap->params.chip) && is_packed) {
		/* T5 has a weird interpretation of one of the PCIe Packing
		 * Boundary values: the encoding 0 means 16 bytes rather than
		 * the 32 bytes the shift formula would give.
		 */
		sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
		ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
		if (ingpackboundary == X_INGPACKBOUNDARY_16B)
			ingpackboundary = 16;
		else
			ingpackboundary = 1 << (ingpackboundary +
						X_INGPACKBOUNDARY_SHIFT);

		fl_align = max(ingpadboundary, ingpackboundary);
	}
	return fl_align;
}
7534 7534
/**
 * t4_fixup_host_params_compat - fix up host-dependent parameters
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 * @chip_compat: maintain compatibility with designated chip
 *
 * Various registers in the chip contain values which are dependent on the
 * host's Base Page and Cache Line Sizes.  This function will fix all of
 * those registers with the appropriate values as passed in ...
 *
 * @chip_compat is used to limit the set of changes that are made
 * to be compatible with the indicated chip release.  This is used by
 * drivers to maintain compatibility with chip register settings when
 * the drivers haven't [yet] been updated with new chip support.
 */
int t4_fixup_host_params_compat(struct adapter *adap,
				unsigned int page_size,
				unsigned int cache_line_size,
				enum chip_type chip_compat)
{
	/* Host page size encoded as log2(page_size) - 10 per HW format. */
	unsigned int page_shift = fls(page_size) - 1;
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	/* The free-list alignment is the cache line, floored at 32 bytes. */
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
		     V_HOSTPAGESIZEPF0(sge_hps) |
		     V_HOSTPAGESIZEPF1(sge_hps) |
		     V_HOSTPAGESIZEPF2(sge_hps) |
		     V_HOSTPAGESIZEPF3(sge_hps) |
		     V_HOSTPAGESIZEPF4(sge_hps) |
		     V_HOSTPAGESIZEPF5(sge_hps) |
		     V_HOSTPAGESIZEPF6(sge_hps) |
		     V_HOSTPAGESIZEPF7(sge_hps));

	if (is_t4(adap->params.chip) || is_t4(chip_compat)) {
		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(fl_align_log -
						  X_INGPADBOUNDARY_SHIFT) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
	} else {
		unsigned int pack_align;
		unsigned int ingpad, ingpack;
		unsigned int pcie_cap;

		/* T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.
		 */

		/* We want the Packing Boundary to be based on the Cache Line
		 * Size in order to help avoid False Sharing performance
		 * issues between CPUs, etc.  We also want the Packing
		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
		 * get best performance when the Packing Boundary is a
		 * multiple of the Maximum Payload Size.
		 */
		pack_align = fl_align;
		pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
		if (pcie_cap) {
			unsigned int mps, mps_log;
			u16 devctl;

			/*
			 * The PCIe Device Control Maximum Payload Size field
			 * [bits 7:5] encodes sizes as powers of 2 starting at
			 * 128 bytes.
			 */
			t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
					    &devctl);
			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
			mps = 1 << mps_log;
			if (mps > pack_align)
				pack_align = mps;
		}

		/* N.B. T5/T6 have a crazy special interpretation of the "0"
		 * value for the Packing Boundary.  This corresponds to 16
		 * bytes instead of the expected 32 bytes.  So if we want 32
		 * bytes, the best we can really do is 64 bytes ...
		 */
		if (pack_align <= 16) {
			ingpack = X_INGPACKBOUNDARY_16B;
			fl_align = 16;
		} else if (pack_align == 32) {
			ingpack = X_INGPACKBOUNDARY_64B;
			fl_align = 64;
		} else {
			unsigned int pack_align_log = fls(pack_align) - 1;
			ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
			fl_align = pack_align;
		}

		/* Use the smallest Ingress Padding which isn't smaller than
		 * the Memory Controller Read/Write Size.  We'll take that as
		 * being 8 bytes since we don't know of any system with a
		 * wider Memory Controller Bus Width.
		 */
		if (is_t5(adap->params.chip))
			ingpad = X_INGPADBOUNDARY_32B;
		else
			ingpad = X_T6_INGPADBOUNDARY_8B;

		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(ingpad) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
		t4_set_reg_field(adap, A_SGE_CONTROL2,
				 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
				 V_INGPACKBOUNDARY(ingpack));
	}
	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align-1)
		     & ~(fl_align-1));
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align-1)
		     & ~(fl_align-1));

	/* TDDP page-size order relative to a 4KB base (page_shift - 12). */
	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));

	return 0;
}
7694 7694
7695 7695 /**
7696 7696 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
7697 7697 * @adap: the adapter
7698 7698 * @page_size: the host's Base Page Size
7699 7699 * @cache_line_size: the host's Cache Line Size
7700 7700 *
7701 7701 * Various registers in T4 contain values which are dependent on the
7702 7702 * host's Base Page and Cache Line Sizes. This function will fix all of
7703 7703 * those registers with the appropriate values as passed in ...
7704 7704 *
7705 7705 * This routine makes changes which are compatible with T4 chips.
7706 7706 */
7707 7707 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7708 7708 unsigned int cache_line_size)
7709 7709 {
7710 7710 return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
7711 7711 T4_LAST_REV);
7712 7712 }
7713 7713
7714 7714 /**
7715 7715 * t4_fw_initialize - ask FW to initialize the device
7716 7716 * @adap: the adapter
7717 7717 * @mbox: mailbox to use for the FW command
7718 7718 *
7719 7719 * Issues a command to FW to partially initialize the device. This
7720 7720 * performs initialization that generally doesn't depend on user input.
7721 7721 */
7722 7722 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7723 7723 {
7724 7724 struct fw_initialize_cmd c;
7725 7725
7726 7726 memset(&c, 0, sizeof(c));
7727 7727 INIT_CMD(c, INITIALIZE, WRITE);
7728 7728 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7729 7729 }
7730 7730
/**
 * t4_query_params_rw - query FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 * @rw: Write and read flag
 * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
 *
 * Reads the value of FW or device parameters.  Up to 7 parameters can be
 * queried at once.  When @rw is set the current contents of @val are
 * sent with the query as well.
 */
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val, int rw, bool sleep_ok)
{
	int i, ret;
	struct fw_params_cmd c;
	/* Walk the (mnem, val) pairs in the command as a flat __be32 array. */
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
		if (rw)
			*p = cpu_to_be32(*(val + i));
		p++;
	}

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret == 0)
		/* On success copy the returned values back to the caller. */
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = be32_to_cpu(*p);
	return ret;
}
7777 7777
7778 7778 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7779 7779 unsigned int vf, unsigned int nparams, const u32 *params,
7780 7780 u32 *val)
7781 7781 {
7782 7782 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7783 7783 true);
7784 7784 }
7785 7785
7786 7786 int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
7787 7787 unsigned int vf, unsigned int nparams, const u32 *params,
7788 7788 u32 *val)
7789 7789 {
7790 7790 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7791 7791 false);
7792 7792 }
7793 7793
7794 7794 /**
7795 7795 * t4_set_params_timeout - sets FW or device parameters
7796 7796 * @adap: the adapter
7797 7797 * @mbox: mailbox to use for the FW command
7798 7798 * @pf: the PF
7799 7799 * @vf: the VF
7800 7800 * @nparams: the number of parameters
7801 7801 * @params: the parameter names
7802 7802 * @val: the parameter values
7803 7803 * @timeout: the timeout time
7804 7804 *
7805 7805 * Sets the value of FW or device parameters. Up to 7 parameters can be
7806 7806 * specified at once.
7807 7807 */
7808 7808 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7809 7809 unsigned int pf, unsigned int vf,
7810 7810 unsigned int nparams, const u32 *params,
7811 7811 const u32 *val, int timeout)
7812 7812 {
7813 7813 struct fw_params_cmd c;
7814 7814 __be32 *p = &c.param[0].mnem;
7815 7815
7816 7816 if (nparams > 7)
7817 7817 return -EINVAL;
7818 7818
7819 7819 memset(&c, 0, sizeof(c));
7820 7820 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7821 7821 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7822 7822 V_FW_PARAMS_CMD_PFN(pf) |
7823 7823 V_FW_PARAMS_CMD_VFN(vf));
7824 7824 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7825 7825
7826 7826 while (nparams--) {
7827 7827 *p++ = cpu_to_be32(*params++);
7828 7828 *p++ = cpu_to_be32(*val++);
7829 7829 }
7830 7830
7831 7831 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7832 7832 }
7833 7833
7834 7834 /**
7835 7835 * t4_set_params - sets FW or device parameters
7836 7836 * @adap: the adapter
7837 7837 * @mbox: mailbox to use for the FW command
7838 7838 * @pf: the PF
7839 7839 * @vf: the VF
7840 7840 * @nparams: the number of parameters
7841 7841 * @params: the parameter names
7842 7842 * @val: the parameter values
7843 7843 *
7844 7844 * Sets the value of FW or device parameters. Up to 7 parameters can be
7845 7845 * specified at once.
7846 7846 */
7847 7847 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7848 7848 unsigned int vf, unsigned int nparams, const u32 *params,
7849 7849 const u32 *val)
7850 7850 {
7851 7851 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7852 7852 FW_CMD_MAX_TIMEOUT);
7853 7853 }
7854 7854
/**
 * t4_cfg_pfvf - configure PF/VF resource limits
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF being configured
 * @vf: the VF being configured
 * @txq: the max number of egress queues
 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
 * @rxqi: the max number of interrupt-capable ingress queues
 * @rxq: the max number of interruptless ingress queues
 * @tc: the PCI traffic class
 * @vi: the max number of virtual interfaces
 * @cmask: the channel access rights mask for the PF/VF
 * @pmask: the port access rights mask for the PF/VF
 * @nexact: the maximum number of exact MPS filters
 * @rcaps: read capabilities
 * @wxcaps: write/execute capabilities
 *
 * Configures resource limits and capabilities for a physical or virtual
 * function.
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	/* Pack every limit/capability into the PFVF command and send it. */
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
				  V_FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
				     V_FW_PFVF_CMD_NIQ(rxq));
	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
				    V_FW_PFVF_CMD_PMASK(pmask) |
				    V_FW_PFVF_CMD_NEQ(txq));
	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
				      V_FW_PFVF_CMD_NVI(vi) |
				      V_FW_PFVF_CMD_NEXACTF(nexact));
	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
					   V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
					   V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
7902 7902
/**
 * t4_alloc_vi_func - allocate a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 * @portfunc: which Port Application Function MAC Address is desired
 * @idstype: Intrusion Detection Type
 *
 * Allocates a virtual interface for the given physical port.  If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 * stored consecutively so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	/* HW encodes the MAC count as (count - 1). */
	c.nmac = nmac - 1;
	if(!rss_size)
		c.norss_rsssize = F_FW_VI_CMD_NORSS;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		/*
		 * The primary MAC lives in c.mac; additional ones are
		 * returned in nmac0..nmac3.  Copy each requested address
		 * at its 6-byte slot; intentional fallthrough copies all
		 * lower-numbered ones too.
		 */
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHRU */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHRU */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHRU */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
}
7967 7967
7968 7968 /**
7969 7969 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
7970 7970 * @adap: the adapter
7971 7971 * @mbox: mailbox to use for the FW command
7972 7972 * @port: physical port associated with the VI
7973 7973 * @pf: the PF owning the VI
7974 7974 * @vf: the VF owning the VI
7975 7975 * @nmac: number of MAC addresses needed (1 to 5)
7976 7976 * @mac: the MAC addresses of the VI
7977 7977 * @rss_size: size of RSS table slice associated with this VI
7978 7978 *
7979 7979 * backwards compatible and convieniance routine to allocate a Virtual
7980 7980 * Interface with a Ethernet Port Application Function and Intrustion
7981 7981 * Detection System disabled.
7982 7982 */
7983 7983 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7984 7984 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7985 7985 unsigned int *rss_size)
7986 7986 {
7987 7987 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
7988 7988 FW_VI_FUNC_ETH, 0);
7989 7989 }
7990 7990
7991 7991
7992 7992 /**
7993 7993 * t4_free_vi - free a virtual interface
7994 7994 * @adap: the adapter
7995 7995 * @mbox: mailbox to use for the FW command
7996 7996 * @pf: the PF owning the VI
7997 7997 * @vf: the VF owning the VI
7998 7998 * @viid: virtual interface identifiler
7999 7999 *
8000 8000 * Free a previously allocated virtual interface.
8001 8001 */
8002 8002 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
8003 8003 unsigned int vf, unsigned int viid)
8004 8004 {
8005 8005 struct fw_vi_cmd c;
8006 8006
8007 8007 memset(&c, 0, sizeof(c));
8008 8008 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
8009 8009 F_FW_CMD_REQUEST |
8010 8010 F_FW_CMD_EXEC |
8011 8011 V_FW_VI_CMD_PFN(pf) |
8012 8012 V_FW_VI_CMD_VFN(vf));
8013 8013 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
8014 8014 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
8015 8015
8016 8016 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8017 8017 }
8018 8018
8019 8019 /**
8020 8020 * t4_set_rxmode - set Rx properties of a virtual interface
8021 8021 * @adap: the adapter
8022 8022 * @mbox: mailbox to use for the FW command
8023 8023 * @viid: the VI id
8024 8024 * @mtu: the new MTU or -1
8025 8025 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
8026 8026 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
8027 8027 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
8028 8028 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
8029 8029 * @sleep_ok: if true we may sleep while awaiting command completion
8030 8030 *
8031 8031 * Sets Rx properties of a virtual interface.
8032 8032 */
8033 8033 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
8034 8034 int mtu, int promisc, int all_multi, int bcast, int vlanex,
8035 8035 bool sleep_ok)
8036 8036 {
8037 8037 struct fw_vi_rxmode_cmd c;
8038 8038
8039 8039 /* convert to FW values */
8040 8040 if (mtu < 0)
8041 8041 mtu = M_FW_VI_RXMODE_CMD_MTU;
8042 8042 if (promisc < 0)
8043 8043 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
8044 8044 if (all_multi < 0)
8045 8045 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
8046 8046 if (bcast < 0)
8047 8047 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
8048 8048 if (vlanex < 0)
8049 8049 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
8050 8050
8051 8051 memset(&c, 0, sizeof(c));
8052 8052 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
8053 8053 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8054 8054 V_FW_VI_RXMODE_CMD_VIID(viid));
8055 8055 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8056 8056 c.mtu_to_vlanexen =
8057 8057 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
8058 8058 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
8059 8059 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
8060 8060 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
8061 8061 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
8062 8062 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8063 8063 }
8064 8064
8065 8065 /**
8066 8066 * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
8067 8067 * @adap: the adapter
8068 8068 * @viid: the VI id
8069 8069 * @mac: the MAC address
8070 8070 * @mask: the mask
8071 8071 * @idx: index at which to add this entry
8072 8072 * @lookup_type: MAC address for inner (1) or outer (0) header
8073 8073 * @sleep_ok: call is allowed to sleep
8074 8074 *
8075 8075 * Adds the mac entry at the specified index using raw mac interface.
8076 8076 *
8077 8077 * Returns a negative error number or the allocated index for this mac.
8078 8078 */
8079 8079 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
8080 8080 const u8 *addr, const u8 *mask, unsigned int idx,
8081 8081 u8 lookup_type, bool sleep_ok)
8082 8082 {
8083 8083 int ret = 0;
8084 8084 struct fw_vi_mac_cmd c;
8085 8085 struct fw_vi_mac_raw *p = &c.u.raw;
8086 8086 u32 val;
8087 8087
8088 8088 memset(&c, 0, sizeof(c));
8089 8089 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8090 8090 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8091 8091 V_FW_VI_MAC_CMD_VIID(viid));
8092 8092 val = V_FW_CMD_LEN16(1) |
8093 8093 V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
8094 8094 c.freemacs_to_len16 = cpu_to_be32(val);
8095 8095
8096 8096 /* Specify that this is an inner mac address */
8097 8097 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));
8098 8098
8099 8099 /* Lookup Type. Outer header: 0, Inner header: 1 */
8100 8100 p->data0_pkd = cpu_to_be32(lookup_type << 10);
8101 8101 p->data0m_pkd = cpu_to_be64(3 << 10); /* Lookup mask */
8102 8102
8103 8103 /* Copy the address and the mask */
8104 8104 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
8105 8105 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
8106 8106
8107 8107 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
8108 8108 if (ret == 0) {
8109 8109 ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
8110 8110 if (ret != idx)
8111 8111 ret = -ENOMEM;
8112 8112 }
8113 8113
8114 8114 return ret;
8115 8115 }
8116 8116
/**
 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the supplied addresses and
 * sets it to the corresponding address. If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address. If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL addresses that fail to allocate an exact filter
 * are hashed and update the hash filter bitmap pointed at by @hash.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
	unsigned int rem = naddr;	/* addresses not yet submitted */

	if (naddr > max_naddr)
		return -EINVAL;

	/*
	 * One FW_VI_MAC_CMD holds at most ARRAY_SIZE(c.u.exact) entries,
	 * so the addresses are submitted in chunks; offset is advanced by
	 * the chunk size at the bottom of each iteration.
	 */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		/* Command length covers only the entries actually used. */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
					   F_FW_CMD_REQUEST |
					   F_FW_CMD_WRITE |
					   V_FW_CMD_EXEC(free) |
					   V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
						  V_FW_CMD_LEN16(len16));

		/* FW_VI_MAC_ADD_MAC asks FW to pick a free index per entry. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
				cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
					    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		/* Walk the reply: index >= max_naddr means "not stored". */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
						be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset+i] = (index >= max_naddr
						 ? 0xffff
						 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* Only free existing filters on the first chunk. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* -FW_ENOMEM is a partial success: report filters that did stick. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}
8209 8209
8210 8210 /**
8211 8211 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
8212 8212 * @adap: the adapter
8213 8213 * @mbox: mailbox to use for the FW command
8214 8214 * @viid: the VI id
8215 8215 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
8216 8216 * @addr: the MAC address(es)
8217 8217 * @sleep_ok: call is allowed to sleep
8218 8218 *
8219 8219 * Frees the exact-match filter for each of the supplied addresses
8220 8220 *
8221 8221 * Returns a negative error number or the number of filters freed.
8222 8222 */
8223 8223 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
8224 8224 unsigned int viid, unsigned int naddr,
8225 8225 const u8 **addr, bool sleep_ok)
8226 8226 {
8227 8227 int offset, ret = 0;
8228 8228 struct fw_vi_mac_cmd c;
8229 8229 unsigned int nfilters = 0;
8230 8230 unsigned int max_naddr = is_t4(adap->params.chip) ?
8231 8231 NUM_MPS_CLS_SRAM_L_INSTANCES :
8232 8232 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8233 8233 unsigned int rem = naddr;
8234 8234
8235 8235 if (naddr > max_naddr)
8236 8236 return -EINVAL;
8237 8237
8238 8238 for (offset = 0; offset < (int)naddr ; /**/) {
8239 8239 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8240 8240 ? rem
8241 8241 : ARRAY_SIZE(c.u.exact));
8242 8242 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8243 8243 u.exact[fw_naddr]), 16);
8244 8244 struct fw_vi_mac_exact *p;
8245 8245 int i;
8246 8246
8247 8247 memset(&c, 0, sizeof(c));
8248 8248 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8249 8249 F_FW_CMD_REQUEST |
8250 8250 F_FW_CMD_WRITE |
8251 8251 V_FW_CMD_EXEC(0) |
8252 8252 V_FW_VI_MAC_CMD_VIID(viid));
8253 8253 c.freemacs_to_len16 =
8254 8254 cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
8255 8255 V_FW_CMD_LEN16(len16));
8256 8256
8257 8257 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
8258 8258 p->valid_to_idx = cpu_to_be16(
8259 8259 F_FW_VI_MAC_CMD_VALID |
8260 8260 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
8261 8261 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8262 8262 }
8263 8263
8264 8264 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8265 8265 if (ret)
8266 8266 break;
8267 8267
8268 8268 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8269 8269 u16 index = G_FW_VI_MAC_CMD_IDX(
8270 8270 be16_to_cpu(p->valid_to_idx));
8271 8271
8272 8272 if (index < max_naddr)
8273 8273 nfilters++;
8274 8274 }
8275 8275
8276 8276 offset += fw_naddr;
8277 8277 rem -= fw_naddr;
8278 8278 }
8279 8279
8280 8280 if (ret == 0)
8281 8281 ret = nfilters;
8282 8282 return ret;
8283 8283 }
8284 8284
8285 8285 /**
8286 8286 * t4_change_mac - modifies the exact-match filter for a MAC address
8287 8287 * @adap: the adapter
8288 8288 * @mbox: mailbox to use for the FW command
8289 8289 * @viid: the VI id
8290 8290 * @idx: index of existing filter for old value of MAC address, or -1
8291 8291 * @addr: the new MAC address value
8292 8292 * @persist: whether a new MAC allocation should be persistent
8293 8293 * @add_smt: if true also add the address to the HW SMT
8294 8294 *
8295 8295 * Modifies an exact-match filter and sets it to the new MAC address if
8296 8296 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
8297 8297 * latter case the address is added persistently if @persist is %true.
8298 8298 *
8299 8299 * Note that in general it is not possible to modify the value of a given
8300 8300 * filter so the generic way to modify an address filter is to free the one
8301 8301 * being used by the old address value and allocate a new filter for the
8302 8302 * new address value.
8303 8303 *
8304 8304 * Returns a negative error number or the index of the filter with the new
8305 8305 * MAC value. Note that this index may differ from @idx.
8306 8306 */
8307 8307 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
8308 8308 int idx, const u8 *addr, bool persist, bool add_smt)
8309 8309 {
8310 8310 int ret, mode;
8311 8311 struct fw_vi_mac_cmd c;
8312 8312 struct fw_vi_mac_exact *p = c.u.exact;
8313 8313 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
8314 8314
8315 8315 if (idx < 0) /* new allocation */
8316 8316 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
8317 8317 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
8318 8318
8319 8319 memset(&c, 0, sizeof(c));
8320 8320 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8321 8321 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8322 8322 V_FW_VI_MAC_CMD_VIID(viid));
8323 8323 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
8324 8324 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8325 8325 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
8326 8326 V_FW_VI_MAC_CMD_IDX(idx));
8327 8327 memcpy(p->macaddr, addr, sizeof(p->macaddr));
8328 8328
8329 8329 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8330 8330 if (ret == 0) {
8331 8331 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
8332 8332 if (ret >= max_mac_addr)
8333 8333 ret = -ENOMEM;
8334 8334 }
8335 8335 return ret;
8336 8336 }
8337 8337
8338 8338 /**
8339 8339 * t4_set_addr_hash - program the MAC inexact-match hash filter
8340 8340 * @adap: the adapter
8341 8341 * @mbox: mailbox to use for the FW command
8342 8342 * @viid: the VI id
8343 8343 * @ucast: whether the hash filter should also match unicast addresses
8344 8344 * @vec: the value to be written to the hash filter
8345 8345 * @sleep_ok: call is allowed to sleep
8346 8346 *
8347 8347 * Sets the 64-bit inexact-match hash filter for a virtual interface.
8348 8348 */
8349 8349 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8350 8350 bool ucast, u64 vec, bool sleep_ok)
8351 8351 {
8352 8352 struct fw_vi_mac_cmd c;
8353 8353 u32 val;
8354 8354
8355 8355 memset(&c, 0, sizeof(c));
8356 8356 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8357 8357 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8358 8358 V_FW_VI_ENABLE_CMD_VIID(viid));
8359 8359 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
8360 8360 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
8361 8361 c.freemacs_to_len16 = cpu_to_be32(val);
8362 8362 c.u.hash.hashvec = cpu_to_be64(vec);
8363 8363 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8364 8364 }
8365 8365
8366 8366 /**
8367 8367 * t4_enable_vi_params - enable/disable a virtual interface
8368 8368 * @adap: the adapter
8369 8369 * @mbox: mailbox to use for the FW command
8370 8370 * @viid: the VI id
8371 8371 * @rx_en: 1=enable Rx, 0=disable Rx
8372 8372 * @tx_en: 1=enable Tx, 0=disable Tx
8373 8373 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8374 8374 *
8375 8375 * Enables/disables a virtual interface. Note that setting DCB Enable
8376 8376 * only makes sense when enabling a Virtual Interface ...
8377 8377 */
8378 8378 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8379 8379 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8380 8380 {
8381 8381 struct fw_vi_enable_cmd c;
8382 8382
8383 8383 memset(&c, 0, sizeof(c));
8384 8384 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8385 8385 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8386 8386 V_FW_VI_ENABLE_CMD_VIID(viid));
8387 8387 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
8388 8388 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
8389 8389 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
8390 8390 FW_LEN16(c));
8391 8391 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8392 8392 }
8393 8393
8394 8394 /**
8395 8395 * t4_enable_vi - enable/disable a virtual interface
8396 8396 * @adap: the adapter
8397 8397 * @mbox: mailbox to use for the FW command
8398 8398 * @viid: the VI id
8399 8399 * @rx_en: 1=enable Rx, 0=disable Rx
8400 8400 * @tx_en: 1=enable Tx, 0=disable Tx
8401 8401 *
8402 8402 * Enables/disables a virtual interface. Note that setting DCB Enable
8403 8403 * only makes sense when enabling a Virtual Interface ...
8404 8404 */
8405 8405 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8406 8406 bool rx_en, bool tx_en)
8407 8407 {
8408 8408 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8409 8409 }
8410 8410
8411 8411 /**
8412 8412 * t4_identify_port - identify a VI's port by blinking its LED
8413 8413 * @adap: the adapter
8414 8414 * @mbox: mailbox to use for the FW command
8415 8415 * @viid: the VI id
8416 8416 * @nblinks: how many times to blink LED at 2.5 Hz
8417 8417 *
8418 8418 * Identifies a VI's port by blinking its LED.
8419 8419 */
8420 8420 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8421 8421 unsigned int nblinks)
8422 8422 {
8423 8423 struct fw_vi_enable_cmd c;
8424 8424
8425 8425 memset(&c, 0, sizeof(c));
8426 8426 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8427 8427 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8428 8428 V_FW_VI_ENABLE_CMD_VIID(viid));
8429 8429 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
8430 8430 c.blinkdur = cpu_to_be16(nblinks);
8431 8431 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8432 8432 }
8433 8433
8434 8434 /**
8435 8435 * t4_iq_stop - stop an ingress queue and its FLs
8436 8436 * @adap: the adapter
8437 8437 * @mbox: mailbox to use for the FW command
8438 8438 * @pf: the PF owning the queues
8439 8439 * @vf: the VF owning the queues
8440 8440 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8441 8441 * @iqid: ingress queue id
8442 8442 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8443 8443 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8444 8444 *
8445 8445 * Stops an ingress queue and its associated FLs, if any. This causes
8446 8446 * any current or future data/messages destined for these queues to be
8447 8447 * tossed.
8448 8448 */
8449 8449 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8450 8450 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8451 8451 unsigned int fl0id, unsigned int fl1id)
8452 8452 {
8453 8453 struct fw_iq_cmd c;
8454 8454
8455 8455 memset(&c, 0, sizeof(c));
8456 8456 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8457 8457 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8458 8458 V_FW_IQ_CMD_VFN(vf));
8459 8459 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
8460 8460 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8461 8461 c.iqid = cpu_to_be16(iqid);
8462 8462 c.fl0id = cpu_to_be16(fl0id);
8463 8463 c.fl1id = cpu_to_be16(fl1id);
8464 8464 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8465 8465 }
8466 8466
8467 8467 /**
8468 8468 * t4_iq_free - free an ingress queue and its FLs
8469 8469 * @adap: the adapter
8470 8470 * @mbox: mailbox to use for the FW command
8471 8471 * @pf: the PF owning the queues
8472 8472 * @vf: the VF owning the queues
8473 8473 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8474 8474 * @iqid: ingress queue id
8475 8475 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8476 8476 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8477 8477 *
8478 8478 * Frees an ingress queue and its associated FLs, if any.
8479 8479 */
8480 8480 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8481 8481 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8482 8482 unsigned int fl0id, unsigned int fl1id)
8483 8483 {
8484 8484 struct fw_iq_cmd c;
8485 8485
8486 8486 memset(&c, 0, sizeof(c));
8487 8487 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8488 8488 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8489 8489 V_FW_IQ_CMD_VFN(vf));
8490 8490 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
8491 8491 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8492 8492 c.iqid = cpu_to_be16(iqid);
8493 8493 c.fl0id = cpu_to_be16(fl0id);
8494 8494 c.fl1id = cpu_to_be16(fl1id);
8495 8495 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8496 8496 }
8497 8497
8498 8498 /**
8499 8499 * t4_eth_eq_free - free an Ethernet egress queue
8500 8500 * @adap: the adapter
8501 8501 * @mbox: mailbox to use for the FW command
8502 8502 * @pf: the PF owning the queue
8503 8503 * @vf: the VF owning the queue
8504 8504 * @eqid: egress queue id
8505 8505 *
8506 8506 * Frees an Ethernet egress queue.
8507 8507 */
8508 8508 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8509 8509 unsigned int vf, unsigned int eqid)
8510 8510 {
8511 8511 struct fw_eq_eth_cmd c;
8512 8512
8513 8513 memset(&c, 0, sizeof(c));
8514 8514 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
8515 8515 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8516 8516 V_FW_EQ_ETH_CMD_PFN(pf) |
8517 8517 V_FW_EQ_ETH_CMD_VFN(vf));
8518 8518 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
8519 8519 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
8520 8520 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8521 8521 }
8522 8522
8523 8523 /**
8524 8524 * t4_ctrl_eq_free - free a control egress queue
8525 8525 * @adap: the adapter
8526 8526 * @mbox: mailbox to use for the FW command
8527 8527 * @pf: the PF owning the queue
8528 8528 * @vf: the VF owning the queue
8529 8529 * @eqid: egress queue id
8530 8530 *
8531 8531 * Frees a control egress queue.
8532 8532 */
8533 8533 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8534 8534 unsigned int vf, unsigned int eqid)
8535 8535 {
8536 8536 struct fw_eq_ctrl_cmd c;
8537 8537
8538 8538 memset(&c, 0, sizeof(c));
8539 8539 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
8540 8540 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8541 8541 V_FW_EQ_CTRL_CMD_PFN(pf) |
8542 8542 V_FW_EQ_CTRL_CMD_VFN(vf));
8543 8543 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
8544 8544 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
8545 8545 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8546 8546 }
8547 8547
/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	/* EXEC + FREE: ask FW to tear down the queue owned by (pf, vf). */
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_OFLD_CMD_PFN(pf) |
				  V_FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
8572 8572
/**
 * t4_link_down_rc_str - return a string for a Link Down Reason Code
 * @link_down_rc: Link Down Reason Code
 *
 * Returns a string representation of the Link Down Reason Code.
 */
const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};

	/* Codes beyond the table are unknown to this driver version. */
	return (link_down_rc < sizeof(reason) / sizeof(reason[0])) ?
	    reason[link_down_rc] : "Bad Reason Code";
}
8597 8597
8598 8598 /**
8599 8599 * Get the highest speed for the port from the advertised port capabilities.
8600 8600 * It will be either the highest speed from the list of speeds or
8601 8601 * whatever user has set using ethtool.
8602 8602 */
8603 8603 static inline unsigned int fwcap_to_fw_speed(unsigned int acaps)
8604 8604 {
8605 8605 if (acaps & FW_PORT_CAP_SPEED_100G)
8606 8606 return FW_PORT_CAP_SPEED_100G;
8607 8607 if (acaps & FW_PORT_CAP_SPEED_40G)
8608 8608 return FW_PORT_CAP_SPEED_40G;
8609 8609 if (acaps & FW_PORT_CAP_SPEED_25G)
8610 8610 return FW_PORT_CAP_SPEED_25G;
8611 8611 if (acaps & FW_PORT_CAP_SPEED_10G)
8612 8612 return FW_PORT_CAP_SPEED_10G;
8613 8613 if (acaps & FW_PORT_CAP_SPEED_1G)
8614 8614 return FW_PORT_CAP_SPEED_1G;
8615 8615 if (acaps & FW_PORT_CAP_SPEED_100M)
8616 8616 return FW_PORT_CAP_SPEED_100M;
8617 8617 return 0;
8618 8618 }
8619 8619
/**
 * t4_handle_get_port_info - process a FW reply message
 * @pi: the port info
 * @rpl: start of the FW message
 *
 * Processes a GET_PORT_INFO FW reply message: decodes link status, speed,
 * pause settings, FEC and module type from the reply, records module
 * changes, and notifies the OS layer if the link state changed.
 */
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
{
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int acaps = be16_to_cpu(p->u.info.acap);
	struct adapter *adap = pi->adapter;

	/* link/module state change message */
	int speed = 0;
	unsigned int fc, fec;
	struct link_config *lc;
	u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
	int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
	u32 mod = G_FW_PORT_CMD_MODTYPE(stat);

	/*
	 * Unfortunately the format of the Link Status returned by the
	 * Firmware isn't the same as the Firmware Port Capabilities bitfield
	 * used everywhere else ...
	 */
	fc = 0;
	if (stat & F_FW_PORT_CMD_RXPAUSE)
		fc |= PAUSE_RX;
	if (stat & F_FW_PORT_CMD_TXPAUSE)
		fc |= PAUSE_TX;

	fec = fwcap_to_cc_fec(acaps);

	/* Link speed is encoded as capability bits in the status word. */
	if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
		speed = 100;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
		speed = 1000;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
		speed = 10000;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
		speed = 25000;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
		speed = 40000;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
		speed = 100000;

	lc = &pi->link_cfg;

	/* Module change is handled before (and independently of) the link
	 * state change below.
	 */
	if (mod != pi->mod_type) {
		/*
		 * When a new Transceiver Module is inserted, the Firmware
		 * will examine any Forward Error Correction parameters
		 * present in the Transceiver Module i2c EPROM and determine
		 * the supported and recommended FEC settings from those
		 * based on IEEE 802.3 standards. We always record the
		 * IEEE 802.3 recommended "automatic" settings.
		 */
		lc->auto_fec = fec;

		pi->mod_type = mod;
		t4_os_portmod_changed(adap, pi->port_id);
	}

	if (link_ok != lc->link_ok || speed != lc->speed ||
	    fc != lc->fc || fec != lc->fec) { /* something changed */
		if (!link_ok && lc->link_ok) {
			/* Link just went down: log the FW-supplied reason. */
			unsigned char rc = G_FW_PORT_CMD_LINKDNRC(stat);

			lc->link_down_rc = rc;
			CH_WARN_RATELIMIT(adap,
				"Port %d link down, reason: %s\n",
				pi->tx_chan, t4_link_down_rc_str(rc));
		}
		lc->link_ok = link_ok;
		lc->speed = speed;
		lc->fc = fc;
		lc->fec = fec;

		lc->supported = be16_to_cpu(p->u.info.pcap);
		lc->lp_advertising = be16_to_cpu(p->u.info.lpacap);
		lc->advertising = be16_to_cpu(p->u.info.acap) & ADVERT_MASK;

		if (lc->advertising & FW_PORT_CAP_ANEG) {
			lc->autoneg = AUTONEG_ENABLE;
		} else {
			/* When Autoneg is disabled, user needs to set
			 * single speed.
			 * Similar to cxgb4_ethtool.c: set_link_ksettings
			 */
			lc->advertising = 0;
			lc->requested_speed = fwcap_to_fw_speed(acaps);
			lc->autoneg = AUTONEG_DISABLE;
		}

		t4_os_link_changed(adap, pi->port_id, link_ok);
	}
}
8718 8718
8719 8719 /**
8720 8720 * t4_update_port_info - retrieve and update port information if changed
8721 8721 * @pi: the port_info
8722 8722 *
8723 8723 * We issue a Get Port Information Command to the Firmware and, if
8724 8724 * successful, we check to see if anything is different from what we
8725 8725 * last recorded and update things accordingly.
8726 8726 */
8727 8727 int t4_update_port_info(struct port_info *pi)
8728 8728 {
8729 8729 struct fw_port_cmd port_cmd;
8730 8730 int ret;
8731 8731
8732 8732 memset(&port_cmd, 0, sizeof port_cmd);
8733 8733 port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
8734 8734 F_FW_CMD_REQUEST | F_FW_CMD_READ |
8735 8735 V_FW_PORT_CMD_PORTID(pi->tx_chan));
8736 8736 port_cmd.action_to_len16 = cpu_to_be32(
8737 8737 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
8738 8738 FW_LEN16(port_cmd));
8739 8739 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8740 8740 &port_cmd, sizeof(port_cmd), &port_cmd);
8741 8741 if (ret)
8742 8742 return ret;
8743 8743
8744 8744 t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
8745 8745 return 0;
8746 8746 }
8747 8747
/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages. Returns 0
 * on success or -EINVAL for an unrecognized message.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	/*
	 * This might be a port command ... this simplifies the following
	 * conditionals ... We can get away with pre-dereferencing
	 * action_to_len16 because it's in the first 16 bytes and all messages
	 * will be at least that long.
	 */
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action =
		G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));

	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
		int i;
		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
		struct port_info *pi = NULL;

		/* Map the FW channel back to the owning port. */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}

		/*
		 * NOTE(review): if no port's tx_chan matches, pi is the last
		 * port visited (or NULL when the adapter has no ports) —
		 * presumably FW only reports channels we own; confirm.
		 */
		t4_handle_get_port_info(pi, rpl);
	} else {
		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
		return -EINVAL;
	}
	return 0;
}
8787 8787
8788 8788 /**
8789 8789 * get_pci_mode - determine a card's PCI mode
8790 8790 * @adapter: the adapter
8791 8791 * @p: where to store the PCI settings
8792 8792 *
8793 8793 * Determines a card's PCI mode and associated parameters, such as speed
8794 8794 * and width.
8795 8795 */
8796 8796 static void get_pci_mode(struct adapter *adapter,
8797 8797 struct pci_params *p)
8798 8798 {
8799 8799 u16 val;
8800 8800 u32 pcie_cap;
8801 8801
8802 8802 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
8803 8803 if (pcie_cap) {
8804 8804 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
8805 8805 p->speed = val & PCI_EXP_LNKSTA_CLS;
8806 8806 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8807 8807 }
8808 8808 }
8809 8809
8810 8810 /**
8811 8811 * init_link_config - initialize a link's SW state
8812 8812 * @lc: pointer to structure holding the link state
8813 8813 * @pcaps: link Port Capabilities
8814 8814 * @acaps: link current Advertised Port Capabilities
8815 8815 *
8816 8816 * Initializes the SW state maintained for each link, including the link's
8817 8817 * capabilities and default speed/flow-control/autonegotiation settings.
8818 8818 */
8819 8819 static void init_link_config(struct link_config *lc, unsigned int pcaps,
8820 8820 unsigned int acaps)
8821 8821 {
8822 8822 lc->supported = pcaps;
8823 8823 lc->lp_advertising = 0;
8824 8824 lc->requested_speed = 0;
8825 8825 lc->speed = 0;
8826 8826 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
8827 8827
8828 8828 /*
8829 8829 * For Forward Error Control, we default to whatever the Firmware
8830 8830 * tells us the Link is currently advertising.
8831 8831 */
8832 8832 lc->auto_fec = fwcap_to_cc_fec(acaps);
8833 8833 lc->requested_fec = FEC_AUTO;
8834 8834 lc->fec = lc->auto_fec;
8835 8835
8836 8836 if (lc->supported & FW_PORT_CAP_ANEG) {
8837 8837 lc->advertising = lc->supported & ADVERT_MASK;
8838 8838 lc->autoneg = AUTONEG_ENABLE;
8839 8839 lc->requested_fc |= PAUSE_AUTONEG;
8840 8840 } else {
8841 8841 lc->advertising = 0;
8842 8842 lc->autoneg = AUTONEG_DISABLE;
8843 8843 }
8844 8844 }
8845 8845
8846 8846 /**
8847 8847 * t4_wait_dev_ready - wait till to reads of registers work
8848 8848 *
8849 8849 * Right after the device is RESET is can take a small amount of time
8850 8850 * for it to respond to register reads. Until then, all reads will
8851 8851 * return either 0xff...ff or 0xee...ee. Return an error if reads
8852 8852 * don't work within a reasonable time frame.
8853 8853 */
8854 8854 int t4_wait_dev_ready(struct adapter *adapter)
8855 8855 {
8856 8856 u32 whoami;
8857 8857
8858 8858 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
8859 8859 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
8860 8860 return 0;
8861 8861
8862 8862 msleep(500);
8863 8863 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
8864 8864 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
8865 8865 return 0;
8866 8866
8867 8867 CH_ERR(adapter, "Device didn't become ready for access, "
8868 8868 "whoami = %#x\n", whoami);
|
↓ open down ↓ |
8868 lines elided |
↑ open up ↑ |
8869 8869 return -EIO;
8870 8870 }
8871 8871
/*
 * Describes one explicitly supported Flash part: its JEDEC Read ID value
 * and its total capacity.  NOTE(review): despite the "_mb" suffix, size_mb
 * holds the size in bytes — the table entry below stores 4 << 20 for a
 * 4MB part and the value is divided by SF_SEC_SIZE directly.
 */
struct flash_desc {
	u32 vendor_and_model_id;	/* response to the SF_RD_ID command */
	u32 size_mb;			/* total part size, in bytes */
};
8876 8876
8877 8877 int t4_get_flash_params(struct adapter *adapter)
8878 8878 {
8879 - /* Table for non-Numonix supported flash parts. Numonix parts are left
8880 - * to the preexisting well-tested code. All flash parts have 64KB
8881 - * sectors.
8879 + /*
8880 + * Table for non-standard supported Flash parts. Note, all Flash
8881 + * parts must have 64KB sectors.
8882 8882 */
8883 8883 static struct flash_desc supported_flash[] = {
8884 8884 { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
8885 8885 };
8886 8886
8887 8887 int ret;
8888 8888 u32 flashid = 0;
8889 8889 unsigned int part, manufacturer;
8890 - unsigned int density, size;
8890 + unsigned int density, size = 0;
8891 8891
8892 8892
8893 8893 /*
8894 8894 * Issue a Read ID Command to the Flash part. We decode supported
8895 8895 * Flash parts and their sizes from this. There's a newer Query
8896 - * Command which can retrieve detailed geometry information but
8897 - * many Flash parts don't support it.
8896 + * Command which can retrieve detailed geometry information but many
8897 + * Flash parts don't support it.
8898 8898 */
8899 8899 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
8900 8900 if (!ret)
8901 8901 ret = sf1_read(adapter, 3, 0, 1, &flashid);
8902 8902 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
8903 8903 if (ret < 0)
8904 8904 return ret;
8905 8905
8906 + /*
8907 + * Check to see if it's one of our non-standard supported Flash parts.
8908 + */
8906 8909 for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
8907 8910 if (supported_flash[part].vendor_and_model_id == flashid) {
8908 8911 adapter->params.sf_size =
8909 8912 supported_flash[part].size_mb;
8910 8913 adapter->params.sf_nsec =
8911 8914 adapter->params.sf_size / SF_SEC_SIZE;
8912 8915 goto found;
8913 8916 }
8914 8917
8918 + /*
8919 + * Decode Flash part size. The code below looks repetitive with
8920 + * common encodings, but that's not guaranteed in the JEDEC
8921 + * specification for the Read JEDEC ID command. The only thing that
8922 + * we're guaranteed by the JEDEC specification is where the
8923 + * Manufacturer ID is in the returned result. After that each
8924 + * Manufacturer ~could~ encode things completely differently.
8925 + * Note, all Flash parts must have 64KB sectors.
8926 + */
8915 8927 manufacturer = flashid & 0xff;
8916 8928 switch (manufacturer) {
8917 8929 case 0x20: { /* Micron/Numonix */
8918 8930 /*
8919 8931 * This Density -> Size decoding table is taken from Micron
8920 8932 * Data Sheets.
8921 8933 */
8922 8934 density = (flashid >> 16) & 0xff;
8923 8935 switch (density) {
8924 8936 case 0x14: size = 1 << 20; break; /* 1MB */
8925 8937 case 0x15: size = 1 << 21; break; /* 2MB */
8926 8938 case 0x16: size = 1 << 22; break; /* 4MB */
8927 8939 case 0x17: size = 1 << 23; break; /* 8MB */
8928 8940 case 0x18: size = 1 << 24; break; /* 16MB */
8929 8941 case 0x19: size = 1 << 25; break; /* 32MB */
8930 8942 case 0x20: size = 1 << 26; break; /* 64MB */
8931 8943 case 0x21: size = 1 << 27; break; /* 128MB */
8932 8944 case 0x22: size = 1 << 28; break; /* 256MB */
8945 + }
8946 + break;
8947 + }
8933 8948
8934 - default:
8935 - CH_ERR(adapter, "Micron Flash Part has bad size, "
8936 - "ID = %#x, Density code = %#x\n",
8937 - flashid, density);
8938 - return -EINVAL;
8949 + case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
8950 + /*
8951 + * This Density -> Size decoding table is taken from ISSI
8952 + * Data Sheets.
8953 + */
8954 + density = (flashid >> 16) & 0xff;
8955 + switch (density) {
8956 + case 0x16: size = 1 << 25; break; /* 32MB */
8957 + case 0x17: size = 1 << 26; break; /* 64MB */
8939 8958 }
8959 + break;
8960 + }
8940 8961
8941 - adapter->params.sf_size = size;
8942 - adapter->params.sf_nsec = size / SF_SEC_SIZE;
8962 + case 0xc2: { /* Macronix */
8963 + /*
8964 + * This Density -> Size decoding table is taken from Macronix
8965 + * Data Sheets.
8966 + */
8967 + density = (flashid >> 16) & 0xff;
8968 + switch (density) {
8969 + case 0x17: size = 1 << 23; break; /* 8MB */
8970 + case 0x18: size = 1 << 24; break; /* 16MB */
8971 + }
8943 8972 break;
8944 8973 }
8945 8974
8946 - default:
8947 - CH_ERR(adapter, "Unsupported Flash Part, ID = %#x\n", flashid);
8948 - return -EINVAL;
8975 + case 0xef: { /* Winbond */
8976 + /*
8977 + * This Density -> Size decoding table is taken from Winbond
8978 + * Data Sheets.
8979 + */
8980 + density = (flashid >> 16) & 0xff;
8981 + switch (density) {
8982 + case 0x17: size = 1 << 23; break; /* 8MB */
8983 + case 0x18: size = 1 << 24; break; /* 16MB */
8984 + }
8985 + break;
8949 8986 }
8987 + }
8950 8988
8989 + /*
8990 + * If we didn't recognize the FLASH part, that's no real issue: the
8991 + * Hardware/Software contract says that Hardware will _*ALWAYS*_
8992 + * use a FLASH part which is at least 4MB in size and has 64KB
8993 + * sectors. The unrecognized FLASH part is likely to be much larger
8994 + * than 4MB, but that's all we really need.
8995 + */
8996 + if (size == 0) {
8997 + CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
8998 + size = 1 << 22;
8999 + }
9000 +
9001 + /*
9002 + * Store decoded Flash size and fall through into vetting code.
9003 + */
9004 + adapter->params.sf_size = size;
9005 + adapter->params.sf_nsec = size / SF_SEC_SIZE;
9006 +
8951 9007 found:
8952 9008 /*
8953 9009 * We should ~probably~ reject adapters with FLASHes which are too
8954 9010 * small but we have some legacy FPGAs with small FLASHes that we'd
8955 9011 * still like to use. So instead we emit a scary message ...
8956 9012 */
8957 9013 if (adapter->params.sf_size < FLASH_MIN_SIZE)
8958 9014 CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
8959 9015 flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
8960 9016
8961 9017 return 0;
8962 9018 }
8963 9019
8964 9020 static void set_pcie_completion_timeout(struct adapter *adapter,
8965 9021 u8 range)
8966 9022 {
8967 9023 u16 val;
8968 9024 u32 pcie_cap;
8969 9025
8970 9026 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
8971 9027 if (pcie_cap) {
8972 9028 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
8973 9029 val &= 0xfff0;
8974 9030 val |= range ;
8975 9031 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
8976 9032 }
8977 9033 }
8978 9034
/**
 *	t4_get_chip_type - Determine chip type from device ID
 *	@adap: the adapter
 *	@ver: adapter version code extracted from the PCI device ID
 *
 *	Returns the CHELSIO_CHIP_CODE() for the adapter, with the FPGA flag
 *	set for FPGA variants, or -EINVAL for unsupported devices and for
 *	T4 rev 1 parts.  NOTE(review): errors are returned through the
 *	enum chip_type return type; callers must treat negative values as
 *	failures.
 */
enum chip_type t4_get_chip_type(struct adapter *adap, int ver)
{
	enum chip_type chip = 0;
	/* PL_REV supplies the chip revision used as the minor code below. */
	u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));

	/* Retrieve adapter's device ID */
	switch (ver) {
	case CHELSIO_T4_FPGA:
		chip |= CHELSIO_CHIP_FPGA;
		/*FALLTHROUGH*/
	case CHELSIO_T4:
		chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		break;
	case CHELSIO_T5_FPGA:
		chip |= CHELSIO_CHIP_FPGA;
		/*FALLTHROUGH*/
	case CHELSIO_T5:
		chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		break;
	case CHELSIO_T6_FPGA:
		chip |= CHELSIO_CHIP_FPGA;
		/*FALLTHROUGH*/
	case CHELSIO_T6:
		chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
		break;
	default:
		CH_ERR(adap, "Device %d is not supported\n",
		       adap->params.pci.device_id);
		return -EINVAL;
	}

	/* T4A1 chip is no longer supported */
	if (chip == T4_A1) {
		CH_ALERT(adap, "T4 rev 1 chip is no longer supported\n");
		return -EINVAL;
	}
	return chip;
}
9022 9078
/**
 *	t4_prep_pf - prepare SW and HW for PF operation
 *	@adapter: the adapter
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables on each PF.  Returns a negative
 *	errno if the device never becomes ready or its chip type is
 *	unsupported.
 */
int t4_prep_pf(struct adapter *adapter)
{
	int ret, ver;

	ret = t4_wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	get_pci_mode(adapter, &adapter->params.pci);


	/* Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &adapter->params.pci.device_id);
	t4_os_pci_read_cfg2(adapter, PCI_VENDOR_ID, &adapter->params.pci.vendor_id);

	/*
	 * Decode the chip generation from the device ID and record the
	 * generation-specific architectural parameters (TCAM sizes,
	 * channel counts, VF counts, ...) used throughout the driver.
	 */
	ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
	adapter->params.chip = t4_get_chip_type(adapter, ver);
	if (is_t4(adapter->params.chip)) {
		adapter->params.arch.sge_fl_db = F_DBPRIO;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		/* Congestion map is for 4 channels so that
		 * MPS can have 4 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 2;
	} else if (is_t5(adapter->params.chip)) {
		adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		adapter->params.arch.cng_ch_bits_log = 2;
	} else if (is_t6(adapter->params.chip)) {
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 256;
		adapter->params.arch.nchan = 2;
		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
		adapter->params.arch.vfcount = 256;
		/* Congestion map will be for 2 channels so that
		 * MPS can have 8 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 3;
	} else {
		CH_ERR(adapter, "Device %d is not supported\n",
			adapter->params.pci.device_id);
		return -EINVAL;
	}

	adapter->params.pci.vpd_cap_addr =
		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	/* FPGAs have a CIM Logic Analyzer twice the size of the ASIC's. */
	if (is_fpga(adapter->params.chip)) {
		/* FPGA */
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	} else {
		/* ASIC */
		adapter->params.cim_la_size = CIMLA_SIZE;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}
9111 9167
/**
 *	t4_prep_master_pf - prepare SW for master PF operations
 *	@adapter: the adapter
 *
 *	Performs the common PF preparation and additionally reads the
 *	Flash parameters, which only the master PF needs.
 */
int t4_prep_master_pf(struct adapter *adapter)
{
	int ret = t4_prep_pf(adapter);

	if (ret < 0)
		return ret;

	ret = t4_get_flash_params(adapter);
	if (ret < 0) {
		CH_ERR(adapter,
		    "Unable to retrieve Flash parameters ret = %d\n", -ret);
		return ret;
	}

	return 0;
}
9134 9190
9135 9191 /**
9136 9192 * t4_prep_adapter - prepare SW and HW for operation
9137 9193 * @adapter: the adapter
9138 9194 * @reset: if true perform a HW reset
9139 9195 *
9140 9196 * Initialize adapter SW state for the various HW modules, set initial
9141 9197 * values for some adapter tunables.
9142 9198 */
9143 9199 int t4_prep_adapter(struct adapter *adapter, bool reset)
9144 9200 {
9145 9201 return t4_prep_master_pf(adapter);
9146 9202 }
9147 9203
9148 9204 /**
9149 9205 * t4_shutdown_adapter - shut down adapter, host & wire
9150 9206 * @adapter: the adapter
9151 9207 *
9152 9208 * Perform an emergency shutdown of the adapter and stop it from
9153 9209 * continuing any further communication on the ports or DMA to the
9154 9210 * host. This is typically used when the adapter and/or firmware
9155 9211 * have crashed and we want to prevent any further accidental
9156 9212 * communication with the rest of the world. This will also force
9157 9213 * the port Link Status to go down -- if register writes work --
9158 9214 * which should help our peers figure out that we're down.
9159 9215 */
9160 9216 int t4_shutdown_adapter(struct adapter *adapter)
9161 9217 {
9162 9218 int port;
9163 9219
9164 9220 t4_intr_disable(adapter);
9165 9221 t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
9166 9222 for_each_port(adapter, port) {
9167 9223 u32 a_port_cfg = is_t4(adapter->params.chip) ?
9168 9224 PORT_REG(port, A_XGMAC_PORT_CFG) :
9169 9225 T5_PORT_REG(port, A_MAC_PORT_CFG);
9170 9226
9171 9227 t4_write_reg(adapter, a_port_cfg,
9172 9228 t4_read_reg(adapter, a_port_cfg)
9173 9229 & ~V_SIGNAL_DET(1));
9174 9230 }
9175 9231 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
9176 9232
9177 9233 return 0;
9178 9234 }
9179 9235
/**
 *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 *	@adapter: the adapter
 *	@qid: the Queue ID
 *	@qtype: the Ingress or Egress type for @qid
 *	@user: true if this request is for a user mode queue
 *	@pbar2_qoffset: BAR2 Queue Offset
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 *	Returns the BAR2 SGE Queue Registers information associated with the
 *	indicated Absolute Queue ID.  These are passed back in return value
 *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 *	This may return an error which indicates that BAR2 SGE Queue
 *	registers aren't available.  If an error is not returned, then the
 *	following values are returned:
 *
 *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 *	require the "Inferred Queue ID" ability may be used.  E.g. the
 *	Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 *	then these "Inferred Queue ID" register may not be used.
 */
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      int user,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers for kernel
	 * mode queues.
	 */
	if (!user && is_t4(adapter->params.chip))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.hps + 10;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_qpp
		     : adapter->params.sge.iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}
9271 9327
/**
 *	t4_init_devlog_params - initialize adapter->params.devlog
 *	@adap: the adapter
 *	@fw_attach: whether we can talk to the firmware
 *
 *	Initialize various fields of the adapter's Firmware Device Log
 *	Parameters structure.  Returns -ENXIO if the parameters register is
 *	unset and the firmware is unreachable; otherwise a mailbox error or 0.
 */
int t4_init_devlog_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		/* address is recorded in 16-byte units; scale to bytes */
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;

		/* entry count is recorded in units of 128 entries */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}
9338 9394
9339 9395 /**
9340 9396 * t4_init_sge_params - initialize adap->params.sge
9341 9397 * @adapter: the adapter
9342 9398 *
9343 9399 * Initialize various fields of the adapter's SGE Parameters structure.
9344 9400 */
9345 9401 int t4_init_sge_params(struct adapter *adapter)
9346 9402 {
9347 9403 struct sge_params *sge_params = &adapter->params.sge;
9348 9404 u32 hps, qpp;
9349 9405 unsigned int s_hps, s_qpp;
9350 9406
9351 9407 /* Extract the SGE Page Size for our PF.
9352 9408 */
9353 9409 hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
9354 9410 s_hps = (S_HOSTPAGESIZEPF0 +
9355 9411 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf);
9356 9412 sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
9357 9413
9358 9414 /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
9359 9415 */
9360 9416 s_qpp = (S_QUEUESPERPAGEPF0 +
9361 9417 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
9362 9418 qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
9363 9419 sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
9364 9420 qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
9365 9421 sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
9366 9422
9367 9423 return 0;
9368 9424 }
9369 9425
/**
 *	t4_init_tp_params - initialize adap->params.tp
 *	@adap: the adapter
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
	int chan;
	u32 v;

	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	adap->params.tp.tre = G_TIMERRESOLUTION(v);
	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.
	 */
	t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
		       A_TP_VLAN_PRI_MAP, sleep_ok);
	t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
		       A_TP_INGRESS_CONFIG, sleep_ok);

	/* For T6, cache the adapter's compressed error vector
	 * and passing outer header info for encapsulated packets.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
		adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
	}

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
								F_ETHERTYPE);
	adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
							       F_MACMATCH);
	adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
								F_MPSHITTYPE);
	adap->params.tp.frag_shift = t4_filter_field_shift(adap,
							   F_FRAGMENTATION);

	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
		adap->params.tp.vnic_shift = -1;

	return 0;
}
9433 9489
9434 9490 /**
9435 9491 * t4_filter_field_shift - calculate filter field shift
9436 9492 * @adap: the adapter
9437 9493 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
9438 9494 *
9439 9495 * Return the shift position of a filter field within the Compressed
9440 9496 * Filter Tuple. The filter field is specified via its selection bit
9441 9497 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
9442 9498 */
9443 9499 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
9444 9500 {
9445 9501 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
9446 9502 unsigned int sel;
9447 9503 int field_shift;
9448 9504
9449 9505 if ((filter_mode & filter_sel) == 0)
9450 9506 return -1;
9451 9507
9452 9508 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
9453 9509 switch (filter_mode & sel) {
9454 9510 case F_FCOE:
9455 9511 field_shift += W_FT_FCOE;
9456 9512 break;
9457 9513 case F_PORT:
9458 9514 field_shift += W_FT_PORT;
9459 9515 break;
9460 9516 case F_VNIC_ID:
9461 9517 field_shift += W_FT_VNIC_ID;
9462 9518 break;
9463 9519 case F_VLAN:
9464 9520 field_shift += W_FT_VLAN;
9465 9521 break;
9466 9522 case F_TOS:
9467 9523 field_shift += W_FT_TOS;
9468 9524 break;
9469 9525 case F_PROTOCOL:
9470 9526 field_shift += W_FT_PROTOCOL;
9471 9527 break;
9472 9528 case F_ETHERTYPE:
9473 9529 field_shift += W_FT_ETHERTYPE;
9474 9530 break;
9475 9531 case F_MACMATCH:
9476 9532 field_shift += W_FT_MACMATCH;
9477 9533 break;
9478 9534 case F_MPSHITTYPE:
9479 9535 field_shift += W_FT_MPSHITTYPE;
9480 9536 break;
9481 9537 case F_FRAGMENTATION:
9482 9538 field_shift += W_FT_FRAGMENTATION;
9483 9539 break;
9484 9540 }
9485 9541 }
9486 9542 return field_shift;
9487 9543 }
9488 9544
9489 9545 /**
9490 9546 * t4_create_filter_info - return Compressed Filter Value/Mask tuple
9491 9547 * @adapter: the adapter
9492 9548 * @filter_value: Filter Value return value pointer
9493 9549 * @filter_mask: Filter Mask return value pointer
9494 9550 * @fcoe: FCoE filter selection
9495 9551 * @port: physical port filter selection
9496 9552 * @vnic: Virtual NIC ID filter selection
9497 9553 * @vlan: VLAN ID filter selection
9498 9554 * @vlan_pcp: VLAN Priority Code Point
9499 9555 * @vlan_dei: VLAN Drop Eligibility Indicator
9500 9556 * @tos: Type Of Server filter selection
9501 9557 * @protocol: IP Protocol filter selection
9502 9558 * @ethertype: Ethernet Type filter selection
9503 9559 * @macmatch: MPS MAC Index filter selection
9504 9560 * @matchtype: MPS Hit Type filter selection
9505 9561 * @frag: IP Fragmentation filter selection
9506 9562 *
9507 9563 * Construct a Compressed Filter Value/Mask tuple based on a set of
9508 9564 * "filter selection" values. For each passed filter selection value
9509 9565 * which is greater than or equal to 0, we put that value into the
9510 9566 * constructed Filter Value and the appropriate mask into the Filter
9511 9567 * Mask. If a filter selections is specified which is not currently
9512 9568 * configured into the hardware, an error will be returned. Otherwise
9513 9569 * the constructed FIlter Value/Mask tuple will be returned via the
9514 9570 * specified return value pointers and success will be returned.
9515 9571 *
9516 9572 * All filter selection values and the returned Filter Value/Mask values
9517 9573 * are in Host-Endian format.
9518 9574 */
9519 9575 int t4_create_filter_info(const struct adapter *adapter,
9520 9576 u64 *filter_value, u64 *filter_mask,
9521 9577 int fcoe, int port, int vnic,
9522 9578 int vlan, int vlan_pcp, int vlan_dei,
9523 9579 int tos, int protocol, int ethertype,
9524 9580 int macmatch, int matchtype, int frag)
9525 9581 {
9526 9582 const struct tp_params *tp = &adapter->params.tp;
9527 9583 u64 v, m;
9528 9584
9529 9585 /*
9530 9586 * If any selected filter field isn't enabled, return an error.
9531 9587 */
9532 9588 #define BAD_FILTER(__field) \
9533 9589 ((__field) >= 0 && tp->__field##_shift < 0)
9534 9590 if (BAD_FILTER(fcoe) ||
9535 9591 BAD_FILTER(port) ||
9536 9592 BAD_FILTER(vnic) ||
9537 9593 BAD_FILTER(vlan) ||
9538 9594 BAD_FILTER(tos) ||
9539 9595 BAD_FILTER(protocol) ||
9540 9596 BAD_FILTER(ethertype) ||
9541 9597 BAD_FILTER(macmatch) ||
9542 9598 BAD_FILTER(matchtype) ||
9543 9599 BAD_FILTER(frag))
9544 9600 return -EINVAL;
9545 9601 #undef BAD_FILTER
9546 9602
9547 9603 /*
9548 9604 * We have to have VLAN ID selected if we want to also select on
9549 9605 * either the Priority Code Point or Drop Eligibility Indicator
9550 9606 * fields.
9551 9607 */
9552 9608 if ((vlan_pcp >= 0 || vlan_dei >= 0) && vlan < 0)
9553 9609 return -EINVAL;
9554 9610
9555 9611 /*
9556 9612 * Construct Filter Value and Mask.
9557 9613 */
9558 9614 v = m = 0;
9559 9615 #define SET_FILTER_FIELD(__field, __width) \
9560 9616 do { \
9561 9617 if ((__field) >= 0) { \
9562 9618 const int shift = tp->__field##_shift; \
9563 9619 \
9564 9620 v |= (__field) << shift; \
9565 9621 m |= ((1ULL << (__width)) - 1) << shift; \
9566 9622 } \
9567 9623 } while (0)
9568 9624 SET_FILTER_FIELD(fcoe, W_FT_FCOE);
9569 9625 SET_FILTER_FIELD(port, W_FT_PORT);
9570 9626 SET_FILTER_FIELD(tos, W_FT_TOS);
9571 9627 SET_FILTER_FIELD(protocol, W_FT_PROTOCOL);
9572 9628 SET_FILTER_FIELD(ethertype, W_FT_ETHERTYPE);
9573 9629 SET_FILTER_FIELD(macmatch, W_FT_MACMATCH);
9574 9630 SET_FILTER_FIELD(matchtype, W_FT_MPSHITTYPE);
9575 9631 SET_FILTER_FIELD(frag, W_FT_FRAGMENTATION);
9576 9632 #undef SET_FILTER_FIELD
9577 9633
9578 9634 /*
9579 9635 * We handle VNIC ID and VLANs separately because they're slightly
9580 9636 * different than the rest of the fields. Both require that a
9581 9637 * corresponding "valid" bit be set in the Filter Value and Mask.
9582 9638 * These bits are in the top bit of the field. Additionally, we can
9583 9639 * select the Priority Code Point and Drop Eligibility Indicator
9584 9640 * fields for VLANs as an option. Remember that the format of a VLAN
9585 9641 * Tag is:
9586 9642 *
9587 9643 * bits: 3 1 12
9588 9644 * +---+-+------------+
9589 9645 * |PCP|D| VLAN ID |
9590 9646 * +---+-+------------+
9591 9647 */
9592 9648 if (vnic >= 0) {
9593 9649 v |= ((1ULL << (W_FT_VNIC_ID-1)) | vnic) << tp->vnic_shift;
9594 9650 m |= ((1ULL << W_FT_VNIC_ID) - 1) << tp->vnic_shift;
9595 9651 }
9596 9652 if (vlan >= 0) {
9597 9653 v |= ((1ULL << (W_FT_VLAN-1)) | vlan) << tp->vlan_shift;
9598 9654 m |= ((1ULL << (W_FT_VLAN-1)) | 0xfff) << tp->vlan_shift;
9599 9655
9600 9656 if (vlan_dei >= 0) {
9601 9657 v |= vlan_dei << (tp->vlan_shift + 12);
9602 9658 m |= 0x7 << (tp->vlan_shift + 12);
9603 9659 }
9604 9660 if (vlan_pcp >= 0) {
9605 9661 v |= vlan_pcp << (tp->vlan_shift + 13);
9606 9662 m |= 0x7 << (tp->vlan_shift + 13);
9607 9663 }
9608 9664 }
9609 9665
9610 9666 /*
9611 9667 * Pass back computed Filter Value and Mask; return success.
9612 9668 */
9613 9669 *filter_value = v;
9614 9670 *filter_mask = m;
9615 9671 return 0;
9616 9672 }
9617 9673
9618 9674 int t4_init_rss_mode(struct adapter *adap, int mbox)
9619 9675 {
9620 9676 int i, ret;
9621 9677 struct fw_rss_vi_config_cmd rvc;
9622 9678
9623 9679 memset(&rvc, 0, sizeof(rvc));
9624 9680
9625 9681 for_each_port(adap, i) {
9626 9682 struct port_info *p = adap2pinfo(adap, i);
9627 9683 rvc.op_to_viid =
9628 9684 cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
9629 9685 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9630 9686 V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
9631 9687 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
9632 9688 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
9633 9689 if (ret)
9634 9690 return ret;
9635 9691 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
9636 9692 }
9637 9693 return 0;
9638 9694 }
9639 9695
9640 9696 static int t4_init_portmirror(struct port_info *pi, int mbox,
9641 9697 int port, int pf, int vf)
9642 9698 {
9643 9699 struct adapter *adapter = pi->adapter;
9644 9700 int ret;
9645 9701
9646 9702 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL);
9647 9703 if (ret < 0)
9648 9704 return ret;
9649 9705
9650 9706 CH_INFO(adapter, "Port %d Traffic Mirror PF = %u; VF = %u\n",
9651 9707 port, G_FW_VIID_PFN(ret), G_FW_VIID_VIN(ret));
9652 9708
9653 9709 pi->viid_mirror = ret;
9654 9710 return 0;
9655 9711 }
9656 9712
9657 9713 int t4_mirror_init(struct adapter *adap, int mbox, int pf, int vf)
9658 9714 {
9659 9715 int ret, i, j = 0;
9660 9716
9661 9717 for_each_port(adap, i) {
9662 9718 struct port_info *pi = adap2pinfo(adap, i);
9663 9719
9664 9720 while ((adap->params.portvec & (1 << j)) == 0)
9665 9721 j++;
9666 9722
9667 9723 ret = t4_init_portmirror(pi, mbox, j, pf, vf);
9668 9724 if (ret)
9669 9725 return ret;
9670 9726 j++;
9671 9727 }
9672 9728 return 0;
9673 9729 }
9674 9730
9675 9731 /**
9676 9732 * t4_init_portinfo - allocate a virtual interface and initialize port_info
9677 9733 * @pi: the port_info
9678 9734 * @mbox: mailbox to use for the FW command
9679 9735 * @port: physical port associated with the VI
9680 9736 * @pf: the PF owning the VI
9681 9737 * @vf: the VF owning the VI
9682 9738 * @mac: the MAC address of the VI
9683 9739 *
9684 9740 * Allocates a virtual interface for the given physical port. If @mac is
9685 9741 * not %NULL it contains the MAC address of the VI as assigned by FW.
9686 9742 * @mac should be large enough to hold an Ethernet address.
9687 9743 * Returns < 0 on error.
9688 9744 */
9689 9745 int t4_init_portinfo(struct port_info *pi, int mbox,
9690 9746 int port, int pf, int vf, u8 mac[])
9691 9747 {
9692 9748 int ret;
9693 9749 struct fw_port_cmd c;
9694 9750 unsigned int rss_size;
9695 9751
9696 9752 memset(&c, 0, sizeof(c));
9697 9753 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
9698 9754 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9699 9755 V_FW_PORT_CMD_PORTID(port));
9700 9756 c.action_to_len16 = cpu_to_be32(
9701 9757 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
9702 9758 FW_LEN16(c));
9703 9759 ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c);
9704 9760 if (ret)
9705 9761 return ret;
9706 9762
9707 9763 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
9708 9764 if (ret < 0)
9709 9765 return ret;
9710 9766
9711 9767 pi->viid = ret;
9712 9768 pi->tx_chan = port;
9713 9769 pi->lport = port;
9714 9770 pi->rss_size = rss_size;
9715 9771 pi->rx_chan = t4_get_tp_e2c_map(pi->adapter, port);
9716 9772
9717 9773 ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
9718 9774 pi->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
9719 9775 G_FW_PORT_CMD_MDIOADDR(ret) : -1;
9720 9776 pi->port_type = G_FW_PORT_CMD_PTYPE(ret);
9721 9777 pi->mod_type = FW_PORT_MOD_TYPE_NA;
9722 9778
9723 9779 init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap),
9724 9780 be16_to_cpu(c.u.info.acap));
9725 9781 return 0;
9726 9782 }
9727 9783
9728 9784 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
9729 9785 {
9730 9786 u8 addr[6];
9731 9787 int ret, i, j = 0;
9732 9788
9733 9789 for_each_port(adap, i) {
9734 9790 struct port_info *pi = adap2pinfo(adap, i);
9735 9791
9736 9792 while ((adap->params.portvec & (1 << j)) == 0)
9737 9793 j++;
9738 9794
9739 9795 ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
9740 9796 if (ret)
9741 9797 return ret;
9742 9798
9743 9799 t4_os_set_hw_addr(adap, i, addr);
9744 9800 j++;
9745 9801 }
9746 9802 return 0;
9747 9803 }
9748 9804
9749 9805 /**
9750 9806 * t4_read_cimq_cfg - read CIM queue configuration
9751 9807 * @adap: the adapter
9752 9808 * @base: holds the queue base addresses in bytes
9753 9809 * @size: holds the queue sizes in bytes
9754 9810 * @thres: holds the queue full thresholds in bytes
9755 9811 *
9756 9812 * Returns the current configuration of the CIM queues, starting with
9757 9813 * the IBQs, then the OBQs.
9758 9814 */
9759 9815 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9760 9816 {
9761 9817 unsigned int i, v;
9762 9818 int cim_num_obq = is_t4(adap->params.chip) ?
9763 9819 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9764 9820
9765 9821 for (i = 0; i < CIM_NUM_IBQ; i++) {
9766 9822 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
9767 9823 V_QUENUMSELECT(i));
9768 9824 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9769 9825 /* value is in 256-byte units */
9770 9826 *base++ = G_CIMQBASE(v) * 256;
9771 9827 *size++ = G_CIMQSIZE(v) * 256;
9772 9828 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
9773 9829 }
9774 9830 for (i = 0; i < cim_num_obq; i++) {
9775 9831 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
9776 9832 V_QUENUMSELECT(i));
9777 9833 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9778 9834 /* value is in 256-byte units */
9779 9835 *base++ = G_CIMQBASE(v) * 256;
9780 9836 *size++ = G_CIMQSIZE(v) * 256;
9781 9837 }
9782 9838 }
9783 9839
9784 9840 /**
9785 9841 * t4_read_cim_ibq - read the contents of a CIM inbound queue
9786 9842 * @adap: the adapter
9787 9843 * @qid: the queue index
9788 9844 * @data: where to store the queue contents
9789 9845 * @n: capacity of @data in 32-bit words
9790 9846 *
9791 9847 * Reads the contents of the selected CIM queue starting at address 0 up
9792 9848 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9793 9849 * error and the number of 32-bit words actually read on success.
9794 9850 */
9795 9851 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9796 9852 {
9797 9853 int i, err, attempts;
9798 9854 unsigned int addr;
9799 9855 const unsigned int nwords = CIM_IBQ_SIZE * 4;
9800 9856
9801 9857 if (qid > 5 || (n & 3))
9802 9858 return -EINVAL;
9803 9859
9804 9860 addr = qid * nwords;
9805 9861 if (n > nwords)
9806 9862 n = nwords;
9807 9863
9808 9864 /* It might take 3-10ms before the IBQ debug read access is allowed.
9809 9865 * Wait for 1 Sec with a delay of 1 usec.
9810 9866 */
9811 9867 attempts = 1000000;
9812 9868
9813 9869 for (i = 0; i < n; i++, addr++) {
9814 9870 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
9815 9871 F_IBQDBGEN);
9816 9872 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
9817 9873 attempts, 1);
9818 9874 if (err)
9819 9875 return err;
9820 9876 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
9821 9877 }
9822 9878 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
9823 9879 return i;
9824 9880 }
9825 9881
9826 9882 /**
9827 9883 * t4_read_cim_obq - read the contents of a CIM outbound queue
9828 9884 * @adap: the adapter
9829 9885 * @qid: the queue index
9830 9886 * @data: where to store the queue contents
9831 9887 * @n: capacity of @data in 32-bit words
9832 9888 *
9833 9889 * Reads the contents of the selected CIM queue starting at address 0 up
9834 9890 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9835 9891 * error and the number of 32-bit words actually read on success.
9836 9892 */
9837 9893 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9838 9894 {
9839 9895 int i, err;
9840 9896 unsigned int addr, v, nwords;
9841 9897 int cim_num_obq = is_t4(adap->params.chip) ?
9842 9898 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9843 9899
9844 9900 if ((qid > (cim_num_obq - 1)) || (n & 3))
9845 9901 return -EINVAL;
9846 9902
9847 9903 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
9848 9904 V_QUENUMSELECT(qid));
9849 9905 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9850 9906
9851 9907 addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */
9852 9908 nwords = G_CIMQSIZE(v) * 64; /* same */
9853 9909 if (n > nwords)
9854 9910 n = nwords;
9855 9911
9856 9912 for (i = 0; i < n; i++, addr++) {
9857 9913 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
9858 9914 F_OBQDBGEN);
9859 9915 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
9860 9916 2, 1);
9861 9917 if (err)
9862 9918 return err;
9863 9919 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
9864 9920 }
9865 9921 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
9866 9922 return i;
9867 9923 }
9868 9924
9869 9925 /**
9870 9926 * t4_cim_read - read a block from CIM internal address space
9871 9927 * @adap: the adapter
9872 9928 * @addr: the start address within the CIM address space
9873 9929 * @n: number of words to read
9874 9930 * @valp: where to store the result
9875 9931 *
9876 9932 * Reads a block of 4-byte words from the CIM intenal address space.
9877 9933 */
9878 9934 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9879 9935 unsigned int *valp)
9880 9936 {
9881 9937 int ret = 0;
9882 9938
9883 9939 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
9884 9940 return -EBUSY;
9885 9941
9886 9942 for ( ; !ret && n--; addr += 4) {
9887 9943 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
9888 9944 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
9889 9945 0, 5, 2);
9890 9946 if (!ret)
9891 9947 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
9892 9948 }
9893 9949 return ret;
9894 9950 }
9895 9951
9896 9952 /**
9897 9953 * t4_cim_write - write a block into CIM internal address space
9898 9954 * @adap: the adapter
9899 9955 * @addr: the start address within the CIM address space
9900 9956 * @n: number of words to write
9901 9957 * @valp: set of values to write
9902 9958 *
9903 9959 * Writes a block of 4-byte words into the CIM intenal address space.
9904 9960 */
9905 9961 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9906 9962 const unsigned int *valp)
9907 9963 {
9908 9964 int ret = 0;
9909 9965
9910 9966 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
9911 9967 return -EBUSY;
9912 9968
9913 9969 for ( ; !ret && n--; addr += 4) {
9914 9970 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
9915 9971 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
9916 9972 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
9917 9973 0, 5, 2);
9918 9974 }
9919 9975 return ret;
9920 9976 }
9921 9977
/* Convenience wrapper: write a single 4-byte word into CIM space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
9927 9983
9928 9984 /**
9929 9985 * t4_cim_read_la - read CIM LA capture buffer
9930 9986 * @adap: the adapter
9931 9987 * @la_buf: where to store the LA data
9932 9988 * @wrptr: the HW write pointer within the capture buffer
9933 9989 *
9934 9990 * Reads the contents of the CIM LA buffer with the most recent entry at
9935 9991 * the end of the returned data and with the entry at @wrptr first.
9936 9992 * We try to leave the LA in the running state we find it in.
9937 9993 */
9938 9994 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
9939 9995 {
9940 9996 int i, ret;
9941 9997 unsigned int cfg, val, idx;
9942 9998
9943 9999 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
9944 10000 if (ret)
9945 10001 return ret;
9946 10002
9947 10003 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
9948 10004 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
9949 10005 if (ret)
9950 10006 return ret;
9951 10007 }
9952 10008
9953 10009 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
9954 10010 if (ret)
9955 10011 goto restart;
9956 10012
9957 10013 idx = G_UPDBGLAWRPTR(val);
9958 10014 if (wrptr)
9959 10015 *wrptr = idx;
9960 10016
9961 10017 for (i = 0; i < adap->params.cim_la_size; i++) {
9962 10018 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
9963 10019 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
9964 10020 if (ret)
9965 10021 break;
9966 10022 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
9967 10023 if (ret)
9968 10024 break;
9969 10025 if (val & F_UPDBGLARDEN) {
9970 10026 ret = -ETIMEDOUT;
9971 10027 break;
9972 10028 }
9973 10029 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
9974 10030 if (ret)
9975 10031 break;
9976 10032
9977 10033 /* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
9978 10034 idx = (idx + 1) & M_UPDBGLARDPTR;
9979 10035 /*
9980 10036 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
9981 10037 * identify the 32-bit portion of the full 312-bit data
9982 10038 */
9983 10039 if (is_t6(adap->params.chip))
9984 10040 while ((idx & 0xf) > 9)
9985 10041 idx = (idx + 1) % M_UPDBGLARDPTR;
9986 10042 }
9987 10043 restart:
9988 10044 if (cfg & F_UPDBGLAEN) {
9989 10045 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
9990 10046 cfg & ~F_UPDBGLARDEN);
9991 10047 if (!ret)
9992 10048 ret = r;
9993 10049 }
9994 10050 return ret;
9995 10051 }
9996 10052
9997 10053 /**
9998 10054 * t4_tp_read_la - read TP LA capture buffer
9999 10055 * @adap: the adapter
10000 10056 * @la_buf: where to store the LA data
10001 10057 * @wrptr: the HW write pointer within the capture buffer
10002 10058 *
10003 10059 * Reads the contents of the TP LA buffer with the most recent entry at
10004 10060 * the end of the returned data and with the entry at @wrptr first.
10005 10061 * We leave the LA in the running state we find it in.
10006 10062 */
10007 10063 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
10008 10064 {
10009 10065 bool last_incomplete;
10010 10066 unsigned int i, cfg, val, idx;
10011 10067
10012 10068 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
10013 10069 if (cfg & F_DBGLAENABLE) /* freeze LA */
10014 10070 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
10015 10071 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
10016 10072
10017 10073 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
10018 10074 idx = G_DBGLAWPTR(val);
10019 10075 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
10020 10076 if (last_incomplete)
10021 10077 idx = (idx + 1) & M_DBGLARPTR;
10022 10078 if (wrptr)
10023 10079 *wrptr = idx;
10024 10080
10025 10081 val &= 0xffff;
10026 10082 val &= ~V_DBGLARPTR(M_DBGLARPTR);
10027 10083 val |= adap->params.tp.la_mask;
10028 10084
10029 10085 for (i = 0; i < TPLA_SIZE; i++) {
10030 10086 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
10031 10087 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
10032 10088 idx = (idx + 1) & M_DBGLARPTR;
10033 10089 }
10034 10090
10035 10091 /* Wipe out last entry if it isn't valid */
10036 10092 if (last_incomplete)
10037 10093 la_buf[TPLA_SIZE - 1] = ~0ULL;
10038 10094
10039 10095 if (cfg & F_DBGLAENABLE) /* restore running state */
10040 10096 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
10041 10097 cfg | adap->params.tp.la_mask);
10042 10098 }
10043 10099
/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1
#define SGE_IDMA_WARN_REPEAT 300
10053 10109
10054 10110 /**
10055 10111 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
10056 10112 * @adapter: the adapter
10057 10113 * @idma: the adapter IDMA Monitor state
10058 10114 *
10059 10115 * Initialize the state of an SGE Ingress DMA Monitor.
10060 10116 */
10061 10117 void t4_idma_monitor_init(struct adapter *adapter,
10062 10118 struct sge_idma_monitor_state *idma)
10063 10119 {
10064 10120 /* Initialize the state variables for detecting an SGE Ingress DMA
10065 10121 * hang. The SGE has internal counters which count up on each clock
10066 10122 * tick whenever the SGE finds its Ingress DMA State Engines in the
10067 10123 * same state they were on the previous clock tick. The clock used is
10068 10124 * the Core Clock so we have a limit on the maximum "time" they can
10069 10125 * record; typically a very small number of seconds. For instance,
10070 10126 * with a 600MHz Core Clock, we can only count up to a bit more than
10071 10127 * 7s. So we'll synthesize a larger counter in order to not run the
10072 10128 * risk of having the "timers" overflow and give us the flexibility to
10073 10129 * maintain a Hung SGE State Machine of our own which operates across
10074 10130 * a longer time frame.
10075 10131 */
10076 10132 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
10077 10133 idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
10078 10134 }
10079 10135
10080 10136 /**
10081 10137 * t4_idma_monitor - monitor SGE Ingress DMA state
10082 10138 * @adapter: the adapter
10083 10139 * @idma: the adapter IDMA Monitor state
10084 10140 * @hz: number of ticks/second
10085 10141 * @ticks: number of ticks since the last IDMA Monitor call
10086 10142 */
10087 10143 void t4_idma_monitor(struct adapter *adapter,
10088 10144 struct sge_idma_monitor_state *idma,
10089 10145 int hz, int ticks)
10090 10146 {
10091 10147 int i, idma_same_state_cnt[2];
10092 10148
10093 10149 /* Read the SGE Debug Ingress DMA Same State Count registers. These
10094 10150 * are counters inside the SGE which count up on each clock when the
10095 10151 * SGE finds its Ingress DMA State Engines in the same states they
10096 10152 * were in the previous clock. The counters will peg out at
10097 10153 * 0xffffffff without wrapping around so once they pass the 1s
10098 10154 * threshold they'll stay above that till the IDMA state changes.
10099 10155 */
10100 10156 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
10101 10157 idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
10102 10158 idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
10103 10159
10104 10160 for (i = 0; i < 2; i++) {
10105 10161 u32 debug0, debug11;
10106 10162
10107 10163 /* If the Ingress DMA Same State Counter ("timer") is less
10108 10164 * than 1s, then we can reset our synthesized Stall Timer and
10109 10165 * continue. If we have previously emitted warnings about a
10110 10166 * potential stalled Ingress Queue, issue a note indicating
10111 10167 * that the Ingress Queue has resumed forward progress.
10112 10168 */
10113 10169 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
10114 10170 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
10115 10171 CH_WARN(adapter, "SGE idma%d, queue %u, "
10116 10172 "resumed after %d seconds\n",
10117 10173 i, idma->idma_qid[i],
10118 10174 idma->idma_stalled[i]/hz);
10119 10175 idma->idma_stalled[i] = 0;
10120 10176 continue;
10121 10177 }
10122 10178
10123 10179 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
10124 10180 * domain. The first time we get here it'll be because we
10125 10181 * passed the 1s Threshold; each additional time it'll be
10126 10182 * because the RX Timer Callback is being fired on its regular
10127 10183 * schedule.
10128 10184 *
10129 10185 * If the stall is below our Potential Hung Ingress Queue
10130 10186 * Warning Threshold, continue.
10131 10187 */
10132 10188 if (idma->idma_stalled[i] == 0) {
10133 10189 idma->idma_stalled[i] = hz;
10134 10190 idma->idma_warn[i] = 0;
10135 10191 } else {
10136 10192 idma->idma_stalled[i] += ticks;
10137 10193 idma->idma_warn[i] -= ticks;
10138 10194 }
10139 10195
10140 10196 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
10141 10197 continue;
10142 10198
10143 10199 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
10144 10200 */
10145 10201 if (idma->idma_warn[i] > 0)
10146 10202 continue;
10147 10203 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
10148 10204
10149 10205 /* Read and save the SGE IDMA State and Queue ID information.
10150 10206 * We do this every time in case it changes across time ...
10151 10207 * can't be too careful ...
10152 10208 */
10153 10209 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
10154 10210 debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
10155 10211 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
10156 10212
10157 10213 t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
10158 10214 debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
10159 10215 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
10160 10216
10161 10217 CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
10162 10218 " state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
10163 10219 i, idma->idma_qid[i], idma->idma_state[i],
10164 10220 idma->idma_stalled[i]/hz,
10165 10221 debug0, debug11);
10166 10222 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
10167 10223 }
10168 10224 }
10169 10225
10170 10226 /**
10171 10227 * t4_set_vf_mac - Set MAC address for the specified VF
10172 10228 * @adapter: The adapter
10173 10229 * @vf: one of the VFs instantiated by the specified PF
10174 10230 * @naddr: the number of MAC addresses
10175 10231 * @addr: the MAC address(es) to be set to the specified VF
10176 10232 */
10177 10233 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
10178 10234 unsigned int naddr, u8 *addr)
10179 10235 {
10180 10236 struct fw_acl_mac_cmd cmd;
10181 10237
10182 10238 memset(&cmd, 0, sizeof(cmd));
10183 10239 cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
10184 10240 F_FW_CMD_REQUEST |
10185 10241 F_FW_CMD_WRITE |
10186 10242 V_FW_ACL_MAC_CMD_PFN(adapter->pf) |
10187 10243 V_FW_ACL_MAC_CMD_VFN(vf));
10188 10244
10189 10245 /* Note: Do not enable the ACL */
10190 10246 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
10191 10247 cmd.nmac = naddr;
10192 10248
10193 10249 switch (adapter->pf) {
10194 10250 case 3:
10195 10251 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
10196 10252 break;
10197 10253 case 2:
10198 10254 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
10199 10255 break;
10200 10256 case 1:
10201 10257 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
10202 10258 break;
10203 10259 case 0:
10204 10260 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
10205 10261 break;
10206 10262 }
10207 10263
10208 10264 return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
10209 10265 }
10210 10266
10211 10267 /* Code which cannot be pushed to kernel.org e.g., cxgbtool ioctl helper
10212 10268 * functions
10213 10269 */
10214 10270
10215 10271 /**
10216 10272 * t4_read_pace_tbl - read the pace table
10217 10273 * @adap: the adapter
10218 10274 * @pace_vals: holds the returned values
10219 10275 *
10220 10276 * Returns the values of TP's pace table in microseconds.
10221 10277 */
10222 10278 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
10223 10279 {
10224 10280 unsigned int i, v;
10225 10281
10226 10282 for (i = 0; i < NTX_SCHED; i++) {
10227 10283 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
10228 10284 v = t4_read_reg(adap, A_TP_PACE_TABLE);
10229 10285 pace_vals[i] = dack_ticks_to_usec(adap, v);
10230 10286 }
10231 10287 }
10232 10288
10233 10289 /**
10234 10290 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
10235 10291 * @adap: the adapter
10236 10292 * @sched: the scheduler index
10237 10293 * @kbps: the byte rate in Kbps
10238 10294 * @ipg: the interpacket delay in tenths of nanoseconds
10239 10295 * @sleep_ok: if true we may sleep while awaiting command completion
10240 10296 *
10241 10297 * Return the current configuration of a HW Tx scheduler.
10242 10298 */
10243 10299 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
10244 10300 unsigned int *ipg, bool sleep_ok)
10245 10301 {
10246 10302 unsigned int v, addr, bpt, cpt;
10247 10303
10248 10304 if (kbps) {
10249 10305 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
10250 10306 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10251 10307 if (sched & 1)
10252 10308 v >>= 16;
10253 10309 bpt = (v >> 8) & 0xff;
10254 10310 cpt = v & 0xff;
10255 10311 if (!cpt)
10256 10312 *kbps = 0; /* scheduler disabled */
10257 10313 else {
10258 10314 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
10259 10315 *kbps = (v * bpt) / 125;
10260 10316 }
10261 10317 }
10262 10318 if (ipg) {
10263 10319 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
10264 10320 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10265 10321 if (sched & 1)
10266 10322 v >>= 16;
10267 10323 v &= 0xffff;
10268 10324 *ipg = (10000 * v) / core_ticks_per_usec(adap);
10269 10325 }
10270 10326 }
10271 10327
10272 10328 /**
10273 10329 * t4_load_cfg - download config file
10274 10330 * @adap: the adapter
10275 10331 * @cfg_data: the cfg text file to write
10276 10332 * @size: text file size
10277 10333 *
10278 10334 * Write the supplied config text file to the card's serial flash.
10279 10335 */
10280 10336 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10281 10337 {
10282 10338 int ret, i, n, cfg_addr;
10283 10339 unsigned int addr;
10284 10340 unsigned int flash_cfg_start_sec;
10285 10341 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10286 10342
10287 10343 cfg_addr = t4_flash_cfg_addr(adap);
10288 10344 if (cfg_addr < 0)
10289 10345 return cfg_addr;
10290 10346
10291 10347 addr = cfg_addr;
10292 10348 flash_cfg_start_sec = addr / SF_SEC_SIZE;
10293 10349
10294 10350 if (size > FLASH_CFG_MAX_SIZE) {
10295 10351 CH_ERR(adap, "cfg file too large, max is %u bytes\n",
10296 10352 FLASH_CFG_MAX_SIZE);
10297 10353 return -EFBIG;
10298 10354 }
10299 10355
10300 10356 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
10301 10357 sf_sec_size);
10302 10358 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10303 10359 flash_cfg_start_sec + i - 1);
10304 10360 /*
10305 10361 * If size == 0 then we're simply erasing the FLASH sectors associated
10306 10362 * with the on-adapter Firmware Configuration File.
10307 10363 */
10308 10364 if (ret || size == 0)
10309 10365 goto out;
10310 10366
10311 10367 /* this will write to the flash up to SF_PAGE_SIZE at a time */
10312 10368 for (i = 0; i< size; i+= SF_PAGE_SIZE) {
10313 10369 if ( (size - i) < SF_PAGE_SIZE)
10314 10370 n = size - i;
10315 10371 else
10316 10372 n = SF_PAGE_SIZE;
10317 10373 ret = t4_write_flash(adap, addr, n, cfg_data, 1);
10318 10374 if (ret)
10319 10375 goto out;
10320 10376
10321 10377 addr += SF_PAGE_SIZE;
10322 10378 cfg_data += SF_PAGE_SIZE;
10323 10379 }
10324 10380
10325 10381 out:
10326 10382 if (ret)
10327 10383 CH_ERR(adap, "config file %s failed %d\n",
10328 10384 (size == 0 ? "clear" : "download"), ret);
10329 10385 return ret;
10330 10386 }
10331 10387
10332 10388 /**
10333 10389 * t5_fw_init_extern_mem - initialize the external memory
10334 10390 * @adap: the adapter
10335 10391 *
10336 10392 * Initializes the external memory on T5.
10337 10393 */
10338 10394 int t5_fw_init_extern_mem(struct adapter *adap)
10339 10395 {
10340 10396 u32 params[1], val[1];
10341 10397 int ret;
10342 10398
10343 10399 if (!is_t5(adap->params.chip))
10344 10400 return 0;
10345 10401
10346 10402 val[0] = 0xff; /* Initialize all MCs */
10347 10403 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
10348 10404 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
10349 10405 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
10350 10406 FW_CMD_MAX_TIMEOUT);
10351 10407
10352 10408 return ret;
10353 10409 }
10354 10410
/* BIOS boot headers: standard PCI Expansion ROM image header layout. */
typedef struct pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	reserved[22];	/* Reserved per processor Architecture data */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
10361 10417
/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8 signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8 size512; /* Current Image Size in units of 512 bytes */
	u8 initentry_point[4]; /* Initialization entry point */
	u8 cksum; /* Checksum computed on the entire Image */
	u8 reserved[16]; /* Reserved */
	u8 pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
10371 10427
/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8 signature[2]; /* ROM signature. The value 0xaa55 */
	u8 initialization_size[2]; /* Units 512. Includes this header */
	u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8 efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8 efi_machine_type[2]; /* Machine type from EFI image header */
	u8 compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8 reserved[8]; /* Reserved */
	u8 efi_image_header_offset[2]; /* Offset to EFI Image */
	u8 pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
10390 10446
/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8 signature[4]; /* Signature. The string "PCIR" */
	u8 vendor_id[2]; /* Vendor Identification */
	u8 device_id[2]; /* Device Identification */
	u8 vital_product[2]; /* Pointer to Vital Product Data */
	u8 length[2]; /* PCIR Data Structure Length */
	u8 revision; /* PCIR Data Structure Revision */
	u8 class_code[3]; /* Class Code */
	u8 image_length[2]; /* Image Length. Multiple of 512B */
	u8 code_revision[2]; /* Revision Level of Code/Data */
	u8 code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8 indicator; /* Indicator. Identifies the last image in the ROM */
	u8 reserved[2]; /* Reserved */
} pcir_data_t; /* PCI_DATA_STRUCTURE */
10414 10470
/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* max image: 1024 * 512B = 512KB */
	VENDOR_ID = 0x1425, /* Chelsio PCI Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* "PCIR" as a little-endian u32 */
};
10425 10481
/*
 * modify_device_id - Modifies the device ID of the Boot BIOS image
 * @device_id: the device ID to write
 * @boot_data: the boot image to modify
 *
 * Walk every chained image in the boot ROM and write the supplied device
 * ID into each image whose code type allows it, recomputing the byte
 * checksum of legacy (code type 0x00) images.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;	/* byte offset of the current image in the ROM */

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		/* pcir_offset is stored little-endian inside the image */
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
		    le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or HP.
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter.
			 * NOTE(review): written in host byte order while the
			 * read path in t4_load_boot uses le16_to_cpu() —
			 * confirm behavior on big-endian hosts.
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum: the bytes of a
			 * legacy image must sum to zero modulo 256.
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum.
			 * Offset 7 within the image is the cksum field
			 * (2-byte signature + size512 + 4-byte entry point).
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 * (no byte checksum is recomputed for this type).
			 */
			*(u16*) pcir_header->device_id = device_id;

		}


		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}
10504 10560
#ifdef CHELSIO_T4_DIAGS
/*
 * t4_erase_sf - Erase entire serial Flash region
 * @adap: the adapter
 *
 * Clears the entire serial flash region (diagnostics builds only).
 * Returns 0 on success or a negative error from the erase operation.
 */
int t4_erase_sf(struct adapter *adap)
{
	unsigned int nsectors;
	int ret;

	/* Clamp to the number of sectors actually present on this part. */
	nsectors = FLASH_END_SEC;
	if (nsectors > adap->params.sf_nsec)
		nsectors = adap->params.sf_nsec;

	/*
	 * Erase all sectors of flash before and including the FW.
	 * Flash layout is in t4_hw.h.
	 */
	ret = t4_flash_erase_sectors(adap, 0, nsectors - 1);
	if (ret)
		CH_ERR(adap, "Erasing serial flash failed, error %d\n", ret);
	return ret;
}
#endif
10529 10585
/*
 * t4_load_boot - download boot flash
 * @adap: the adapter
 * @boot_data: the boot image to write
 * @boot_addr: offset in flash to write boot_data, in units of 1KB
 * @size: image size in bytes; 0 means only erase the option ROM sectors
 *
 * Write the supplied boot image to the card's serial flash.
 * The boot image has the following sections: a 28-byte header and the
 * boot image.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = (boot_addr * 1024); /* flash byte offset */
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region.
	 * NOTE(review): the >> 16 hard-codes 64KB flash sectors even though
	 * sf_sec_size is computed dynamically above — confirm for parts with
	 * other sector sizes.
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
	 * and Boot configuration data sections. These 3 boot sections span
	 * sectors 0 to 7 in flash and live right before the FW image location.
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	/* pcir_offset is stored little-endian inside the image */
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors. We ought to check for
	 * more but it's not worth it for now ...
	 * NOTE(review): this check runs after the sectors were already
	 * erased above, so a rejected image still leaves the option ROM
	 * region blank — confirm this ordering is intended.
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

#ifndef CHELSIO_T4_DIAGS
	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID missing signature\n");
		return -EINVAL;
	}
#endif

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = device_id & 0xf0ff;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 * NOTE(review): this loop assumes size is a multiple of SF_PAGE_SIZE;
	 * since size is unsigned, any remainder would wrap the counter and
	 * run far past the image — confirm callers pad the image.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	/* Finally commit the boot header (the first page) itself. */
	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}
10661 10717
10662 10718 /*
10663 10719 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
10664 10720 * @adapter: the adapter
10665 10721 *
10666 10722 * Return the address within the flash where the OptionROM Configuration
10667 10723 * is stored, or an error if the device FLASH is too small to contain
10668 10724 * a OptionROM Configuration.
10669 10725 */
10670 10726 static int t4_flash_bootcfg_addr(struct adapter *adapter)
10671 10727 {
10672 10728 /*
10673 10729 * If the device FLASH isn't large enough to hold a Firmware
10674 10730 * Configuration File, return an error.
10675 10731 */
10676 10732 if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
10677 10733 return -ENOSPC;
10678 10734
10679 10735 return FLASH_BOOTCFG_START;
10680 10736 }
10681 10737
/*
 * t4_load_bootcfg - download the OptionROM Configuration File to flash
 * @adap: the adapter
 * @cfg_data: the configuration data to write; unused when @size is 0
 * @size: size of the data in bytes; 0 erases the region only
 *
 * Erases the flash sectors reserved for the OptionROM Configuration
 * File and, when @size is non-zero, writes @cfg_data there one flash
 * page at a time.
 */
int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/* Locate the OptionROM configuration region in flash. */
	cfg_addr = t4_flash_bootcfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	/* Reject files that cannot fit in the region. */
	if (size > FLASH_BOOTCFG_MAX_SIZE) {
		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
		       FLASH_BOOTCFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter OptionROM Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
		if ( (size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "boot config data %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}
10734 10790
10735 10791 /**
10736 10792 * t4_set_filter_mode - configure the optional components of filter tuples
10737 10793 * @adap: the adapter
10738 10794 * @mode_map: a bitmap selcting which optional filter components to enable
10739 10795 * @sleep_ok: if true we may sleep while awaiting command completion
10740 10796 *
10741 10797 * Sets the filter mode by selecting the optional components to enable
10742 10798 * in filter tuples. Returns 0 on success and a negative error if the
10743 10799 * requested mode needs more bits than are available for optional
10744 10800 * components.
10745 10801 */
10746 10802 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
10747 10803 bool sleep_ok)
10748 10804 {
10749 10805 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
10750 10806
10751 10807 int i, nbits = 0;
10752 10808
10753 10809 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
10754 10810 if (mode_map & (1 << i))
10755 10811 nbits += width[i];
10756 10812 if (nbits > FILTER_OPT_LEN)
10757 10813 return -EINVAL;
10758 10814
10759 10815 t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
10760 10816
10761 10817 return 0;
10762 10818 }
10763 10819
10764 10820 /**
10765 10821 * t4_clr_port_stats - clear port statistics
10766 10822 * @adap: the adapter
10767 10823 * @idx: the port index
10768 10824 *
10769 10825 * Clear HW statistics for the given port.
10770 10826 */
10771 10827 void t4_clr_port_stats(struct adapter *adap, int idx)
10772 10828 {
10773 10829 unsigned int i;
10774 10830 u32 bgmap = t4_get_mps_bg_map(adap, idx);
10775 10831 u32 port_base_addr;
10776 10832
10777 10833 if (is_t4(adap->params.chip))
10778 10834 port_base_addr = PORT_BASE(idx);
10779 10835 else
10780 10836 port_base_addr = T5_PORT_BASE(idx);
10781 10837
10782 10838 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
10783 10839 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
10784 10840 t4_write_reg(adap, port_base_addr + i, 0);
10785 10841 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
10786 10842 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
10787 10843 t4_write_reg(adap, port_base_addr + i, 0);
10788 10844 for (i = 0; i < 4; i++)
10789 10845 if (bgmap & (1 << i)) {
10790 10846 t4_write_reg(adap,
10791 10847 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
10792 10848 t4_write_reg(adap,
10793 10849 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
10794 10850 }
10795 10851 }
10796 10852
10797 10853 /**
10798 10854 * t4_i2c_rd - read I2C data from adapter
10799 10855 * @adap: the adapter
10800 10856 * @port: Port number if per-port device; <0 if not
10801 10857 * @devid: per-port device ID or absolute device ID
10802 10858 * @offset: byte offset into device I2C space
10803 10859 * @len: byte length of I2C space data
10804 10860 * @buf: buffer in which to return I2C data
10805 10861 *
10806 10862 * Reads the I2C data from the indicated device and location.
10807 10863 */
10808 10864 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
10809 10865 int port, unsigned int devid,
10810 10866 unsigned int offset, unsigned int len,
10811 10867 u8 *buf)
10812 10868 {
10813 10869 u32 ldst_addrspace;
10814 10870 struct fw_ldst_cmd ldst;
10815 10871 int ret;
10816 10872
10817 10873 if (port >= 4 ||
10818 10874 devid >= 256 ||
10819 10875 offset >= 256 ||
10820 10876 len > sizeof ldst.u.i2c.data)
10821 10877 return -EINVAL;
10822 10878
10823 10879 memset(&ldst, 0, sizeof ldst);
10824 10880 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
10825 10881 ldst.op_to_addrspace =
10826 10882 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10827 10883 F_FW_CMD_REQUEST |
10828 10884 F_FW_CMD_READ |
10829 10885 ldst_addrspace);
10830 10886 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
10831 10887 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
10832 10888 ldst.u.i2c.did = devid;
10833 10889 ldst.u.i2c.boffset = offset;
10834 10890 ldst.u.i2c.blen = len;
10835 10891 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
10836 10892 if (!ret)
10837 10893 memcpy(buf, ldst.u.i2c.data, len);
10838 10894 return ret;
10839 10895 }
10840 10896
10841 10897 /**
10842 10898 * t4_i2c_wr - write I2C data to adapter
10843 10899 * @adap: the adapter
10844 10900 * @port: Port number if per-port device; <0 if not
10845 10901 * @devid: per-port device ID or absolute device ID
10846 10902 * @offset: byte offset into device I2C space
10847 10903 * @len: byte length of I2C space data
10848 10904 * @buf: buffer containing new I2C data
10849 10905 *
10850 10906 * Write the I2C data to the indicated device and location.
10851 10907 */
10852 10908 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
10853 10909 int port, unsigned int devid,
10854 10910 unsigned int offset, unsigned int len,
10855 10911 u8 *buf)
10856 10912 {
10857 10913 u32 ldst_addrspace;
10858 10914 struct fw_ldst_cmd ldst;
10859 10915
10860 10916 if (port >= 4 ||
10861 10917 devid >= 256 ||
10862 10918 offset >= 256 ||
10863 10919 len > sizeof ldst.u.i2c.data)
10864 10920 return -EINVAL;
10865 10921
10866 10922 memset(&ldst, 0, sizeof ldst);
10867 10923 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
10868 10924 ldst.op_to_addrspace =
10869 10925 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10870 10926 F_FW_CMD_REQUEST |
10871 10927 F_FW_CMD_WRITE |
10872 10928 ldst_addrspace);
10873 10929 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
10874 10930 ldst.u.i2c.pid = (port < 0 ? 0xff : port);
10875 10931 ldst.u.i2c.did = devid;
10876 10932 ldst.u.i2c.boffset = offset;
10877 10933 ldst.u.i2c.blen = len;
10878 10934 memcpy(ldst.u.i2c.data, buf, len);
10879 10935 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
10880 10936 }
10881 10937
10882 10938 /**
10883 10939 * t4_sge_ctxt_rd - read an SGE context through FW
10884 10940 * @adap: the adapter
10885 10941 * @mbox: mailbox to use for the FW command
10886 10942 * @cid: the context id
10887 10943 * @ctype: the context type
10888 10944 * @data: where to store the context data
10889 10945 *
10890 10946 * Issues a FW command through the given mailbox to read an SGE context.
10891 10947 */
10892 10948 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
10893 10949 enum ctxt_type ctype, u32 *data)
10894 10950 {
10895 10951 int ret;
10896 10952 struct fw_ldst_cmd c;
10897 10953
10898 10954 if (ctype == CTXT_EGRESS)
10899 10955 ret = FW_LDST_ADDRSPC_SGE_EGRC;
10900 10956 else if (ctype == CTXT_INGRESS)
10901 10957 ret = FW_LDST_ADDRSPC_SGE_INGC;
10902 10958 else if (ctype == CTXT_FLM)
10903 10959 ret = FW_LDST_ADDRSPC_SGE_FLMC;
10904 10960 else
10905 10961 ret = FW_LDST_ADDRSPC_SGE_CONMC;
10906 10962
10907 10963 memset(&c, 0, sizeof(c));
10908 10964 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10909 10965 F_FW_CMD_REQUEST | F_FW_CMD_READ |
10910 10966 V_FW_LDST_CMD_ADDRSPACE(ret));
10911 10967 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
10912 10968 c.u.idctxt.physid = cpu_to_be32(cid);
10913 10969
10914 10970 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
10915 10971 if (ret == 0) {
10916 10972 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
10917 10973 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
10918 10974 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
10919 10975 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
10920 10976 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
10921 10977 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
10922 10978 }
10923 10979 return ret;
10924 10980 }
10925 10981
10926 10982 /**
10927 10983 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
10928 10984 * @adap: the adapter
10929 10985 * @cid: the context id
10930 10986 * @ctype: the context type
10931 10987 * @data: where to store the context data
10932 10988 *
10933 10989 * Reads an SGE context directly, bypassing FW. This is only for
10934 10990 * debugging when FW is unavailable.
10935 10991 */
10936 10992 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
10937 10993 u32 *data)
10938 10994 {
10939 10995 int i, ret;
10940 10996
10941 10997 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
10942 10998 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
10943 10999 if (!ret)
10944 11000 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
10945 11001 *data++ = t4_read_reg(adap, i);
10946 11002 return ret;
10947 11003 }
10948 11004
10949 11005 int t4_sched_config(struct adapter *adapter, int type, int minmaxen)
10950 11006 {
10951 11007 struct fw_sched_cmd cmd;
10952 11008
10953 11009 memset(&cmd, 0, sizeof(cmd));
10954 11010 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10955 11011 F_FW_CMD_REQUEST |
10956 11012 F_FW_CMD_WRITE);
10957 11013 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10958 11014
10959 11015 cmd.u.config.sc = FW_SCHED_SC_CONFIG;
10960 11016 cmd.u.config.type = type;
10961 11017 cmd.u.config.minmaxen = minmaxen;
10962 11018
10963 11019 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
10964 11020 NULL, 1);
10965 11021 }
10966 11022
10967 11023 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
10968 11024 int rateunit, int ratemode, int channel, int class,
10969 11025 int minrate, int maxrate, int weight, int pktsize)
10970 11026 {
10971 11027 struct fw_sched_cmd cmd;
10972 11028
10973 11029 memset(&cmd, 0, sizeof(cmd));
10974 11030 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10975 11031 F_FW_CMD_REQUEST |
10976 11032 F_FW_CMD_WRITE);
10977 11033 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10978 11034
10979 11035 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10980 11036 cmd.u.params.type = type;
10981 11037 cmd.u.params.level = level;
10982 11038 cmd.u.params.mode = mode;
10983 11039 cmd.u.params.ch = channel;
10984 11040 cmd.u.params.cl = class;
10985 11041 cmd.u.params.unit = rateunit;
10986 11042 cmd.u.params.rate = ratemode;
10987 11043 cmd.u.params.min = cpu_to_be32(minrate);
10988 11044 cmd.u.params.max = cpu_to_be32(maxrate);
10989 11045 cmd.u.params.weight = cpu_to_be16(weight);
10990 11046 cmd.u.params.pktsize = cpu_to_be16(pktsize);
10991 11047
10992 11048 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
10993 11049 NULL, 1);
10994 11050 }
10995 11051
10996 11052 /*
10997 11053 * t4_config_watchdog - configure (enable/disable) a watchdog timer
10998 11054 * @adapter: the adapter
10999 11055 * @mbox: mailbox to use for the FW command
11000 11056 * @pf: the PF owning the queue
11001 11057 * @vf: the VF owning the queue
11002 11058 * @timeout: watchdog timeout in ms
11003 11059 * @action: watchdog timer / action
11004 11060 *
11005 11061 * There are separate watchdog timers for each possible watchdog
11006 11062 * action. Configure one of the watchdog timers by setting a non-zero
11007 11063 * timeout. Disable a watchdog timer by using a timeout of zero.
11008 11064 */
11009 11065 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
11010 11066 unsigned int pf, unsigned int vf,
11011 11067 unsigned int timeout, unsigned int action)
11012 11068 {
11013 11069 struct fw_watchdog_cmd wdog;
11014 11070 unsigned int ticks;
11015 11071
11016 11072 /*
11017 11073 * The watchdog command expects a timeout in units of 10ms so we need
11018 11074 * to convert it here (via rounding) and force a minimum of one 10ms
11019 11075 * "tick" if the timeout is non-zero but the convertion results in 0
11020 11076 * ticks.
11021 11077 */
11022 11078 ticks = (timeout + 5)/10;
11023 11079 if (timeout && !ticks)
11024 11080 ticks = 1;
11025 11081
11026 11082 memset(&wdog, 0, sizeof wdog);
11027 11083 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
11028 11084 F_FW_CMD_REQUEST |
11029 11085 F_FW_CMD_WRITE |
11030 11086 V_FW_PARAMS_CMD_PFN(pf) |
11031 11087 V_FW_PARAMS_CMD_VFN(vf));
11032 11088 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
11033 11089 wdog.timeout = cpu_to_be32(ticks);
11034 11090 wdog.action = cpu_to_be32(action);
11035 11091
11036 11092 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
11037 11093 }
11038 11094
11039 11095 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
11040 11096 {
11041 11097 struct fw_devlog_cmd devlog_cmd;
11042 11098 int ret;
11043 11099
11044 11100 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
11045 11101 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
11046 11102 F_FW_CMD_REQUEST | F_FW_CMD_READ);
11047 11103 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
11048 11104 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
11049 11105 sizeof(devlog_cmd), &devlog_cmd);
11050 11106 if (ret)
11051 11107 return ret;
11052 11108
11053 11109 *level = devlog_cmd.level;
11054 11110 return 0;
11055 11111 }
11056 11112
11057 11113 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
11058 11114 {
11059 11115 struct fw_devlog_cmd devlog_cmd;
11060 11116
11061 11117 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
11062 11118 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
11063 11119 F_FW_CMD_REQUEST |
11064 11120 F_FW_CMD_WRITE);
11065 11121 devlog_cmd.level = level;
11066 11122 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
11067 11123 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
11068 11124 sizeof(devlog_cmd), &devlog_cmd);
11069 11125 }
11070 11126
|
↓ open down ↓ |
2110 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX