Print this page
3014 Intel X540 Support (fix lint)
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/ixgbe/ixgbe_common.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_common.c
1 1 /******************************************************************************
2 2
3 3 Copyright (c) 2001-2012, Intel Corporation
4 4 All rights reserved.
5 5
6 6 Redistribution and use in source and binary forms, with or without
7 7 modification, are permitted provided that the following conditions are met:
8 8
9 9 1. Redistributions of source code must retain the above copyright notice,
10 10 this list of conditions and the following disclaimer.
11 11
12 12 2. Redistributions in binary form must reproduce the above copyright
13 13 notice, this list of conditions and the following disclaimer in the
14 14 documentation and/or other materials provided with the distribution.
15 15
16 16 3. Neither the name of the Intel Corporation nor the names of its
17 17 contributors may be used to endorse or promote products derived from
18 18 this software without specific prior written permission.
19 19
20 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 30 POSSIBILITY OF SUCH DAMAGE.
31 31
32 32 ******************************************************************************/
33 33 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_common.c,v 1.14 2012/07/05 20:51:44 jfv Exp $*/
34 34
35 35 #include "ixgbe_common.h"
36 36 #include "ixgbe_phy.h"
37 37 #include "ixgbe_api.h"
38 38
39 39 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
40 40 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
41 41 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
42 42 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
43 43 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
44 44 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
45 45 u16 count);
46 46 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
47 47 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
48 48 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 49 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
50 50
51 51 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
52 52 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
53 53 u16 *san_mac_offset);
54 54 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
55 55 u16 words, u16 *data);
56 56 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
57 57 u16 words, u16 *data);
58 58 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
59 59 u16 offset);
60 60
61 61 /**
62 62 * ixgbe_init_ops_generic - Inits function ptrs
63 63 * @hw: pointer to the hardware structure
64 64 *
65 65 * Initialize the function pointers.
66 66 **/
67 67 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
68 68 {
69 69 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
70 70 struct ixgbe_mac_info *mac = &hw->mac;
71 71 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
72 72
73 73 DEBUGFUNC("ixgbe_init_ops_generic");
74 74
75 75 /* EEPROM */
76 76 eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
77 77 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
78 78 if (eec & IXGBE_EEC_PRES) {
79 79 eeprom->ops.read = &ixgbe_read_eerd_generic;
80 80 eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
81 81 } else {
82 82 eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
83 83 eeprom->ops.read_buffer =
84 84 &ixgbe_read_eeprom_buffer_bit_bang_generic;
85 85 }
86 86 eeprom->ops.write = &ixgbe_write_eeprom_generic;
87 87 eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
88 88 eeprom->ops.validate_checksum =
89 89 &ixgbe_validate_eeprom_checksum_generic;
90 90 eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
91 91 eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
92 92
93 93 /* MAC */
94 94 mac->ops.init_hw = &ixgbe_init_hw_generic;
95 95 mac->ops.reset_hw = NULL;
96 96 mac->ops.start_hw = &ixgbe_start_hw_generic;
97 97 mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
98 98 mac->ops.get_media_type = NULL;
99 99 mac->ops.get_supported_physical_layer = NULL;
100 100 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
101 101 mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
102 102 mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
103 103 mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
104 104 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
105 105 mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
106 106 mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
107 107
108 108 /* LEDs */
109 109 mac->ops.led_on = &ixgbe_led_on_generic;
110 110 mac->ops.led_off = &ixgbe_led_off_generic;
111 111 mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
112 112 mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
113 113
114 114 /* RAR, Multicast, VLAN */
115 115 mac->ops.set_rar = &ixgbe_set_rar_generic;
116 116 mac->ops.clear_rar = &ixgbe_clear_rar_generic;
117 117 mac->ops.insert_mac_addr = NULL;
118 118 mac->ops.set_vmdq = NULL;
119 119 mac->ops.clear_vmdq = NULL;
120 120 mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
121 121 mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
122 122 mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
123 123 mac->ops.enable_mc = &ixgbe_enable_mc_generic;
124 124 mac->ops.disable_mc = &ixgbe_disable_mc_generic;
125 125 mac->ops.clear_vfta = NULL;
126 126 mac->ops.set_vfta = NULL;
127 127 mac->ops.set_vlvf = NULL;
128 128 mac->ops.init_uta_tables = NULL;
129 129
130 130 /* Flow Control */
131 131 mac->ops.fc_enable = &ixgbe_fc_enable_generic;
132 132
133 133 /* Link */
134 134 mac->ops.get_link_capabilities = NULL;
135 135 mac->ops.setup_link = NULL;
136 136 mac->ops.check_link = NULL;
137 137
138 138 return IXGBE_SUCCESS;
139 139 }
140 140
141 141 /**
142 142 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
143 143 * control
144 144 * @hw: pointer to hardware structure
145 145 *
146 146 * There are several phys that do not support autoneg flow control. This
 147  147   *  function checks the device id to see if the associated phy supports
148 148 * autoneg flow control.
149 149 **/
150 150 static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
151 151 {
152 152
153 153 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
154 154
155 155 switch (hw->device_id) {
156 156 case IXGBE_DEV_ID_X540T:
157 157 case IXGBE_DEV_ID_X540T1:
158 158 return IXGBE_SUCCESS;
159 159 case IXGBE_DEV_ID_82599_T3_LOM:
160 160 return IXGBE_SUCCESS;
161 161 default:
162 162 return IXGBE_ERR_FC_NOT_SUPPORTED;
163 163 }
164 164 }
165 165
166 166 /**
167 167 * ixgbe_setup_fc - Set up flow control
168 168 * @hw: pointer to hardware structure
169 169 *
170 170 * Called at init time to set up flow control.
171 171 **/
172 172 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
173 173 {
174 174 s32 ret_val = IXGBE_SUCCESS;
175 175 u32 reg = 0, reg_bp = 0;
176 176 u16 reg_cu = 0;
177 177
178 178 DEBUGFUNC("ixgbe_setup_fc");
179 179
180 180 /*
181 181 * Validate the requested mode. Strict IEEE mode does not allow
182 182 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
183 183 */
184 184 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
185 185 DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
186 186 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
187 187 goto out;
188 188 }
189 189
190 190 /*
191 191 * 10gig parts do not have a word in the EEPROM to determine the
192 192 * default flow control setting, so we explicitly set it to full.
193 193 */
194 194 if (hw->fc.requested_mode == ixgbe_fc_default)
195 195 hw->fc.requested_mode = ixgbe_fc_full;
196 196
197 197 /*
198 198 * Set up the 1G and 10G flow control advertisement registers so the
199 199 * HW will be able to do fc autoneg once the cable is plugged in. If
200 200 * we link at 10G, the 1G advertisement is harmless and vice versa.
201 201 */
202 202 switch (hw->phy.media_type) {
203 203 case ixgbe_media_type_fiber:
204 204 case ixgbe_media_type_backplane:
205 205 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
206 206 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
207 207 break;
208 208 case ixgbe_media_type_copper:
209 209 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
 210  210  					      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
211 211 break;
212 212 default:
213 213 break;
214 214 }
215 215
216 216 /*
217 217 * The possible values of fc.requested_mode are:
218 218 * 0: Flow control is completely disabled
219 219 * 1: Rx flow control is enabled (we can receive pause frames,
220 220 * but not send pause frames).
221 221 * 2: Tx flow control is enabled (we can send pause frames but
222 222 * we do not support receiving pause frames).
223 223 * 3: Both Rx and Tx flow control (symmetric) are enabled.
224 224 * other: Invalid.
225 225 */
226 226 switch (hw->fc.requested_mode) {
227 227 case ixgbe_fc_none:
228 228 /* Flow control completely disabled by software override. */
229 229 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
230 230 if (hw->phy.media_type == ixgbe_media_type_backplane)
231 231 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
232 232 IXGBE_AUTOC_ASM_PAUSE);
233 233 else if (hw->phy.media_type == ixgbe_media_type_copper)
234 234 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
235 235 break;
236 236 case ixgbe_fc_tx_pause:
237 237 /*
238 238 * Tx Flow control is enabled, and Rx Flow control is
239 239 * disabled by software override.
240 240 */
241 241 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
242 242 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
243 243 if (hw->phy.media_type == ixgbe_media_type_backplane) {
244 244 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
245 245 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
246 246 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
247 247 reg_cu |= IXGBE_TAF_ASM_PAUSE;
248 248 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
249 249 }
250 250 break;
251 251 case ixgbe_fc_rx_pause:
252 252 /*
253 253 * Rx Flow control is enabled and Tx Flow control is
254 254 * disabled by software override. Since there really
255 255 * isn't a way to advertise that we are capable of RX
256 256 * Pause ONLY, we will advertise that we support both
257 257 * symmetric and asymmetric Rx PAUSE, as such we fall
258 258 * through to the fc_full statement. Later, we will
259 259 * disable the adapter's ability to send PAUSE frames.
260 260 */
261 261 case ixgbe_fc_full:
262 262 /* Flow control (both Rx and Tx) is enabled by SW override. */
263 263 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
264 264 if (hw->phy.media_type == ixgbe_media_type_backplane)
265 265 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
266 266 IXGBE_AUTOC_ASM_PAUSE;
267 267 else if (hw->phy.media_type == ixgbe_media_type_copper)
268 268 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
269 269 break;
270 270 default:
271 271 DEBUGOUT("Flow control param set incorrectly\n");
272 272 ret_val = IXGBE_ERR_CONFIG;
273 273 goto out;
274 274 }
275 275
276 276 if (hw->mac.type != ixgbe_mac_X540) {
277 277 /*
278 278 * Enable auto-negotiation between the MAC & PHY;
279 279 * the MAC will advertise clause 37 flow control.
280 280 */
281 281 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
282 282 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
283 283
284 284 /* Disable AN timeout */
285 285 if (hw->fc.strict_ieee)
286 286 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
287 287
288 288 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
289 289 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
290 290 }
291 291
292 292 /*
293 293 * AUTOC restart handles negotiation of 1G and 10G on backplane
294 294 * and copper. There is no need to set the PCS1GCTL register.
295 295 *
296 296 */
297 297 if (hw->phy.media_type == ixgbe_media_type_backplane) {
298 298 reg_bp |= IXGBE_AUTOC_AN_RESTART;
299 299 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
300 300 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
301 301 (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
302 302 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
303 303 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
304 304 }
305 305
306 306 DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
307 307 out:
308 308 return ret_val;
309 309 }
310 310
311 311 /**
312 312 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
313 313 * @hw: pointer to hardware structure
314 314 *
315 315 * Starts the hardware by filling the bus info structure and media type, clears
316 316 * all on chip counters, initializes receive address registers, multicast
317 317 * table, VLAN filter table, calls routine to set up link and flow control
318 318 * settings, and leaves transmit and receive units disabled and uninitialized
319 319 **/
320 320 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
321 321 {
322 322 s32 ret_val;
323 323 u32 ctrl_ext;
324 324
325 325 DEBUGFUNC("ixgbe_start_hw_generic");
326 326
327 327 /* Set the media type */
328 328 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
329 329
330 330 /* PHY ops initialization must be done in reset_hw() */
331 331
332 332 /* Clear the VLAN filter table */
333 333 hw->mac.ops.clear_vfta(hw);
334 334
335 335 /* Clear statistics registers */
336 336 hw->mac.ops.clear_hw_cntrs(hw);
337 337
338 338 /* Set No Snoop Disable */
339 339 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
340 340 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
341 341 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
342 342 IXGBE_WRITE_FLUSH(hw);
343 343
344 344 /* Setup flow control */
345 345 ret_val = ixgbe_setup_fc(hw);
346 346 if (ret_val != IXGBE_SUCCESS)
347 347 goto out;
348 348
349 349 /* Clear adapter stopped flag */
350 350 hw->adapter_stopped = FALSE;
351 351
352 352 out:
353 353 return ret_val;
354 354 }
355 355
356 356 /**
357 357 * ixgbe_start_hw_gen2 - Init sequence for common device family
358 358 * @hw: pointer to hw structure
359 359 *
360 360 * Performs the init sequence common to the second generation
361 361 * of 10 GbE devices.
362 362 * Devices in the second generation:
363 363 * 82599
364 364 * X540
365 365 **/
366 366 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
367 367 {
368 368 u32 i;
369 369 u32 regval;
370 370
371 371 /* Clear the rate limiters */
372 372 for (i = 0; i < hw->mac.max_tx_queues; i++) {
373 373 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
374 374 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
375 375 }
376 376 IXGBE_WRITE_FLUSH(hw);
377 377
378 378 /* Disable relaxed ordering */
379 379 for (i = 0; i < hw->mac.max_tx_queues; i++) {
380 380 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
381 381 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
382 382 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
383 383 }
384 384
385 385 for (i = 0; i < hw->mac.max_rx_queues; i++) {
386 386 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
387 387 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
388 388 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
389 389 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
390 390 }
391 391
392 392 return IXGBE_SUCCESS;
393 393 }
394 394
395 395 /**
396 396 * ixgbe_init_hw_generic - Generic hardware initialization
397 397 * @hw: pointer to hardware structure
398 398 *
399 399 * Initialize the hardware by resetting the hardware, filling the bus info
400 400 * structure and media type, clears all on chip counters, initializes receive
401 401 * address registers, multicast table, VLAN filter table, calls routine to set
402 402 * up link and flow control settings, and leaves transmit and receive units
403 403 * disabled and uninitialized
404 404 **/
405 405 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
406 406 {
407 407 s32 status;
408 408
409 409 DEBUGFUNC("ixgbe_init_hw_generic");
410 410
411 411 /* Reset the hardware */
412 412 status = hw->mac.ops.reset_hw(hw);
413 413
414 414 if (status == IXGBE_SUCCESS) {
415 415 /* Start the HW */
416 416 status = hw->mac.ops.start_hw(hw);
417 417 }
418 418
419 419 return status;
420 420 }
421 421
422 422 /**
423 423 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
424 424 * @hw: pointer to hardware structure
|
↓ open down ↓ |
424 lines elided |
↑ open up ↑ |
425 425 *
426 426 * Clears all hardware statistics counters by reading them from the hardware
427 427 * Statistics counters are clear on read.
428 428 **/
429 429 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
430 430 {
431 431 u16 i = 0;
432 432
433 433 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
434 434
435 - IXGBE_READ_REG(hw, IXGBE_CRCERRS);
436 - IXGBE_READ_REG(hw, IXGBE_ILLERRC);
437 - IXGBE_READ_REG(hw, IXGBE_ERRBC);
438 - IXGBE_READ_REG(hw, IXGBE_MSPDC);
435 + (void) IXGBE_READ_REG(hw, IXGBE_CRCERRS);
436 + (void) IXGBE_READ_REG(hw, IXGBE_ILLERRC);
437 + (void) IXGBE_READ_REG(hw, IXGBE_ERRBC);
438 + (void) IXGBE_READ_REG(hw, IXGBE_MSPDC);
439 439 for (i = 0; i < 8; i++)
440 - IXGBE_READ_REG(hw, IXGBE_MPC(i));
440 + (void) IXGBE_READ_REG(hw, IXGBE_MPC(i));
441 441
442 - IXGBE_READ_REG(hw, IXGBE_MLFC);
443 - IXGBE_READ_REG(hw, IXGBE_MRFC);
444 - IXGBE_READ_REG(hw, IXGBE_RLEC);
445 - IXGBE_READ_REG(hw, IXGBE_LXONTXC);
446 - IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
442 + (void) IXGBE_READ_REG(hw, IXGBE_MLFC);
443 + (void) IXGBE_READ_REG(hw, IXGBE_MRFC);
444 + (void) IXGBE_READ_REG(hw, IXGBE_RLEC);
445 + (void) IXGBE_READ_REG(hw, IXGBE_LXONTXC);
446 + (void) IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
447 447 if (hw->mac.type >= ixgbe_mac_82599EB) {
448 - IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
449 - IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
448 + (void) IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
449 + (void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
450 450 } else {
451 - IXGBE_READ_REG(hw, IXGBE_LXONRXC);
452 - IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
451 + (void) IXGBE_READ_REG(hw, IXGBE_LXONRXC);
452 + (void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
453 453 }
454 454
455 455 for (i = 0; i < 8; i++) {
456 - IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
457 - IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
456 + (void) IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
457 + (void) IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
458 458 if (hw->mac.type >= ixgbe_mac_82599EB) {
459 - IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
460 - IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
459 + (void) IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
460 + (void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
461 461 } else {
462 - IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
463 - IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
462 + (void) IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
463 + (void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
464 464 }
465 465 }
466 466 if (hw->mac.type >= ixgbe_mac_82599EB)
467 467 for (i = 0; i < 8; i++)
468 - IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
469 - IXGBE_READ_REG(hw, IXGBE_PRC64);
470 - IXGBE_READ_REG(hw, IXGBE_PRC127);
471 - IXGBE_READ_REG(hw, IXGBE_PRC255);
472 - IXGBE_READ_REG(hw, IXGBE_PRC511);
473 - IXGBE_READ_REG(hw, IXGBE_PRC1023);
474 - IXGBE_READ_REG(hw, IXGBE_PRC1522);
475 - IXGBE_READ_REG(hw, IXGBE_GPRC);
476 - IXGBE_READ_REG(hw, IXGBE_BPRC);
477 - IXGBE_READ_REG(hw, IXGBE_MPRC);
478 - IXGBE_READ_REG(hw, IXGBE_GPTC);
479 - IXGBE_READ_REG(hw, IXGBE_GORCL);
480 - IXGBE_READ_REG(hw, IXGBE_GORCH);
481 - IXGBE_READ_REG(hw, IXGBE_GOTCL);
482 - IXGBE_READ_REG(hw, IXGBE_GOTCH);
468 + (void) IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
469 + (void) IXGBE_READ_REG(hw, IXGBE_PRC64);
470 + (void) IXGBE_READ_REG(hw, IXGBE_PRC127);
471 + (void) IXGBE_READ_REG(hw, IXGBE_PRC255);
472 + (void) IXGBE_READ_REG(hw, IXGBE_PRC511);
473 + (void) IXGBE_READ_REG(hw, IXGBE_PRC1023);
474 + (void) IXGBE_READ_REG(hw, IXGBE_PRC1522);
475 + (void) IXGBE_READ_REG(hw, IXGBE_GPRC);
476 + (void) IXGBE_READ_REG(hw, IXGBE_BPRC);
477 + (void) IXGBE_READ_REG(hw, IXGBE_MPRC);
478 + (void) IXGBE_READ_REG(hw, IXGBE_GPTC);
479 + (void) IXGBE_READ_REG(hw, IXGBE_GORCL);
480 + (void) IXGBE_READ_REG(hw, IXGBE_GORCH);
481 + (void) IXGBE_READ_REG(hw, IXGBE_GOTCL);
482 + (void) IXGBE_READ_REG(hw, IXGBE_GOTCH);
483 483 if (hw->mac.type == ixgbe_mac_82598EB)
484 484 for (i = 0; i < 8; i++)
485 - IXGBE_READ_REG(hw, IXGBE_RNBC(i));
486 - IXGBE_READ_REG(hw, IXGBE_RUC);
487 - IXGBE_READ_REG(hw, IXGBE_RFC);
488 - IXGBE_READ_REG(hw, IXGBE_ROC);
489 - IXGBE_READ_REG(hw, IXGBE_RJC);
490 - IXGBE_READ_REG(hw, IXGBE_MNGPRC);
491 - IXGBE_READ_REG(hw, IXGBE_MNGPDC);
492 - IXGBE_READ_REG(hw, IXGBE_MNGPTC);
493 - IXGBE_READ_REG(hw, IXGBE_TORL);
494 - IXGBE_READ_REG(hw, IXGBE_TORH);
495 - IXGBE_READ_REG(hw, IXGBE_TPR);
496 - IXGBE_READ_REG(hw, IXGBE_TPT);
497 - IXGBE_READ_REG(hw, IXGBE_PTC64);
498 - IXGBE_READ_REG(hw, IXGBE_PTC127);
499 - IXGBE_READ_REG(hw, IXGBE_PTC255);
500 - IXGBE_READ_REG(hw, IXGBE_PTC511);
501 - IXGBE_READ_REG(hw, IXGBE_PTC1023);
502 - IXGBE_READ_REG(hw, IXGBE_PTC1522);
503 - IXGBE_READ_REG(hw, IXGBE_MPTC);
504 - IXGBE_READ_REG(hw, IXGBE_BPTC);
485 + (void) IXGBE_READ_REG(hw, IXGBE_RNBC(i));
486 + (void) IXGBE_READ_REG(hw, IXGBE_RUC);
487 + (void) IXGBE_READ_REG(hw, IXGBE_RFC);
488 + (void) IXGBE_READ_REG(hw, IXGBE_ROC);
489 + (void) IXGBE_READ_REG(hw, IXGBE_RJC);
490 + (void) IXGBE_READ_REG(hw, IXGBE_MNGPRC);
491 + (void) IXGBE_READ_REG(hw, IXGBE_MNGPDC);
492 + (void) IXGBE_READ_REG(hw, IXGBE_MNGPTC);
493 + (void) IXGBE_READ_REG(hw, IXGBE_TORL);
494 + (void) IXGBE_READ_REG(hw, IXGBE_TORH);
495 + (void) IXGBE_READ_REG(hw, IXGBE_TPR);
496 + (void) IXGBE_READ_REG(hw, IXGBE_TPT);
497 + (void) IXGBE_READ_REG(hw, IXGBE_PTC64);
498 + (void) IXGBE_READ_REG(hw, IXGBE_PTC127);
499 + (void) IXGBE_READ_REG(hw, IXGBE_PTC255);
500 + (void) IXGBE_READ_REG(hw, IXGBE_PTC511);
501 + (void) IXGBE_READ_REG(hw, IXGBE_PTC1023);
502 + (void) IXGBE_READ_REG(hw, IXGBE_PTC1522);
503 + (void) IXGBE_READ_REG(hw, IXGBE_MPTC);
504 + (void) IXGBE_READ_REG(hw, IXGBE_BPTC);
505 505 for (i = 0; i < 16; i++) {
506 - IXGBE_READ_REG(hw, IXGBE_QPRC(i));
507 - IXGBE_READ_REG(hw, IXGBE_QPTC(i));
506 + (void) IXGBE_READ_REG(hw, IXGBE_QPRC(i));
507 + (void) IXGBE_READ_REG(hw, IXGBE_QPTC(i));
508 508 if (hw->mac.type >= ixgbe_mac_82599EB) {
509 - IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
510 - IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
511 - IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
512 - IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
513 - IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
509 + (void) IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
510 + (void) IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
511 + (void) IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
512 + (void) IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
513 + (void) IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
514 514 } else {
515 - IXGBE_READ_REG(hw, IXGBE_QBRC(i));
516 - IXGBE_READ_REG(hw, IXGBE_QBTC(i));
515 + (void) IXGBE_READ_REG(hw, IXGBE_QBRC(i));
516 + (void) IXGBE_READ_REG(hw, IXGBE_QBTC(i));
517 517 }
518 518 }
519 519
520 520 if (hw->mac.type == ixgbe_mac_X540) {
521 521 if (hw->phy.id == 0)
522 - ixgbe_identify_phy(hw);
522 + (void) ixgbe_identify_phy(hw);
523 523 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
524 524 IXGBE_MDIO_PCS_DEV_TYPE, &i);
525 525 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
526 526 IXGBE_MDIO_PCS_DEV_TYPE, &i);
527 527 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
528 528 IXGBE_MDIO_PCS_DEV_TYPE, &i);
529 529 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
530 530 IXGBE_MDIO_PCS_DEV_TYPE, &i);
531 531 }
532 532
533 533 return IXGBE_SUCCESS;
534 534 }
535 535
536 536 /**
537 537 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
538 538 * @hw: pointer to hardware structure
539 539 * @pba_num: stores the part number string from the EEPROM
540 540 * @pba_num_size: part number string buffer length
541 541 *
542 542 * Reads the part number string from the EEPROM.
543 543 **/
544 544 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
545 545 u32 pba_num_size)
546 546 {
547 547 s32 ret_val;
548 548 u16 data;
549 549 u16 pba_ptr;
550 550 u16 offset;
551 551 u16 length;
552 552
553 553 DEBUGFUNC("ixgbe_read_pba_string_generic");
554 554
555 555 if (pba_num == NULL) {
556 556 DEBUGOUT("PBA string buffer was null\n");
557 557 return IXGBE_ERR_INVALID_ARGUMENT;
558 558 }
559 559
560 560 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
561 561 if (ret_val) {
562 562 DEBUGOUT("NVM Read Error\n");
563 563 return ret_val;
564 564 }
565 565
566 566 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
567 567 if (ret_val) {
568 568 DEBUGOUT("NVM Read Error\n");
569 569 return ret_val;
570 570 }
571 571
572 572 /*
573 573 * if data is not ptr guard the PBA must be in legacy format which
574 574 * means pba_ptr is actually our second data word for the PBA number
575 575 * and we can decode it into an ascii string
576 576 */
577 577 if (data != IXGBE_PBANUM_PTR_GUARD) {
578 578 DEBUGOUT("NVM PBA number is not stored as string\n");
579 579
580 580 /* we will need 11 characters to store the PBA */
581 581 if (pba_num_size < 11) {
582 582 DEBUGOUT("PBA string buffer too small\n");
583 583 return IXGBE_ERR_NO_SPACE;
584 584 }
585 585
586 586 /* extract hex string from data and pba_ptr */
587 587 pba_num[0] = (data >> 12) & 0xF;
588 588 pba_num[1] = (data >> 8) & 0xF;
589 589 pba_num[2] = (data >> 4) & 0xF;
590 590 pba_num[3] = data & 0xF;
591 591 pba_num[4] = (pba_ptr >> 12) & 0xF;
592 592 pba_num[5] = (pba_ptr >> 8) & 0xF;
593 593 pba_num[6] = '-';
594 594 pba_num[7] = 0;
595 595 pba_num[8] = (pba_ptr >> 4) & 0xF;
596 596 pba_num[9] = pba_ptr & 0xF;
597 597
598 598 /* put a null character on the end of our string */
599 599 pba_num[10] = '\0';
600 600
601 601 /* switch all the data but the '-' to hex char */
602 602 for (offset = 0; offset < 10; offset++) {
603 603 if (pba_num[offset] < 0xA)
604 604 pba_num[offset] += '0';
605 605 else if (pba_num[offset] < 0x10)
606 606 pba_num[offset] += 'A' - 0xA;
607 607 }
608 608
609 609 return IXGBE_SUCCESS;
610 610 }
611 611
612 612 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
613 613 if (ret_val) {
614 614 DEBUGOUT("NVM Read Error\n");
615 615 return ret_val;
616 616 }
617 617
618 618 if (length == 0xFFFF || length == 0) {
619 619 DEBUGOUT("NVM PBA number section invalid length\n");
620 620 return IXGBE_ERR_PBA_SECTION;
621 621 }
622 622
623 623 /* check if pba_num buffer is big enough */
624 624 if (pba_num_size < (((u32)length * 2) - 1)) {
625 625 DEBUGOUT("PBA string buffer too small\n");
626 626 return IXGBE_ERR_NO_SPACE;
627 627 }
628 628
629 629 /* trim pba length from start of string */
630 630 pba_ptr++;
631 631 length--;
632 632
633 633 for (offset = 0; offset < length; offset++) {
634 634 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
635 635 if (ret_val) {
636 636 DEBUGOUT("NVM Read Error\n");
637 637 return ret_val;
638 638 }
639 639 pba_num[offset * 2] = (u8)(data >> 8);
640 640 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
641 641 }
642 642 pba_num[offset * 2] = '\0';
643 643
644 644 return IXGBE_SUCCESS;
645 645 }
646 646
647 647 /**
648 648 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
649 649 * @hw: pointer to hardware structure
650 650 * @pba_num: stores the part number from the EEPROM
651 651 *
652 652 * Reads the part number from the EEPROM.
653 653 **/
654 654 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
655 655 {
656 656 s32 ret_val;
657 657 u16 data;
658 658
659 659 DEBUGFUNC("ixgbe_read_pba_num_generic");
660 660
661 661 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
662 662 if (ret_val) {
663 663 DEBUGOUT("NVM Read Error\n");
664 664 return ret_val;
665 665 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
666 666 DEBUGOUT("NVM Not supported\n");
667 667 return IXGBE_NOT_IMPLEMENTED;
668 668 }
669 669 *pba_num = (u32)(data << 16);
670 670
671 671 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
672 672 if (ret_val) {
673 673 DEBUGOUT("NVM Read Error\n");
674 674 return ret_val;
675 675 }
676 676 *pba_num |= data;
677 677
678 678 return IXGBE_SUCCESS;
679 679 }
680 680
681 681 /**
682 682 * ixgbe_get_mac_addr_generic - Generic get MAC address
683 683 * @hw: pointer to hardware structure
684 684 * @mac_addr: Adapter MAC address
685 685 *
686 686 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
687 687 * A reset of the adapter must be performed prior to calling this function
688 688 * in order for the MAC address to have been loaded from the EEPROM into RAR0
689 689 **/
690 690 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
691 691 {
692 692 u32 rar_high;
693 693 u32 rar_low;
694 694 u16 i;
695 695
696 696 DEBUGFUNC("ixgbe_get_mac_addr_generic");
697 697
698 698 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
699 699 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
700 700
701 701 for (i = 0; i < 4; i++)
702 702 mac_addr[i] = (u8)(rar_low >> (i*8));
703 703
704 704 for (i = 0; i < 2; i++)
705 705 mac_addr[i+4] = (u8)(rar_high >> (i*8));
706 706
707 707 return IXGBE_SUCCESS;
708 708 }
709 709
710 710 /**
711 711 * ixgbe_get_bus_info_generic - Generic set PCI bus info
712 712 * @hw: pointer to hardware structure
713 713 *
714 714 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
715 715 **/
716 716 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
717 717 {
718 718 struct ixgbe_mac_info *mac = &hw->mac;
719 719 u16 link_status;
720 720
721 721 DEBUGFUNC("ixgbe_get_bus_info_generic");
722 722
723 723 hw->bus.type = ixgbe_bus_type_pci_express;
724 724
725 725 /* Get the negotiated link width and speed from PCI config space */
726 726 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
727 727
728 728 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
729 729 case IXGBE_PCI_LINK_WIDTH_1:
730 730 hw->bus.width = ixgbe_bus_width_pcie_x1;
731 731 break;
732 732 case IXGBE_PCI_LINK_WIDTH_2:
733 733 hw->bus.width = ixgbe_bus_width_pcie_x2;
734 734 break;
735 735 case IXGBE_PCI_LINK_WIDTH_4:
736 736 hw->bus.width = ixgbe_bus_width_pcie_x4;
737 737 break;
738 738 case IXGBE_PCI_LINK_WIDTH_8:
739 739 hw->bus.width = ixgbe_bus_width_pcie_x8;
740 740 break;
741 741 default:
742 742 hw->bus.width = ixgbe_bus_width_unknown;
743 743 break;
744 744 }
745 745
746 746 switch (link_status & IXGBE_PCI_LINK_SPEED) {
747 747 case IXGBE_PCI_LINK_SPEED_2500:
748 748 hw->bus.speed = ixgbe_bus_speed_2500;
749 749 break;
750 750 case IXGBE_PCI_LINK_SPEED_5000:
751 751 hw->bus.speed = ixgbe_bus_speed_5000;
752 752 break;
753 753 case IXGBE_PCI_LINK_SPEED_8000:
754 754 hw->bus.speed = ixgbe_bus_speed_8000;
755 755 break;
756 756 default:
757 757 hw->bus.speed = ixgbe_bus_speed_unknown;
758 758 break;
759 759 }
760 760
761 761 mac->ops.set_lan_id(hw);
762 762
763 763 return IXGBE_SUCCESS;
764 764 }
765 765
766 766 /**
767 767 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
768 768 * @hw: pointer to the HW structure
769 769 *
770 770 * Determines the LAN function id by reading memory-mapped registers
771 771 * and swaps the port value if requested.
772 772 **/
773 773 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
774 774 {
775 775 struct ixgbe_bus_info *bus = &hw->bus;
776 776 u32 reg;
777 777
778 778 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
779 779
780 780 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
781 781 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
782 782 bus->lan_id = bus->func;
783 783
784 784 /* check for a port swap */
785 785 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
786 786 if (reg & IXGBE_FACTPS_LFS)
787 787 bus->func ^= 0x1;
788 788 }
789 789
/**
 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 *
 * Returns the result of ixgbe_disable_pcie_master(), i.e. whether PCI-E
 * master access could be disabled cleanly.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = TRUE;

	/* Disable the receive unit */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/*
	 * Clear any pending interrupts, flush previous writes.
	 * The (void) cast documents that the read value is deliberately
	 * discarded (lint fix); the read itself is what matters here.
	 */
	(void) IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit. Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(2);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}
843 843
844 844 /**
845 845 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
846 846 * @hw: pointer to hardware structure
847 847 * @index: led number to turn on
848 848 **/
849 849 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
850 850 {
851 851 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
852 852
853 853 DEBUGFUNC("ixgbe_led_on_generic");
854 854
855 855 /* To turn on the LED, set mode to ON. */
856 856 led_reg &= ~IXGBE_LED_MODE_MASK(index);
857 857 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
858 858 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
859 859 IXGBE_WRITE_FLUSH(hw);
860 860
861 861 return IXGBE_SUCCESS;
862 862 }
863 863
864 864 /**
865 865 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
866 866 * @hw: pointer to hardware structure
867 867 * @index: led number to turn off
868 868 **/
869 869 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
870 870 {
871 871 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
872 872
873 873 DEBUGFUNC("ixgbe_led_off_generic");
874 874
875 875 /* To turn off the LED, set mode to OFF. */
876 876 led_reg &= ~IXGBE_LED_MODE_MASK(index);
877 877 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
878 878 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
879 879 IXGBE_WRITE_FLUSH(hw);
880 880
881 881 return IXGBE_SUCCESS;
882 882 }
883 883
/**
 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 * @hw: pointer to hardware structure
 *
 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
 * ixgbe_hw struct in order to set up EEPROM access.
 *
 * Idempotent: only does work the first time it runs (while type is
 * ixgbe_eeprom_uninitialized). Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	DEBUGFUNC("ixgbe_init_eeprom_params_generic");

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here. This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			/* word_size = 2^(size_field + base shift) words */
			eeprom->word_size = 1 << (eeprom_size +
					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/* 16-bit vs 8-bit addressing, as reported by the EEC */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
			  "%d\n", eeprom->type, eeprom->word_size,
			  eeprom->address_bits);
	}

	return IXGBE_SUCCESS;
}
936 936
/**
 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to write
 * @words: number of word(s)
 * @data: 16 bit word(s) to write to EEPROM
 *
 * Writes 16 bit word(s) to EEPROM through bit-bang method.
 * (Note: earlier revisions of this header said "Reads"; this function
 * writes.)
 **/
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					       u16 words, u16 *data)
{
	s32 status = IXGBE_SUCCESS;
	u16 i, count;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	/* Reject writes that would run past the end of the EEPROM */
	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * The EEPROM page size cannot be queried from the chip. We do lazy
	 * initialization. It is worth to do that when we write large buffer.
	 */
	if ((hw->eeprom.word_page_size == 0) &&
	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
		status = ixgbe_detect_eeprom_page_size_generic(hw, offset);
	/* Page-size detection failure aborts the whole write (lint fix:
	 * its return value is now captured instead of discarded). */
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		/* count = min(words - i, IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) */
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
							    count, &data[i]);

		if (status != IXGBE_SUCCESS)
			break;
	}

out:
	return status;
}
992 994
/**
 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of word(s)
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 *
 * Caller must have validated offset/words; this routine acquires and
 * releases the EEPROM itself.
 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		/* Outer loop advances per write transaction; the inner
		 * do/while below may consume several words per pass, so i
		 * is also advanced there. */
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* Send the WRITE ENABLE command (8 bit opcode ) */
			ixgbe_shift_out_eeprom_bits(hw,
						   IXGBE_EEPROM_WREN_OPCODE_SPI,
						   IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Byte address = word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI*/
			do {
				word = data[i];
				/* swap to big-endian byte order on the wire */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			/* Deselect and allow the part's internal write cycle
			 * to complete before the next transaction. */
			ixgbe_standby_eeprom(hw);
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1075 1077
1076 1078 /**
1077 1079 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1078 1080 * @hw: pointer to hardware structure
1079 1081 * @offset: offset within the EEPROM to be written to
1080 1082 * @data: 16 bit word to be written to the EEPROM
1081 1083 *
1082 1084 * If ixgbe_eeprom_update_checksum is not called after this function, the
1083 1085 * EEPROM will most likely contain an invalid checksum.
1084 1086 **/
1085 1087 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1086 1088 {
1087 1089 s32 status;
1088 1090
1089 1091 DEBUGFUNC("ixgbe_write_eeprom_generic");
1090 1092
1091 1093 hw->eeprom.ops.init_params(hw);
1092 1094
1093 1095 if (offset >= hw->eeprom.word_size) {
1094 1096 status = IXGBE_ERR_EEPROM;
1095 1097 goto out;
1096 1098 }
1097 1099
1098 1100 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1099 1101
1100 1102 out:
1101 1103 return status;
1102 1104 }
1103 1105
1104 1106 /**
1105 1107 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1106 1108 * @hw: pointer to hardware structure
1107 1109 * @offset: offset within the EEPROM to be read
1108 1110 * @data: read 16 bit words(s) from EEPROM
1109 1111 * @words: number of word(s)
1110 1112 *
1111 1113 * Reads 16 bit word(s) from EEPROM through bit-bang method
1112 1114 **/
1113 1115 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1114 1116 u16 words, u16 *data)
1115 1117 {
1116 1118 s32 status = IXGBE_SUCCESS;
1117 1119 u16 i, count;
1118 1120
1119 1121 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1120 1122
1121 1123 hw->eeprom.ops.init_params(hw);
1122 1124
1123 1125 if (words == 0) {
1124 1126 status = IXGBE_ERR_INVALID_ARGUMENT;
1125 1127 goto out;
1126 1128 }
1127 1129
1128 1130 if (offset + words > hw->eeprom.word_size) {
1129 1131 status = IXGBE_ERR_EEPROM;
1130 1132 goto out;
1131 1133 }
1132 1134
1133 1135 /*
1134 1136 * We cannot hold synchronization semaphores for too long
1135 1137 * to avoid other entity starvation. However it is more efficient
1136 1138 * to read in bursts than synchronizing access for each word.
1137 1139 */
1138 1140 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1139 1141 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1140 1142 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1141 1143
1142 1144 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1143 1145 count, &data[i]);
1144 1146
1145 1147 if (status != IXGBE_SUCCESS)
1146 1148 break;
1147 1149 }
1148 1150
1149 1151 out:
1150 1152 return status;
1151 1153 }
1152 1154
/**
 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @words: number of word(s)
 * @data: read 16 bit word(s) from EEPROM
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method.
 * Acquires the EEPROM, performs one SPI READ transaction per word and
 * releases the EEPROM when done.
 **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Byte address = word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data, swapping from wire (big-endian)
			 * byte order. */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1210 1212
1211 1213 /**
1212 1214 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1213 1215 * @hw: pointer to hardware structure
1214 1216 * @offset: offset within the EEPROM to be read
1215 1217 * @data: read 16 bit value from EEPROM
1216 1218 *
1217 1219 * Reads 16 bit value from EEPROM through bit-bang method
1218 1220 **/
1219 1221 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1220 1222 u16 *data)
1221 1223 {
1222 1224 s32 status;
1223 1225
1224 1226 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1225 1227
1226 1228 hw->eeprom.ops.init_params(hw);
1227 1229
1228 1230 if (offset >= hw->eeprom.word_size) {
1229 1231 status = IXGBE_ERR_EEPROM;
1230 1232 goto out;
1231 1233 }
1232 1234
1233 1235 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1234 1236
1235 1237 out:
1236 1238 return status;
1237 1239 }
1238 1240
/**
 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of word(s)
 * @data: 16 bit word(s) from the EEPROM
 *
 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				   u16 words, u16 *data)
{
	u32 eerd;
	s32 status = IXGBE_SUCCESS;
	u32 i;

	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	/* NOTE(review): only the starting offset is range-checked here;
	 * offset + words may exceed word_size (unlike the bit-bang
	 * variants, which check offset + words) — confirm intended. */
	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* Compose address + start bit for the EERD command */
		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
		       IXGBE_EEPROM_RW_REG_START;

		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
		/* Wait for hardware to signal read completion */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

		if (status == IXGBE_SUCCESS) {
			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
				   IXGBE_EEPROM_RW_REG_DATA);
		} else {
			DEBUGOUT("Eeprom read timed out\n");
			goto out;
		}
	}
out:
	return status;
}
1287 1289
/**
 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be used as a scratch pad
 *
 * Discover EEPROM page size by writing marching data at given offset.
 * This function is called only when we are writing a new large buffer
 * at given offset so the data would be overwritten anyway.
 *
 * On success hw->eeprom.word_page_size holds the detected page size;
 * on failure it is left at 0 (unknown).
 **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = IXGBE_SUCCESS;
	u16 i;

	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");

	/* Marching pattern 0,1,2,... so the wrap point is identifiable */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/* Temporarily assume max page size for the probe write, then
	 * reset to unknown until the read-back proves otherwise. */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
						    IXGBE_EEPROM_PAGE_SIZE_MAX,
						    data);
	hw->eeprom.word_page_size = 0;
	if (status != IXGBE_SUCCESS)
		goto out;

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	DEBUGOUT1("Detected EEPROM page size = %d words.",
		  hw->eeprom.word_page_size);
out:
	return status;
}
1331 1333
1332 1334 /**
1333 1335 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1334 1336 * @hw: pointer to hardware structure
1335 1337 * @offset: offset of word in the EEPROM to read
1336 1338 * @data: word read from the EEPROM
1337 1339 *
1338 1340 * Reads a 16 bit word from the EEPROM using the EERD register.
1339 1341 **/
1340 1342 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1341 1343 {
1342 1344 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1343 1345 }
1344 1346
/**
 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @words: number of word(s)
 * @data: word(s) write to the EEPROM
 *
 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				    u16 words, u16 *data)
{
	u32 eewr;
	s32 status = IXGBE_SUCCESS;
	u16 i;

	DEBUGFUNC("ixgbe_write_eewr_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* Compose address + data + start bit for the EEWR command */
		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
		       IXGBE_EEPROM_RW_REG_START;

		/* Wait for any previous write to finish before issuing
		 * a new command... */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != IXGBE_SUCCESS) {
			DEBUGOUT("Eeprom write EEWR timed out\n");
			goto out;
		}

		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

		/* ...then wait for this write to complete as well. */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != IXGBE_SUCCESS) {
			DEBUGOUT("Eeprom write EEWR timed out\n");
			goto out;
		}
	}

out:
	return status;
}
1398 1400
1399 1401 /**
1400 1402 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1401 1403 * @hw: pointer to hardware structure
1402 1404 * @offset: offset of word in the EEPROM to write
1403 1405 * @data: word write to the EEPROM
1404 1406 *
1405 1407 * Write a 16 bit word to the EEPROM using the EEWR register.
1406 1408 **/
1407 1409 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1408 1410 {
1409 1411 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1410 1412 }
1411 1413
1412 1414 /**
1413 1415 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1414 1416 * @hw: pointer to hardware structure
1415 1417 * @ee_reg: EEPROM flag for polling
1416 1418 *
1417 1419 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1418 1420 * read or write is done respectively.
1419 1421 **/
1420 1422 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1421 1423 {
1422 1424 u32 i;
1423 1425 u32 reg;
1424 1426 s32 status = IXGBE_ERR_EEPROM;
1425 1427
1426 1428 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1427 1429
1428 1430 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1429 1431 if (ee_reg == IXGBE_NVM_POLL_READ)
1430 1432 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1431 1433 else
1432 1434 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1433 1435
1434 1436 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1435 1437 status = IXGBE_SUCCESS;
1436 1438 break;
1437 1439 }
1438 1440 usec_delay(5);
1439 1441 }
1440 1442 return status;
1441 1443 }
1442 1444
/**
 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 * @hw: pointer to hardware structure
 *
 * Prepares EEPROM for access using bit-bang method. This function should
 * be called before issuing a command to the EEPROM.
 *
 * Takes the SW/FW EEPROM semaphore, then requests the EEC grant; on any
 * failure all acquired resources are released before returning an error.
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
	    != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

		/* Poll for the grant bit */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			/* Also drop the semaphore taken above */
			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}
1497 1499
/**
 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
 *
 * Two-level protocol: first take the inter-driver SMBI semaphore, then
 * the SW/FW SWESMBI semaphore. Both must be held; on failure any bits
 * already set are released via ixgbe_release_eeprom_semaphore().
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			DEBUGOUT("SWESMBI Software EEPROM semaphore "
				 "not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		DEBUGOUT("Software semaphore SMBI between device drivers "
			 "not granted.\n");
	}

	return status;
}
1587 1589
1588 1590 /**
1589 1591 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1590 1592 * @hw: pointer to hardware structure
1591 1593 *
1592 1594 * This function clears hardware semaphore bits.
1593 1595 **/
1594 1596 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1595 1597 {
1596 1598 u32 swsm;
1597 1599
1598 1600 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1599 1601
1600 1602 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1601 1603
1602 1604 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1603 1605 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1604 1606 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1605 1607 IXGBE_WRITE_FLUSH(hw);
1606 1608 }
1607 1609
1608 1610 /**
1609 1611 * ixgbe_ready_eeprom - Polls for EEPROM ready
1610 1612 * @hw: pointer to hardware structure
1611 1613 **/
1612 1614 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1613 1615 {
1614 1616 s32 status = IXGBE_SUCCESS;
1615 1617 u16 i;
1616 1618 u8 spi_stat_reg;
1617 1619
1618 1620 DEBUGFUNC("ixgbe_ready_eeprom");
1619 1621
1620 1622 /*
1621 1623 * Read "Status Register" repeatedly until the LSB is cleared. The
1622 1624 * EEPROM will signal that the command has been completed by clearing
1623 1625 * bit 0 of the internal status register. If it's not cleared within
1624 1626 * 5 milliseconds, then error out.
1625 1627 */
1626 1628 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1627 1629 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1628 1630 IXGBE_EEPROM_OPCODE_BITS);
1629 1631 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1630 1632 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1631 1633 break;
1632 1634
1633 1635 usec_delay(5);
1634 1636 ixgbe_standby_eeprom(hw);
1635 1637 };
1636 1638
1637 1639 /*
1638 1640 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1639 1641 * devices (and only 0-5mSec on 5V devices)
1640 1642 */
1641 1643 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1642 1644 DEBUGOUT("SPI EEPROM Status error\n");
1643 1645 status = IXGBE_ERR_EEPROM;
1644 1646 }
1645 1647
1646 1648 return status;
1647 1649 }
1648 1650
1649 1651 /**
1650 1652 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1651 1653 * @hw: pointer to hardware structure
1652 1654 **/
1653 1655 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1654 1656 {
1655 1657 u32 eec;
1656 1658
1657 1659 DEBUGFUNC("ixgbe_standby_eeprom");
1658 1660
1659 1661 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1660 1662
1661 1663 /* Toggle CS to flush commands */
1662 1664 eec |= IXGBE_EEC_CS;
1663 1665 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1664 1666 IXGBE_WRITE_FLUSH(hw);
1665 1667 usec_delay(1);
1666 1668 eec &= ~IXGBE_EEC_CS;
1667 1669 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1668 1670 IXGBE_WRITE_FLUSH(hw);
1669 1671 usec_delay(1);
1670 1672 }
1671 1673
1672 1674 /**
1673 1675 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1674 1676 * @hw: pointer to hardware structure
1675 1677 * @data: data to send to the EEPROM
1676 1678 * @count: number of bits to shift out
1677 1679 **/
1678 1680 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1679 1681 u16 count)
1680 1682 {
1681 1683 u32 eec;
1682 1684 u32 mask;
1683 1685 u32 i;
1684 1686
1685 1687 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1686 1688
1687 1689 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1688 1690
1689 1691 /*
1690 1692 * Mask is used to shift "count" bits of "data" out to the EEPROM
1691 1693 * one bit at a time. Determine the starting bit based on count
1692 1694 */
1693 1695 mask = 0x01 << (count - 1);
1694 1696
1695 1697 for (i = 0; i < count; i++) {
1696 1698 /*
1697 1699 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1698 1700 * "1", and then raising and then lowering the clock (the SK
1699 1701 * bit controls the clock input to the EEPROM). A "0" is
1700 1702 * shifted out to the EEPROM by setting "DI" to "0" and then
1701 1703 * raising and then lowering the clock.
1702 1704 */
1703 1705 if (data & mask)
1704 1706 eec |= IXGBE_EEC_DI;
1705 1707 else
1706 1708 eec &= ~IXGBE_EEC_DI;
1707 1709
1708 1710 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1709 1711 IXGBE_WRITE_FLUSH(hw);
1710 1712
1711 1713 usec_delay(1);
1712 1714
1713 1715 ixgbe_raise_eeprom_clk(hw, &eec);
1714 1716 ixgbe_lower_eeprom_clk(hw, &eec);
1715 1717
1716 1718 /*
1717 1719 * Shift mask to signify next bit of data to shift in to the
1718 1720 * EEPROM
1719 1721 */
1720 1722 mask = mask >> 1;
1721 1723 };
1722 1724
1723 1725 /* We leave the "DI" bit set to "0" when we leave this routine. */
1724 1726 eec &= ~IXGBE_EEC_DI;
1725 1727 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1726 1728 IXGBE_WRITE_FLUSH(hw);
1727 1729 }
1728 1730
/**
 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 * @hw: pointer to hardware structure
 * @count: number of bits to shift in
 *
 * Returns the @count bits read, MSB first, in the low bits of a u16.
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit. During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		/* Make room for the next bit, clock it in, and sample DO */
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}
1767 1769
1768 1770 /**
1769 1771 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1770 1772 * @hw: pointer to hardware structure
1771 1773 * @eec: EEC register's current value
1772 1774 **/
1773 1775 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1774 1776 {
1775 1777 DEBUGFUNC("ixgbe_raise_eeprom_clk");
1776 1778
1777 1779 /*
1778 1780 * Raise the clock input to the EEPROM
1779 1781 * (setting the SK bit), then delay
1780 1782 */
1781 1783 *eec = *eec | IXGBE_EEC_SK;
1782 1784 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1783 1785 IXGBE_WRITE_FLUSH(hw);
1784 1786 usec_delay(1);
1785 1787 }
1786 1788
1787 1789 /**
1788 1790 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1789 1791 * @hw: pointer to hardware structure
1790 1792 * @eecd: EECD's current value
1791 1793 **/
1792 1794 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1793 1795 {
1794 1796 DEBUGFUNC("ixgbe_lower_eeprom_clk");
1795 1797
1796 1798 /*
1797 1799 * Lower the clock input to the EEPROM (clearing the SK bit), then
1798 1800 * delay
1799 1801 */
1800 1802 *eec = *eec & ~IXGBE_EEC_SK;
1801 1803 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1802 1804 IXGBE_WRITE_FLUSH(hw);
1803 1805 usec_delay(1);
1804 1806 }
1805 1807
/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 *
 * Deasserts the EEPROM chip select, stops requesting EEPROM access and
 * drops the EEPROM software/firmware semaphore.
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_release_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);

	usec_delay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Delay before attempt to obtain semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
}
1835 1837
/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Sums EEPROM words 0x0 through 0x3E plus every pointed-to region except
 * the firmware region, and returns IXGBE_EEPROM_SUM minus that sum, i.e.
 * the value that makes the overall EEPROM sum come out to IXGBE_EEPROM_SUM.
 **/
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
			DEBUGOUT("EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/*
	 * Include all data from pointers except for the fw pointer
	 *
	 * NOTE(review): read failures in this loop are silently ignored and
	 * the stale pointer/length/word values get summed — presumably
	 * acceptable because validation fails anyway; verify upstream.
	 */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		hw->eeprom.ops.read(hw, i, &pointer);

		/* Make sure the pointer seems valid */
		if (pointer != 0xFFFF && pointer != 0) {
			hw->eeprom.ops.read(hw, pointer, &length);

			if (length != 0xFFFF && length != 0) {
				for (j = pointer+1; j <= pointer+length; j++) {
					hw->eeprom.ops.read(hw, j, &word);
					checksum += word;
				}
			}
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return checksum;
}
1881 1883
/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum. If the
 * caller does not need checksum_val, the value can be NULL.
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_EEPROM_CHECKSUM on mismatch, or the
 * error from the initial probe read.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status == IXGBE_SUCCESS) {
		checksum = hw->eeprom.ops.calc_checksum(hw);

		/* Read the checksum stored in the EEPROM itself */
		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);

		/*
		 * Verify read checksum from EEPROM is the same as
		 * calculated checksum
		 */
		if (read_checksum != checksum)
			status = IXGBE_ERR_EEPROM_CHECKSUM;

		/* If the user cares, return the calculated checksum */
		if (checksum_val)
			*checksum_val = checksum;
	} else {
		DEBUGOUT("EEPROM read failed\n");
	}

	return status;
}
1927 1929
1928 1930 /**
1929 1931 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1930 1932 * @hw: pointer to hardware structure
1931 1933 **/
1932 1934 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1933 1935 {
1934 1936 s32 status;
1935 1937 u16 checksum;
1936 1938
1937 1939 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
1938 1940
1939 1941 /*
1940 1942 * Read the first word from the EEPROM. If this times out or fails, do
1941 1943 * not continue or we could be in for a very long wait while every
1942 1944 * EEPROM read fails
1943 1945 */
1944 1946 status = hw->eeprom.ops.read(hw, 0, &checksum);
1945 1947
1946 1948 if (status == IXGBE_SUCCESS) {
1947 1949 checksum = hw->eeprom.ops.calc_checksum(hw);
1948 1950 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1949 1951 checksum);
1950 1952 } else {
1951 1953 DEBUGOUT("EEPROM read failed\n");
1952 1954 }
1953 1955
1954 1956 return status;
1955 1957 }
1956 1958
1957 1959 /**
1958 1960 * ixgbe_validate_mac_addr - Validate MAC address
1959 1961 * @mac_addr: pointer to MAC address.
1960 1962 *
1961 1963 * Tests a MAC address to ensure it is a valid Individual Address
1962 1964 **/
1963 1965 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
1964 1966 {
1965 1967 s32 status = IXGBE_SUCCESS;
1966 1968
1967 1969 DEBUGFUNC("ixgbe_validate_mac_addr");
1968 1970
1969 1971 /* Make sure it is not a multicast address */
1970 1972 if (IXGBE_IS_MULTICAST(mac_addr)) {
1971 1973 DEBUGOUT("MAC address is multicast\n");
1972 1974 status = IXGBE_ERR_INVALID_MAC_ADDR;
1973 1975 /* Not a broadcast address */
1974 1976 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
1975 1977 DEBUGOUT("MAC address is broadcast\n");
1976 1978 status = IXGBE_ERR_INVALID_MAC_ADDR;
1977 1979 /* Reject the zero address */
1978 1980 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1979 1981 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1980 1982 DEBUGOUT("MAC address is all zeros\n");
1981 1983 status = IXGBE_ERR_INVALID_MAC_ADDR;
1982 1984 }
1983 1985 return status;
1984 1986 }
1985 1987
/**
 * ixgbe_set_rar_generic - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
 *
 * Returns IXGBE_ERR_INVALID_ARGUMENT if @index is out of range, else
 * IXGBE_SUCCESS.
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	/* Mark the entry valid only when the caller asked for it. */
	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return IXGBE_SUCCESS;
}
2038 2040
/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
 *
 * Returns IXGBE_ERR_INVALID_ARGUMENT if @index is out of range, else
 * IXGBE_SUCCESS.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return IXGBE_SUCCESS;
}
2075 2077
2076 2078 /**
2077 2079 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2078 2080 * @hw: pointer to hardware structure
2079 2081 *
2080 2082 * Places the MAC address in receive address register 0 and clears the rest
2081 2083 * of the receive address registers. Clears the multicast table. Assumes
2082 2084 * the receiver is in reset when the routine is called.
2083 2085 **/
2084 2086 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2085 2087 {
2086 2088 u32 i;
2087 2089 u32 rar_entries = hw->mac.num_rar_entries;
2088 2090
2089 2091 DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2090 2092
2091 2093 /*
2092 2094 * If the current mac address is valid, assume it is a software override
2093 2095 * to the permanent address.
2094 2096 * Otherwise, use the permanent address from the eeprom.
2095 2097 */
2096 2098 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2097 2099 IXGBE_ERR_INVALID_MAC_ADDR) {
2098 2100 /* Get the MAC address from the RAR0 for later reference */
2099 2101 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2100 2102
2101 2103 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2102 2104 hw->mac.addr[0], hw->mac.addr[1],
2103 2105 hw->mac.addr[2]);
2104 2106 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2105 2107 hw->mac.addr[4], hw->mac.addr[5]);
2106 2108 } else {
2107 2109 /* Setup the receive address. */
2108 2110 DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2109 2111 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2110 2112 hw->mac.addr[0], hw->mac.addr[1],
2111 2113 hw->mac.addr[2]);
2112 2114 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2113 2115 hw->mac.addr[4], hw->mac.addr[5]);
2114 2116
2115 2117 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2116 2118
2117 2119 /* clear VMDq pool/queue selection for RAR 0 */
2118 2120 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2119 2121 }
2120 2122 hw->addr_ctrl.overflow_promisc = 0;
2121 2123
2122 2124 hw->addr_ctrl.rar_used_count = 1;
2123 2125
2124 2126 /* Zero out the other receive addresses. */
2125 2127 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2126 2128 for (i = 1; i < rar_entries; i++) {
2127 2129 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2128 2130 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
|
↓ open down ↓ |
1146 lines elided |
↑ open up ↑ |
2129 2131 }
2130 2132
2131 2133 /* Clear the MTA */
2132 2134 hw->addr_ctrl.mta_in_use = 0;
2133 2135 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2134 2136
2135 2137 DEBUGOUT(" Clearing MTA\n");
2136 2138 for (i = 0; i < hw->mac.mcft_size; i++)
2137 2139 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2138 2140
2139 - ixgbe_init_uta_tables(hw);
2140 -
2141 - return IXGBE_SUCCESS;
2141 + /* Should always be IXGBE_SUCCESS. */
2142 + return ixgbe_init_uta_tables(hw);
2142 2143 }
2143 2144
2144 2145 /**
2145 2146 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2146 2147 * @hw: pointer to hardware structure
2147 2148 * @addr: new address
2148 2149 *
2149 2150 * Adds it to unused receive address register or goes into promiscuous mode.
2150 2151 **/
2151 2152 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2152 2153 {
2153 2154 u32 rar_entries = hw->mac.num_rar_entries;
2154 2155 u32 rar;
2155 2156
2156 2157 DEBUGFUNC("ixgbe_add_uc_addr");
2157 2158
2158 2159 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2159 2160 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2160 2161
2161 2162 /*
2162 2163 * Place this address in the RAR if there is room,
2163 2164 * else put the controller into promiscuous mode
2164 2165 */
2165 2166 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2166 2167 rar = hw->addr_ctrl.rar_used_count;
2167 2168 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2168 2169 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2169 2170 hw->addr_ctrl.rar_used_count++;
2170 2171 } else {
2171 2172 hw->addr_ctrl.overflow_promisc++;
2172 2173 }
2173 2174
2174 2175 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2175 2176 }
2176 2177
/**
 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
 * @hw: pointer to hardware structure
 * @addr_list: the list of new addresses
 * @addr_count: number of addresses
 * @next: iterator function to walk the address list
 *
 * The given list replaces any existing list. Clears the secondary addrs from
 * receive address registers. Uses unused receive address registers for the
 * first secondary addresses, and falls back to promiscuous mode as needed.
 *
 * Drivers using secondary unicast addresses must set user_set_promisc when
 * manually putting the device into promiscuous mode.
 **/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
				      u32 addr_count, ixgbe_mc_addr_itr next)
{
	u8 *addr;
	u32 i;
	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
	u32 uc_addr_in_use;
	u32 fctrl;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");

	/*
	 * Clear accounting of old secondary address list,
	 * don't count RAR[0]
	 *
	 * NOTE(review): assumes rar_used_count >= 1 (set by
	 * ixgbe_init_rx_addrs_generic); a zero count would underflow this
	 * unsigned subtraction — verify callers always init first.
	 */
	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
	hw->addr_ctrl.overflow_promisc = 0;

	/* Zero out the other receive addresses */
	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
	for (i = 0; i < uc_addr_in_use; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
	}

	/* Add the new addresses */
	for (i = 0; i < addr_count; i++) {
		DEBUGOUT(" Adding the secondary addresses:\n");
		addr = next(hw, &addr_list, &vmdq);
		ixgbe_add_uc_addr(hw, addr, vmdq);
	}

	if (hw->addr_ctrl.overflow_promisc) {
		/* enable promisc if not already in overflow or set by user */
		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Entering address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	} else {
		/* only disable if set by overflow, not by user */
		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Leaving address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl &= ~IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	}

	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2246 2247
/**
 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	DEBUGFUNC("ixgbe_mta_vector");

	/*
	 * Each filter type selects a different 12-bit window from the top
	 * of the 48-bit address (bytes 4 and 5, little-endian order).
	 */
	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		DEBUGOUT("MC filter type param set incorrectly\n");
		ASSERT(0);
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
2288 2289
/**
 * ixgbe_set_mta - Set bit-vector in multicast table
 * @hw: pointer to hardware structure
 * @mc_addr: multicast address to hash into the table
 *
 * Sets the bit-vector in the multicast table shadow (mta_shadow); the
 * shadow is flushed to hardware by the caller.
 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	DEBUGFUNC("ixgbe_set_mta");

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits.  We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value.  The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
2322 2323
2323 2324 /**
2324 2325 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2325 2326 * @hw: pointer to hardware structure
2326 2327 * @mc_addr_list: the list of new multicast addresses
2327 2328 * @mc_addr_count: number of addresses
2328 2329 * @next: iterator function to walk the multicast address list
2329 2330 * @clear: flag, when set clears the table beforehand
2330 2331 *
2331 2332 * When the clear flag is set, the given list replaces any existing list.
2332 2333 * Hashes the given addresses into the multicast table.
2333 2334 **/
2334 2335 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2335 2336 u32 mc_addr_count, ixgbe_mc_addr_itr next,
2336 2337 bool clear)
2337 2338 {
2338 2339 u32 i;
2339 2340 u32 vmdq;
2340 2341
2341 2342 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2342 2343
|
↓ open down ↓ |
191 lines elided |
↑ open up ↑ |
2343 2344 /*
2344 2345 * Set the new number of MC addresses that we are being requested to
2345 2346 * use.
2346 2347 */
2347 2348 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2348 2349 hw->addr_ctrl.mta_in_use = 0;
2349 2350
2350 2351 /* Clear mta_shadow */
2351 2352 if (clear) {
2352 2353 DEBUGOUT(" Clearing MTA\n");
2353 - memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2354 + (void) memset(&hw->mac.mta_shadow, 0,
2355 + sizeof(hw->mac.mta_shadow));
2354 2356 }
2355 2357
2356 2358 /* Update mta_shadow */
2357 2359 for (i = 0; i < mc_addr_count; i++) {
2358 2360 DEBUGOUT(" Adding the multicast addresses:\n");
2359 2361 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2360 2362 }
2361 2363
2362 2364 /* Enable mta */
2363 2365 for (i = 0; i < hw->mac.mcft_size; i++)
2364 2366 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2365 2367 hw->mac.mta_shadow[i]);
2366 2368
2367 2369 if (hw->addr_ctrl.mta_in_use > 0)
2368 2370 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2369 2371 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2370 2372
2371 2373 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2372 2374 return IXGBE_SUCCESS;
2373 2375 }
2374 2376
2375 2377 /**
2376 2378 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2377 2379 * @hw: pointer to hardware structure
2378 2380 *
2379 2381 * Enables multicast address in RAR and the use of the multicast hash table.
2380 2382 **/
2381 2383 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2382 2384 {
2383 2385 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2384 2386
2385 2387 DEBUGFUNC("ixgbe_enable_mc_generic");
2386 2388
2387 2389 if (a->mta_in_use > 0)
2388 2390 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2389 2391 hw->mac.mc_filter_type);
2390 2392
2391 2393 return IXGBE_SUCCESS;
2392 2394 }
2393 2395
2394 2396 /**
2395 2397 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2396 2398 * @hw: pointer to hardware structure
2397 2399 *
2398 2400 * Disables multicast address in RAR and the use of the multicast hash table.
2399 2401 **/
2400 2402 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2401 2403 {
2402 2404 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2403 2405
2404 2406 DEBUGFUNC("ixgbe_disable_mc_generic");
2405 2407
2406 2408 if (a->mta_in_use > 0)
2407 2409 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2408 2410
2409 2411 return IXGBE_SUCCESS;
2410 2412 }
2411 2413
/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 *
 * Returns IXGBE_ERR_INVALID_LINK_SETTINGS for bad water marks or pause
 * time, IXGBE_ERR_CONFIG for an unknown fc mode, else IXGBE_SUCCESS.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* Water marks are in KB; <<10 converts to bytes. */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the maximum FCRTH value.  This allows the Tx
			 * switch to function even under heavy Rx workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2541 2543
2542 2544 /**
2543 2545 * ixgbe_negotiate_fc - Negotiate flow control
2544 2546 * @hw: pointer to hardware structure
2545 2547 * @adv_reg: flow control advertised settings
2546 2548 * @lp_reg: link partner's flow control settings
2547 2549 * @adv_sym: symmetric pause bit in advertisement
2548 2550 * @adv_asm: asymmetric pause bit in advertisement
2549 2551 * @lp_sym: symmetric pause bit in link partner advertisement
2550 2552 * @lp_asm: asymmetric pause bit in link partner advertisement
2551 2553 *
2552 2554 * Find the intersection between advertised settings and link partner's
2553 2555 * advertised settings
2554 2556 **/
2555 2557 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2556 2558 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2557 2559 {
2558 2560 if ((!(adv_reg)) || (!(lp_reg)))
2559 2561 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2560 2562
2561 2563 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2562 2564 /*
2563 2565 * Now we need to check if the user selected Rx ONLY
2564 2566 * of pause frames. In this case, we had to advertise
2565 2567 * FULL flow control because we could not advertise RX
2566 2568 * ONLY. Hence, we must now check to see if we need to
2567 2569 * turn OFF the TRANSMISSION of PAUSE frames.
2568 2570 */
2569 2571 if (hw->fc.requested_mode == ixgbe_fc_full) {
2570 2572 hw->fc.current_mode = ixgbe_fc_full;
2571 2573 DEBUGOUT("Flow Control = FULL.\n");
2572 2574 } else {
2573 2575 hw->fc.current_mode = ixgbe_fc_rx_pause;
2574 2576 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2575 2577 }
2576 2578 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2577 2579 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2578 2580 hw->fc.current_mode = ixgbe_fc_tx_pause;
2579 2581 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2580 2582 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2581 2583 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2582 2584 hw->fc.current_mode = ixgbe_fc_rx_pause;
2583 2585 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2584 2586 } else {
2585 2587 hw->fc.current_mode = ixgbe_fc_none;
2586 2588 DEBUGOUT("Flow Control = NONE.\n");
2587 2589 }
2588 2590 return IXGBE_SUCCESS;
2589 2591 }
2590 2592
/**
 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
 * @hw: pointer to hardware structure
 *
 * Enable flow control according on 1 gig fiber.  Returns IXGBE_SUCCESS
 * when a common mode was resolved, else IXGBE_ERR_FC_NOT_NEGOTIATED.
 **/
static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

	/*
	 * On multispeed fiber at 1g, bail out if
	 * - link is up but AN did not complete, or if
	 * - link is up and AN completed but timed out
	 */

	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
		goto out;

	/* Our PCS advertisement and the link partner's ability */
	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

	/* Resolve the common mode from the PCS sym/asym pause bits */
	ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
	    pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
	    IXGBE_PCS1GANA_ASM_PAUSE,
	    IXGBE_PCS1GANA_SYM_PAUSE,
	    IXGBE_PCS1GANA_ASM_PAUSE);

out:
	return ret_val;
}
2625 2627
/**
 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.  Returns IXGBE_SUCCESS
 * when a common mode was resolved, else IXGBE_ERR_FC_NOT_NEGOTIATED.
 **/
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
	u32 links2, anlp1_reg, autoc_reg, links;
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

	/*
	 * On backplane, bail out if
	 * - backplane autoneg was not completed, or if
	 * - we are 82599 and link partner is not AN enabled
	 */
	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
		goto out;

	/* 82599 additionally reports partner AN support in LINKS2 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
			goto out;
	}
	/*
	 * Read the 10g AN autoc and LP ability registers and resolve
	 * local flow control settings accordingly
	 */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
	    anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
	    IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);

out:
	return ret_val;
}
2665 2667
2666 2668 /**
2667 2669 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2668 2670 * @hw: pointer to hardware structure
2669 2671 *
2670 2672 * Enable flow control according to IEEE clause 37.
2671 2673 **/
2672 2674 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2673 2675 {
2674 2676 u16 technology_ability_reg = 0;
2675 2677 u16 lp_technology_ability_reg = 0;
2676 2678
2677 2679 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2678 2680 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2679 2681 &technology_ability_reg);
2680 2682 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2681 2683 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2682 2684 &lp_technology_ability_reg);
2683 2685
2684 2686 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2685 2687 (u32)lp_technology_ability_reg,
2686 2688 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2687 2689 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2688 2690 }
2689 2691
/**
 * ixgbe_fc_autoneg - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Compares our advertised flow control capabilities to those advertised by
 * our link partner, and determines the proper flow control mode to use.
 * When negotiation is skipped or fails, falls back to the requested mode.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	/* default result: negotiation did not happen */
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
	ixgbe_link_speed speed;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_autoneg");

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out. Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 */
	if (hw->fc.disable_fc_autoneg)
		goto out;

	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
	if (!link_up)
		goto out;

	/* Dispatch on media type; each helper sets fc.current_mode */
	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ixgbe_media_type_fiber:
		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
			ret_val = ixgbe_fc_autoneg_fiber(hw);
		break;

	/* Autoneg flow control on backplane adapters */
	case ixgbe_media_type_backplane:
		ret_val = ixgbe_fc_autoneg_backplane(hw);
		break;

	/* Autoneg flow control on copper adapters */
	case ixgbe_media_type_copper:
		if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
			ret_val = ixgbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	if (ret_val == IXGBE_SUCCESS) {
		hw->fc.fc_was_autonegged = TRUE;
	} else {
		/* fall back to whatever mode the caller requested */
		hw->fc.fc_was_autonegged = FALSE;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
}
2748 2750
/**
 * ixgbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 * is returned signifying master requests disabled.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are already blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
		goto out;

	/* Poll for master request bit to clear, 100us per iteration */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
	 * of this need. The first reset prevents new master requests from
	 * being issued by our device. We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
		    IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	/* Still pending: report the error and let the caller decide */
	DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
2807 2809
/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash).  Returns IXGBE_SUCCESS once
 * the software bit(s) in @mask are owned, or IXGBE_ERR_SWFW_SYNC on
 * timeout or when the EEPROM semaphore cannot be taken.
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
	u32 gssr;
	u32 swmask = mask;
	/* firmware's bits sit 5 positions above the software bits */
	u32 fwmask = mask << 5;
	s32 timeout = 200;	/* x 5ms delay per retry below */

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	while (timeout) {
		/*
		 * SW EEPROM semaphore bit is used for access to all
		 * SW_FW_SYNC/GSSR bits (not just EEPROM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask)))
			break;	/* resource free; EEPROM semaphore held */

		/*
		 * Firmware currently using resource (fwmask) or other software
		 * thread currently using resource (swmask)
		 */
		ixgbe_release_eeprom_semaphore(hw);
		msec_delay(5);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		return IXGBE_ERR_SWFW_SYNC;
	}

	/* Claim the resource, then drop the EEPROM semaphore */
	gssr |= swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
	return IXGBE_SUCCESS;
}
2857 2859
2858 2860 /**
2859 2861 * ixgbe_release_swfw_sync - Release SWFW semaphore
2860 2862 * @hw: pointer to hardware structure
2861 2863 * @mask: Mask to specify which semaphore to release
2862 2864 *
|
↓ open down ↓ |
499 lines elided |
↑ open up ↑ |
2863 2865 * Releases the SWFW semaphore through the GSSR register for the specified
2864 2866 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2865 2867 **/
2866 2868 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2867 2869 {
2868 2870 u32 gssr;
2869 2871 u32 swmask = mask;
2870 2872
2871 2873 DEBUGFUNC("ixgbe_release_swfw_sync");
2872 2874
2873 - ixgbe_get_eeprom_semaphore(hw);
2875 + (void) ixgbe_get_eeprom_semaphore(hw);
2874 2876
2875 2877 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2876 2878 gssr &= ~swmask;
2877 2879 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2878 2880
2879 2881 ixgbe_release_eeprom_semaphore(hw);
2880 2882 }
2881 2883
2882 2884 /**
2883 2885 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
2884 2886 * @hw: pointer to hardware structure
2885 2887 *
2886 2888 * Stops the receive data path and waits for the HW to internally empty
2887 2889 * the Rx security block
2888 2890 **/
2889 2891 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
2890 2892 {
2891 2893 #define IXGBE_MAX_SECRX_POLL 40
2892 2894
2893 2895 int i;
2894 2896 int secrxreg;
2895 2897
2896 2898 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
2897 2899
2898 2900
2899 2901 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2900 2902 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2901 2903 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2902 2904 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2903 2905 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2904 2906 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2905 2907 break;
2906 2908 else
2907 2909 /* Use interrupt-safe sleep just in case */
2908 2910 usec_delay(1000);
2909 2911 }
2910 2912
2911 2913 /* For informational purposes only */
2912 2914 if (i >= IXGBE_MAX_SECRX_POLL)
2913 2915 DEBUGOUT("Rx unit being enabled before security "
2914 2916 "path fully disabled. Continuing with init.\n");
2915 2917
2916 2918 return IXGBE_SUCCESS;
2917 2919 }
2918 2920
2919 2921 /**
2920 2922 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
2921 2923 * @hw: pointer to hardware structure
2922 2924 *
2923 2925 * Enables the receive data path.
2924 2926 **/
2925 2927 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
2926 2928 {
2927 2929 int secrxreg;
2928 2930
2929 2931 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
2930 2932
2931 2933 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2932 2934 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2933 2935 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2934 2936 IXGBE_WRITE_FLUSH(hw);
2935 2937
2936 2938 return IXGBE_SUCCESS;
2937 2939 }
2938 2940
2939 2941 /**
2940 2942 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2941 2943 * @hw: pointer to hardware structure
2942 2944 * @regval: register value to write to RXCTRL
2943 2945 *
2944 2946 * Enables the Rx DMA unit
2945 2947 **/
2946 2948 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2947 2949 {
2948 2950 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
2949 2951
2950 2952 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2951 2953
2952 2954 return IXGBE_SUCCESS;
2953 2955 }
2954 2956
2955 2957 /**
2956 2958 * ixgbe_blink_led_start_generic - Blink LED based on index.
2957 2959 * @hw: pointer to hardware structure
2958 2960 * @index: led number to blink
2959 2961 **/
2960 2962 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2961 2963 {
2962 2964 ixgbe_link_speed speed = 0;
2963 2965 bool link_up = 0;
2964 2966 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2965 2967 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2966 2968
2967 2969 DEBUGFUNC("ixgbe_blink_led_start_generic");
2968 2970
2969 2971 /*
2970 2972 * Link must be up to auto-blink the LEDs;
2971 2973 * Force it if link is down.
2972 2974 */
2973 2975 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2974 2976
2975 2977 if (!link_up) {
2976 2978 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2977 2979 autoc_reg |= IXGBE_AUTOC_FLU;
2978 2980 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2979 2981 IXGBE_WRITE_FLUSH(hw);
2980 2982 msec_delay(10);
2981 2983 }
2982 2984
2983 2985 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2984 2986 led_reg |= IXGBE_LED_BLINK(index);
2985 2987 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2986 2988 IXGBE_WRITE_FLUSH(hw);
2987 2989
2988 2990 return IXGBE_SUCCESS;
2989 2991 }
2990 2992
2991 2993 /**
2992 2994 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2993 2995 * @hw: pointer to hardware structure
2994 2996 * @index: led number to stop blinking
2995 2997 **/
2996 2998 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2997 2999 {
2998 3000 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2999 3001 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3000 3002
3001 3003 DEBUGFUNC("ixgbe_blink_led_stop_generic");
3002 3004
3003 3005
3004 3006 autoc_reg &= ~IXGBE_AUTOC_FLU;
3005 3007 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3006 3008 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
3007 3009
3008 3010 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3009 3011 led_reg &= ~IXGBE_LED_BLINK(index);
3010 3012 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3011 3013 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3012 3014 IXGBE_WRITE_FLUSH(hw);
3013 3015
3014 3016 return IXGBE_SUCCESS;
3015 3017 }
3016 3018
/**
 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_offset: SAN MAC address offset (output)
 *
 * This function will read the EEPROM location for the SAN MAC address
 * pointer and store the value in *san_mac_offset. This is used in both
 * get and set mac_addr routines.  Always returns IXGBE_SUCCESS; callers
 * validate the offset themselves (0 / 0xFFFF means "not present").
 **/
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
    u16 *san_mac_offset)
{
	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");

	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * available.
	 */
	hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);

	return IXGBE_SUCCESS;
}
3039 3041
/**
 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_addr: SAN MAC address (6-byte buffer, filled in here)
 *
 * Reads the SAN MAC address from the EEPROM, if it's available. This is
 * per-port, so set_lan_id() must be called before reading the addresses.
 * set_lan_id() is called by identify_sfp(), but this cannot be relied
 * upon for non-SFP connections, so we must call it here.  Always returns
 * IXGBE_SUCCESS; if no SAN address exists the buffer is filled with 0xFF.
 **/
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
	u16 san_mac_data, san_mac_offset;
	u8 i;

	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");

	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * available. If they're not, no point in calling set_lan_id() here.
	 */
	(void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);

	if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
		/*
		 * No addresses available in this EEPROM. It's not an
		 * error though, so just wipe the local address and return.
		 */
		for (i = 0; i < 6; i++)
			san_mac_addr[i] = 0xFF;

		goto san_mac_addr_out;
	}

	/* make sure we know which port we need to program */
	hw->mac.ops.set_lan_id(hw);
	/* apply the port offset to the address offset */
	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
	    (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
	/* each EEPROM word holds two address bytes, little-endian */
	for (i = 0; i < 3; i++) {
		hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
		san_mac_addr[i * 2] = (u8)(san_mac_data);
		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
		san_mac_offset++;
	}

san_mac_addr_out:
	return IXGBE_SUCCESS;
}
3089 3091
3090 3092 /**
3091 3093 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3092 3094 * @hw: pointer to hardware structure
3093 3095 * @san_mac_addr: SAN MAC address
3094 3096 *
3095 3097 * Write a SAN MAC address to the EEPROM.
|
↓ open down ↓ |
24 lines elided |
↑ open up ↑ |
3096 3098 **/
3097 3099 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3098 3100 {
3099 3101 s32 status = IXGBE_SUCCESS;
3100 3102 u16 san_mac_data, san_mac_offset;
3101 3103 u8 i;
3102 3104
3103 3105 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3104 3106
3105 3107 /* Look for SAN mac address pointer. If not defined, return */
3106 - ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3108 + (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3107 3109
3108 3110 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3109 3111 status = IXGBE_ERR_NO_SAN_ADDR_PTR;
3110 3112 goto san_mac_addr_out;
3111 3113 }
3112 3114
3113 3115 /* Make sure we know which port we need to write */
3114 3116 hw->mac.ops.set_lan_id(hw);
3115 3117 /* Apply the port offset to the address offset */
3116 3118 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3117 3119 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3118 3120
3119 3121 for (i = 0; i < 3; i++) {
3120 3122 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3121 3123 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3122 3124 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3123 3125 san_mac_offset++;
3124 3126 }
3125 3127
3126 3128 san_mac_addr_out:
3127 3129 return status;
3128 3130 }
3129 3131
3130 3132 /**
3131 3133 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3132 3134 * @hw: pointer to hardware structure
3133 3135 *
3134 3136 * Read PCIe configuration space, and get the MSI-X vector count from
3135 3137 * the capabilities table.
3136 3138 **/
3137 3139 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3138 3140 {
3139 3141 u16 msix_count = 1;
3140 3142 u16 max_msix_count;
3141 3143 u16 pcie_offset;
3142 3144
3143 3145 switch (hw->mac.type) {
3144 3146 case ixgbe_mac_82598EB:
3145 3147 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3146 3148 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3147 3149 break;
3148 3150 case ixgbe_mac_82599EB:
3149 3151 case ixgbe_mac_X540:
3150 3152 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3151 3153 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3152 3154 break;
3153 3155 default:
3154 3156 return msix_count;
3155 3157 }
3156 3158
3157 3159 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3158 3160 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3159 3161 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3160 3162
3161 3163 /* MSI-X count is zero-based in HW */
3162 3164 msix_count++;
3163 3165
3164 3166 if (msix_count > max_msix_count)
3165 3167 msix_count = max_msix_count;
3166 3168
3167 3169 return msix_count;
3168 3170 }
3169 3171
/**
 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
 * @hw: pointer to hardware structure
 * @addr: Address to put into receive address register
 * @vmdq: VMDq pool to assign
 *
 * Puts an ethernet address into a receive address register, or
 * finds the rar that it is already in; adds to the pool list.
 * Returns the rar index used, or IXGBE_ERR_INVALID_MAC_ADDR when the
 * RAR table is full.
 **/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
	u32 rar;
	u32 rar_low, rar_high;
	u32 addr_low, addr_high;

	DEBUGFUNC("ixgbe_insert_mac_addr_generic");

	/* swap bytes for HW little endian */
	addr_low = addr[0] | (addr[1] << 8)
		| (addr[2] << 16)
		| (addr[3] << 24);
	addr_high = addr[4] | (addr[5] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search. It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

		/* an entry is free when its Address Valid bit is clear */
		if (((IXGBE_RAH_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			/* high halves match; confirm with the low half */
			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
			if (rar_low == addr_low)
				break;	/* found it already in the rars */
		}
	}

	if (rar < hw->mac.rar_highwater) {
		/* already there so just add to the pool bits */
		(void) ixgbe_set_vmdq(hw, rar, vmdq);
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		(void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark */
		(void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		return IXGBE_ERR_INVALID_MAC_ADDR;
	}

	/*
	 * If we found rar[0], make sure the default pool bit (we use pool 0)
	 * remains cleared to be sure default pool packets will get delivered
	 */
	if (rar == 0)
		(void) ixgbe_clear_vmdq(hw, rar, 0);

	return rar;
}
3238 3240
/**
 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to disassociate
 * @vmdq: VMDq pool index to remove from the rar (IXGBE_CLEAR_VMDQ_ALL
 *        clears every pool association at once)
 **/
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar_lo, mpsar_hi;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_vmdq_generic");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));

	/* no pool references this rar; nothing to clear */
	if (!mpsar_lo && !mpsar_hi)
		goto done;

	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
		if (mpsar_lo) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
			mpsar_lo = 0;
		}
		if (mpsar_hi) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
			mpsar_hi = 0;
		}
	} else if (vmdq < 32) {
		/* pools 0-31 live in the low dword */
		mpsar_lo &= ~(1 << vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
	} else {
		/* pools 32-63 live in the high dword */
		mpsar_hi &= ~(1 << (vmdq - 32));
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
	}

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
		hw->mac.ops.clear_rar(hw, rar);
done:
	return IXGBE_SUCCESS;
}
3287 3289
3288 3290 /**
3289 3291 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3290 3292 * @hw: pointer to hardware struct
3291 3293 * @rar: receive address register index to associate with a VMDq index
3292 3294 * @vmdq: VMDq pool index
3293 3295 **/
3294 3296 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3295 3297 {
3296 3298 u32 mpsar;
3297 3299 u32 rar_entries = hw->mac.num_rar_entries;
3298 3300
3299 3301 DEBUGFUNC("ixgbe_set_vmdq_generic");
3300 3302
3301 3303 /* Make sure we are using a valid rar index range */
3302 3304 if (rar >= rar_entries) {
3303 3305 DEBUGOUT1("RAR index %d is out of range.\n", rar);
3304 3306 return IXGBE_ERR_INVALID_ARGUMENT;
3305 3307 }
3306 3308
3307 3309 if (vmdq < 32) {
3308 3310 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3309 3311 mpsar |= 1 << vmdq;
3310 3312 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3311 3313 } else {
3312 3314 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3313 3315 mpsar |= 1 << (vmdq - 32);
3314 3316 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3315 3317 }
3316 3318 return IXGBE_SUCCESS;
3317 3319 }
3318 3320
3319 3321 /**
3320 3322 * This function should only be involved in the IOV mode.
3321 3323 * In IOV mode, Default pool is next pool after the number of
3322 3324 * VFs advertized and not 0.
3323 3325 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3324 3326 *
3325 3327 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3326 3328 * @hw: pointer to hardware struct
3327 3329 * @vmdq: VMDq pool index
3328 3330 **/
3329 3331 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3330 3332 {
3331 3333 u32 rar = hw->mac.san_mac_rar_index;
3332 3334
3333 3335 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3334 3336
3335 3337 if (vmdq < 32) {
3336 3338 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3337 3339 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3338 3340 } else {
3339 3341 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3340 3342 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3341 3343 }
3342 3344
3343 3345 return IXGBE_SUCCESS;
3344 3346 }
3345 3347
3346 3348 /**
3347 3349 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3348 3350 * @hw: pointer to hardware structure
3349 3351 **/
3350 3352 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3351 3353 {
3352 3354 int i;
3353 3355
3354 3356 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3355 3357 DEBUGOUT(" Clearing UTA\n");
3356 3358
3357 3359 for (i = 0; i < 128; i++)
3358 3360 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3359 3361
3360 3362 return IXGBE_SUCCESS;
3361 3363 }
3362 3364
/**
 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 *
 * return the VLVF index where this VLAN id should be placed,
 * or IXGBE_ERR_NO_SPACE (negative) when the table is full
 *
 **/
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
{
	u32 bits = 0;
	/* 0 doubles as the "no empty slot" sentinel: slot 0 is reserved */
	u32 first_empty_slot = 0;
	s32 regindex;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/*
	 * Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way
	 */
	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		if (!bits && !(first_empty_slot))
			first_empty_slot = regindex;
		else if ((bits & 0x0FFF) == vlan)	/* low 12 bits: vlan id */
			break;
	}

	/*
	 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
	 * in the VLVF. Else use the first empty VLVF register for this
	 * vlan id.
	 */
	if (regindex >= IXGBE_VLVF_ENTRIES) {
		if (first_empty_slot)
			regindex = first_empty_slot;
		else {
			DEBUGOUT("No space in VLVF.\n");
			regindex = IXGBE_ERR_NO_SPACE;
		}
	}

	return regindex;
}
3409 3411
/**
 * ixgbe_set_vfta_generic - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
    bool vlan_on)
{
	s32 regindex;
	u32 bitindex;
	u32 vfta;
	u32 targetbit;
	s32 ret_val = IXGBE_SUCCESS;
	bool vfta_changed = FALSE;

	DEBUGFUNC("ixgbe_set_vfta_generic");

	/* VLAN ids are 12 bits wide */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regindex = (vlan >> 5) & 0x7F;
	bitindex = vlan & 0x1F;
	targetbit = (1 << bitindex);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));

	/* only mark the VFTA dirty when the bit actually flips */
	if (vlan_on) {
		if (!(vfta & targetbit)) {
			vfta |= targetbit;
			vfta_changed = TRUE;
		}
	} else {
		if ((vfta & targetbit)) {
			vfta &= ~targetbit;
			vfta_changed = TRUE;
		}
	}

	/* Part 2
	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
	 * (it may also force vfta_changed when VT mode requires it)
	 */
	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
	    &vfta_changed);
	if (ret_val != IXGBE_SUCCESS)
		return ret_val;

	if (vfta_changed)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);

	return IXGBE_SUCCESS;
}
3476 3478
/**
 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 * @vfta_changed: pointer to boolean flag which indicates whether VFTA
 * should be changed; may be cleared by this function when the VLAN id
 * is still in use by other pools/VFs
 *
 * Turn on/off specified bit in VLVF table.
 **/
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, bool *vfta_changed)
{
	u32 vt;

	DEBUGFUNC("ixgbe_set_vlvf_generic");

	/* VLAN ids are 12 bits wide. */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* If VT Mode is set
	 * Either vlan_on
	 * make sure the vlan is in VLVF
	 * set the vind bit in the matching VLVFB
	 * Or !vlan_on
	 * clear the pool bit and possibly the vind
	 */
	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
		s32 vlvf_index;
		u32 bits;

		/*
		 * Find the existing (or first free) VLVF slot for this
		 * VLAN id; a negative result is an error code.
		 */
		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
		if (vlvf_index < 0)
			return vlvf_index;

		if (vlan_on) {
			/* set the pool bit */
			if (vind < 32) {
				/* pools 0-31 live in the even VLVFB
				 * register of the pair */
				bits = IXGBE_READ_REG(hw,
				    IXGBE_VLVFB(vlvf_index * 2));
				bits |= (1 << vind);
				IXGBE_WRITE_REG(hw,
				    IXGBE_VLVFB(vlvf_index * 2),
				    bits);
			} else {
				/* pools 32-63 live in the odd register */
				bits = IXGBE_READ_REG(hw,
				    IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits |= (1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
				    IXGBE_VLVFB((vlvf_index * 2) + 1),
				    bits);
			}
		} else {
			/* clear the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
				    IXGBE_VLVFB(vlvf_index * 2));
				bits &= ~(1 << vind);
				IXGBE_WRITE_REG(hw,
				    IXGBE_VLVFB(vlvf_index * 2),
				    bits);
				/* fold in the other half of the pair so
				 * "bits" reflects all 64 pool enables */
				bits |= IXGBE_READ_REG(hw,
				    IXGBE_VLVFB((vlvf_index * 2) + 1));
			} else {
				bits = IXGBE_READ_REG(hw,
				    IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits &= ~(1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
				    IXGBE_VLVFB((vlvf_index * 2) + 1),
				    bits);
				/* fold in the other half of the pair */
				bits |= IXGBE_READ_REG(hw,
				    IXGBE_VLVFB(vlvf_index * 2));
			}
		}

		/*
		 * If there are still bits set in the VLVFB registers
		 * for the VLAN ID indicated we need to see if the
		 * caller is requesting that we clear the VFTA entry bit.
		 * If the caller has requested that we clear the VFTA
		 * entry bit but there are still pools/VFs using this VLAN
		 * ID entry then ignore the request. We're not worried
		 * about the case where we're turning the VFTA VLAN ID
		 * entry bit on, only when requested to turn it off as
		 * there may be multiple pools and/or VFs using the
		 * VLAN ID entry. In that case we cannot clear the
		 * VFTA bit until all pools/VFs using that VLAN ID have also
		 * been cleared. This will be indicated by "bits" being
		 * zero.
		 */
		if (bits) {
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
			    (IXGBE_VLVF_VIEN | vlan));
			if ((!vlan_on) && (vfta_changed != NULL)) {
				/* someone wants to clear the vfta entry
				 * but some pools/VFs are still using it.
				 * Ignore it. */
				*vfta_changed = FALSE;
			}
		} else
			/* no pools left: release the whole VLVF slot */
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
	}

	return IXGBE_SUCCESS;
}
3584 3586
3585 3587 /**
3586 3588 * ixgbe_clear_vfta_generic - Clear VLAN filter table
3587 3589 * @hw: pointer to hardware structure
3588 3590 *
3589 3591 * Clears the VLAN filer table, and the VMDq index associated with the filter
3590 3592 **/
3591 3593 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3592 3594 {
3593 3595 u32 offset;
3594 3596
3595 3597 DEBUGFUNC("ixgbe_clear_vfta_generic");
3596 3598
3597 3599 for (offset = 0; offset < hw->mac.vft_size; offset++)
3598 3600 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3599 3601
3600 3602 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3601 3603 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3602 3604 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3603 3605 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3604 3606 }
3605 3607
3606 3608 return IXGBE_SUCCESS;
3607 3609 }
3608 3610
3609 3611 /**
3610 3612 * ixgbe_check_mac_link_generic - Determine link and speed status
3611 3613 * @hw: pointer to hardware structure
3612 3614 * @speed: pointer to link speed
3613 3615 * @link_up: TRUE when link is up
3614 3616 * @link_up_wait_to_complete: bool used to wait for link up or not
3615 3617 *
3616 3618 * Reads the links register to determine if link is up and the current speed
3617 3619 **/
3618 3620 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3619 3621 bool *link_up, bool link_up_wait_to_complete)
3620 3622 {
3621 3623 u32 links_reg, links_orig;
3622 3624 u32 i;
3623 3625
3624 3626 DEBUGFUNC("ixgbe_check_mac_link_generic");
3625 3627
3626 3628 /* clear the old state */
3627 3629 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3628 3630
3629 3631 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3630 3632
3631 3633 if (links_orig != links_reg) {
3632 3634 DEBUGOUT2("LINKS changed from %08X to %08X\n",
3633 3635 links_orig, links_reg);
3634 3636 }
3635 3637
3636 3638 if (link_up_wait_to_complete) {
3637 3639 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
3638 3640 if (links_reg & IXGBE_LINKS_UP) {
3639 3641 *link_up = TRUE;
3640 3642 break;
3641 3643 } else {
3642 3644 *link_up = FALSE;
3643 3645 }
3644 3646 msec_delay(100);
3645 3647 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3646 3648 }
3647 3649 } else {
3648 3650 if (links_reg & IXGBE_LINKS_UP)
3649 3651 *link_up = TRUE;
3650 3652 else
3651 3653 *link_up = FALSE;
3652 3654 }
3653 3655
3654 3656 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3655 3657 IXGBE_LINKS_SPEED_10G_82599)
3656 3658 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3657 3659 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3658 3660 IXGBE_LINKS_SPEED_1G_82599)
3659 3661 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3660 3662 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3661 3663 IXGBE_LINKS_SPEED_100_82599)
3662 3664 *speed = IXGBE_LINK_SPEED_100_FULL;
3663 3665 else
3664 3666 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3665 3667
3666 3668 return IXGBE_SUCCESS;
3667 3669 }
3668 3670
3669 3671 /**
3670 3672 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
3671 3673 * the EEPROM
3672 3674 * @hw: pointer to hardware structure
3673 3675 * @wwnn_prefix: the alternative WWNN prefix
3674 3676 * @wwpn_prefix: the alternative WWPN prefix
3675 3677 *
3676 3678 * This function will read the EEPROM from the alternative SAN MAC address
3677 3679 * block to check the support for the alternative WWNN/WWPN prefix support.
3678 3680 **/
3679 3681 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3680 3682 u16 *wwpn_prefix)
3681 3683 {
3682 3684 u16 offset, caps;
3683 3685 u16 alt_san_mac_blk_offset;
3684 3686
3685 3687 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
3686 3688
3687 3689 /* clear output first */
3688 3690 *wwnn_prefix = 0xFFFF;
3689 3691 *wwpn_prefix = 0xFFFF;
3690 3692
3691 3693 /* check if alternative SAN MAC is supported */
3692 3694 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
3693 3695 &alt_san_mac_blk_offset);
3694 3696
3695 3697 if ((alt_san_mac_blk_offset == 0) ||
3696 3698 (alt_san_mac_blk_offset == 0xFFFF))
3697 3699 goto wwn_prefix_out;
3698 3700
3699 3701 /* check capability in alternative san mac address block */
3700 3702 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3701 3703 hw->eeprom.ops.read(hw, offset, &caps);
3702 3704 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3703 3705 goto wwn_prefix_out;
3704 3706
3705 3707 /* get the corresponding prefix for WWNN/WWPN */
3706 3708 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3707 3709 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
3708 3710
3709 3711 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3710 3712 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
3711 3713
3712 3714 wwn_prefix_out:
3713 3715 return IXGBE_SUCCESS;
3714 3716 }
3715 3717
3716 3718 /**
3717 3719 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
3718 3720 * @hw: pointer to hardware structure
3719 3721 * @bs: the fcoe boot status
3720 3722 *
3721 3723 * This function will read the FCOE boot status from the iSCSI FCOE block
3722 3724 **/
3723 3725 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
3724 3726 {
3725 3727 u16 offset, caps, flags;
3726 3728 s32 status;
3727 3729
3728 3730 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
3729 3731
3730 3732 /* clear output first */
3731 3733 *bs = ixgbe_fcoe_bootstatus_unavailable;
3732 3734
3733 3735 /* check if FCOE IBA block is present */
3734 3736 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
3735 3737 status = hw->eeprom.ops.read(hw, offset, &caps);
3736 3738 if (status != IXGBE_SUCCESS)
3737 3739 goto out;
3738 3740
3739 3741 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
3740 3742 goto out;
3741 3743
3742 3744 /* check if iSCSI FCOE block is populated */
3743 3745 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
3744 3746 if (status != IXGBE_SUCCESS)
3745 3747 goto out;
3746 3748
3747 3749 if ((offset == 0) || (offset == 0xFFFF))
3748 3750 goto out;
3749 3751
3750 3752 /* read fcoe flags in iSCSI FCOE block */
3751 3753 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
3752 3754 status = hw->eeprom.ops.read(hw, offset, &flags);
3753 3755 if (status != IXGBE_SUCCESS)
3754 3756 goto out;
3755 3757
3756 3758 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
3757 3759 *bs = ixgbe_fcoe_bootstatus_enabled;
3758 3760 else
3759 3761 *bs = ixgbe_fcoe_bootstatus_disabled;
3760 3762
3761 3763 out:
3762 3764 return status;
3763 3765 }
3764 3766
/**
 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for anti-spoofing
 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
 *
 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
{
	int j;
	int pf_target_reg = pf >> 3;	/* PFVFSPOOF register holding the PF */
	int pf_target_shift = pf % 8;	/* bit position of the PF in that reg */
	u32 pfvfspoof = 0;

	/* 82598 has no VF pools and therefore no anti-spoofing hardware. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (enable)
		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;

	/*
	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
	 * MAC anti-spoof enables in each register array element.
	 * Registers fully below the PF's register get the full mask.
	 */
	for (j = 0; j < pf_target_reg; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * The PF should be allowed to spoof so that it can support
	 * emulation mode NICs. Do not set the bits assigned to the PF.
	 * NOTE: "j" is intentionally left at pf_target_reg here and is
	 * reused by this write and the loop below.
	 */
	pfvfspoof &= (1 << pf_target_shift) - 1;
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * Remaining pools belong to the PF so they do not need to have
	 * anti-spoofing enabled.
	 */
	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}
3806 3808
/**
 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for VLAN anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
 *
 **/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
	int vf_target_reg = vf >> 3;	/* PFVFSPOOF register for this VF */
	/* VLAN anti-spoof enables sit above the MAC enables in the reg */
	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
	u32 pfvfspoof;

	/* 82598 has no VF pools and therefore no anti-spoofing hardware. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* Read-modify-write only the bit belonging to this VF pool. */
	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	if (enable)
		pfvfspoof |= (1 << vf_target_shift);
	else
		pfvfspoof &= ~(1 << vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
3830 3832
3831 3833 /**
3832 3834 * ixgbe_get_device_caps_generic - Get additional device capabilities
3833 3835 * @hw: pointer to hardware structure
3834 3836 * @device_caps: the EEPROM word with the extra device capabilities
3835 3837 *
3836 3838 * This function will read the EEPROM location for the device capabilities,
3837 3839 * and return the word through device_caps.
3838 3840 **/
3839 3841 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
3840 3842 {
3841 3843 DEBUGFUNC("ixgbe_get_device_caps_generic");
3842 3844
3843 3845 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
3844 3846
3845 3847 return IXGBE_SUCCESS;
3846 3848 }
3847 3849
3848 3850 /**
3849 3851 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
3850 3852 * @hw: pointer to hardware structure
3851 3853 *
3852 3854 **/
3853 3855 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
3854 3856 {
3855 3857 u32 regval;
3856 3858 u32 i;
3857 3859
3858 3860 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
3859 3861
3860 3862 /* Enable relaxed ordering */
3861 3863 for (i = 0; i < hw->mac.max_tx_queues; i++) {
3862 3864 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
3863 3865 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3864 3866 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
3865 3867 }
3866 3868
3867 3869 for (i = 0; i < hw->mac.max_rx_queues; i++) {
3868 3870 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
3869 3871 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
3870 3872 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
3871 3873 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
3872 3874 }
3873 3875
3874 3876 }
3875 3877
3876 3878 /**
3877 3879 * ixgbe_calculate_checksum - Calculate checksum for buffer
3878 3880 * @buffer: pointer to EEPROM
3879 3881 * @length: size of EEPROM to calculate a checksum for
3880 3882 * Calculates the checksum for some buffer on a specified length. The
3881 3883 * checksum calculated is returned.
3882 3884 **/
3883 3885 static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
3884 3886 {
3885 3887 u32 i;
3886 3888 u8 sum = 0;
3887 3889
3888 3890 DEBUGFUNC("ixgbe_calculate_checksum");
3889 3891
3890 3892 if (!buffer)
3891 3893 return 0;
3892 3894
3893 3895 for (i = 0; i < length; i++)
3894 3896 sum += buffer[i];
3895 3897
3896 3898 return (u8) (0 - sum);
3897 3899 }
3898 3900
/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 * be placed
 * @length: length of buffer, must be multiple of 4 bytes
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
 **/
static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
					u32 length)
{
	u32 hicr, i, bi;
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	u8 buf_len, dword_len;

	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_host_interface_command");

	/* The mailbox works in whole dwords with a bounded total size. */
	if (length == 0 || length & 0x3 ||
	    length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT("Buffer length failure.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if ((hicr & IXGBE_HICR_EN) == 0) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs */
	dword_len = length >> 2;

	/*
	 * The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
		    i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Poll (1ms granularity) until firmware clears the command bit. */
	for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/*
	 * Check command successful completion: either the poll timed out
	 * or the status-valid (SV) bit was never set by firmware.
	 */
	if (i == IXGBE_HI_COMMAND_TIMEOUT ||
	    (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
		DEBUGOUT("Command has failed with no status valid.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

	/* If there is any thing in data position pull it in */
	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto out;

	/* The caller's buffer must hold the header plus the reply data. */
	if (length < (buf_len + hdr_size)) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off)*/
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

out:
	return ret_val;
}
3996 3998
3997 3999 /**
3998 4000 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
3999 4001 * @hw: pointer to the HW structure
4000 4002 * @maj: driver version major number
4001 4003 * @min: driver version minor number
4002 4004 * @build: driver version build number
4003 4005 * @sub: driver version sub build number
4004 4006 *
4005 4007 * Sends driver version number to firmware through the manageability
4006 4008 * block. On success return IXGBE_SUCCESS
4007 4009 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4008 4010 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4009 4011 **/
4010 4012 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4011 4013 u8 build, u8 sub)
4012 4014 {
4013 4015 struct ixgbe_hic_drv_info fw_cmd;
4014 4016 int i;
4015 4017 s32 ret_val = IXGBE_SUCCESS;
4016 4018
4017 4019 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4018 4020
4019 4021 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
4020 4022 != IXGBE_SUCCESS) {
4021 4023 ret_val = IXGBE_ERR_SWFW_SYNC;
4022 4024 goto out;
4023 4025 }
4024 4026
4025 4027 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4026 4028 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4027 4029 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4028 4030 fw_cmd.port_num = (u8)hw->bus.func;
4029 4031 fw_cmd.ver_maj = maj;
|
↓ open down ↓ |
785 lines elided |
↑ open up ↑ |
4030 4032 fw_cmd.ver_min = min;
4031 4033 fw_cmd.ver_build = build;
4032 4034 fw_cmd.ver_sub = sub;
4033 4035 fw_cmd.hdr.checksum = 0;
4034 4036 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4035 4037 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4036 4038 fw_cmd.pad = 0;
4037 4039 fw_cmd.pad2 = 0;
4038 4040
4039 4041 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4042 + /* LINTED */
4040 4043 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4041 4044 sizeof(fw_cmd));
4042 4045 if (ret_val != IXGBE_SUCCESS)
4043 4046 continue;
4044 4047
4045 4048 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4046 4049 FW_CEM_RESP_STATUS_SUCCESS)
4047 4050 ret_val = IXGBE_SUCCESS;
4048 4051 else
4049 4052 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4050 4053
4051 4054 break;
4052 4055 }
4053 4056
4054 4057 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4055 4058 out:
4056 4059 return ret_val;
4057 4060 }
4058 4061
4059 4062 /**
4060 4063 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4061 4064 * @hw: pointer to hardware structure
4062 4065 * @num_pb: number of packet buffers to allocate
4063 4066 * @headroom: reserve n KB of headroom
4064 4067 * @strategy: packet buffer allocation strategy
4065 4068 **/
4066 4069 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4067 4070 int strategy)
4068 4071 {
4069 4072 u32 pbsize = hw->mac.rx_pb_size;
4070 4073 int i = 0;
4071 4074 u32 rxpktsize, txpktsize, txpbthresh;
4072 4075
4073 4076 /* Reserve headroom */
4074 4077 pbsize -= headroom;
4075 4078
4076 4079 if (!num_pb)
4077 4080 num_pb = 1;
4078 4081
4079 4082 /* Divide remaining packet buffer space amongst the number of packet
4080 4083 * buffers requested using supplied strategy.
4081 4084 */
4082 4085 switch (strategy) {
|
↓ open down ↓ |
33 lines elided |
↑ open up ↑ |
4083 4086 case PBA_STRATEGY_WEIGHTED:
4084 4087 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4085 4088 * buffer with 5/8 of the packet buffer space.
4086 4089 */
4087 4090 rxpktsize = (pbsize * 5) / (num_pb * 4);
4088 4091 pbsize -= rxpktsize * (num_pb / 2);
4089 4092 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4090 4093 for (; i < (num_pb / 2); i++)
4091 4094 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4092 4095 /* Fall through to configure remaining packet buffers */
4096 + /* FALLTHRU */
4093 4097 case PBA_STRATEGY_EQUAL:
4094 4098 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4095 4099 for (; i < num_pb; i++)
4096 4100 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4097 4101 break;
4098 4102 default:
4099 4103 break;
4100 4104 }
4101 4105
4102 4106 /* Only support an equally distributed Tx packet buffer strategy. */
4103 4107 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4104 4108 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4105 4109 for (i = 0; i < num_pb; i++) {
4106 4110 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4107 4111 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4108 4112 }
4109 4113
4110 4114 /* Clear unused TCs, if any, to zero buffer size*/
4111 4115 for (; i < IXGBE_MAX_PB; i++) {
4112 4116 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4113 4117 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4114 4118 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4115 4119 }
4116 4120 }
4117 4121
4118 4122 /**
4119 4123 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4120 4124 * @hw: pointer to the hardware structure
4121 4125 *
4122 4126 * The 82599 and x540 MACs can experience issues if TX work is still pending
4123 4127 * when a reset occurs. This function prevents this by flushing the PCIe
4124 4128 * buffers on the system.
4125 4129 **/
4126 4130 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4127 4131 {
4128 4132 u32 gcr_ext, hlreg0;
4129 4133
4130 4134 /*
4131 4135 * If double reset is not requested then all transactions should
4132 4136 * already be clear and as such there is no work to do
4133 4137 */
4134 4138 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4135 4139 return;
4136 4140
4137 4141 /*
4138 4142 * Set loopback enable to prevent any transmits from being sent
4139 4143 * should the link come up. This assumes that the RXCTRL.RXEN bit
4140 4144 * has already been cleared.
4141 4145 */
4142 4146 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4143 4147 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4144 4148
4145 4149 /* initiate cleaning flow for buffers in the PCIe transaction layer */
4146 4150 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4147 4151 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4148 4152 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4149 4153
4150 4154 /* Flush all writes and allow 20usec for all transactions to clear */
4151 4155 IXGBE_WRITE_FLUSH(hw);
4152 4156 usec_delay(20);
4153 4157
4154 4158 /* restore previous register values */
4155 4159 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4156 4160 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4157 4161 }
4158 4162
|
↓ open down ↓ |
56 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX