Print this page
XXXX Intel X540 support
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/ixgbe/ixgbe_82598.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_82598.c
1 1 /******************************************************************************
2 2
3 - Copyright (c) 2001-2010, Intel Corporation
3 + Copyright (c) 2001-2012, Intel Corporation
4 4 All rights reserved.
5 5
6 6 Redistribution and use in source and binary forms, with or without
7 7 modification, are permitted provided that the following conditions are met:
8 8
9 9 1. Redistributions of source code must retain the above copyright notice,
10 10 this list of conditions and the following disclaimer.
11 11
12 12 2. Redistributions in binary form must reproduce the above copyright
13 13 notice, this list of conditions and the following disclaimer in the
14 14 documentation and/or other materials provided with the distribution.
15 15
16 16 3. Neither the name of the Intel Corporation nor the names of its
17 17 contributors may be used to endorse or promote products derived from
18 18 this software without specific prior written permission.
19 19
20 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
23 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 30 POSSIBILITY OF SUCH DAMAGE.
31 31
32 32 ******************************************************************************/
33 -/*$FreeBSD$*/
33 +/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82598.c,v 1.13 2012/07/05 20:51:44 jfv Exp $*/
34 34
35 35 #include "ixgbe_type.h"
36 +#include "ixgbe_82598.h"
36 37 #include "ixgbe_api.h"
37 38 #include "ixgbe_common.h"
38 39 #include "ixgbe_phy.h"
39 40
40 -u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
41 -s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
42 41 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
43 - ixgbe_link_speed *speed,
44 - bool *autoneg);
42 + ixgbe_link_speed *speed,
43 + bool *autoneg);
45 44 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
46 -s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
47 45 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
48 - bool autoneg_wait_to_complete);
46 + bool autoneg_wait_to_complete);
49 47 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
50 - ixgbe_link_speed *speed, bool *link_up,
51 - bool link_up_wait_to_complete);
48 + ixgbe_link_speed *speed, bool *link_up,
49 + bool link_up_wait_to_complete);
52 50 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
53 - ixgbe_link_speed speed,
54 - bool autoneg,
55 - bool autoneg_wait_to_complete);
51 + ixgbe_link_speed speed,
52 + bool autoneg,
53 + bool autoneg_wait_to_complete);
56 54 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
57 - ixgbe_link_speed speed,
58 - bool autoneg,
59 - bool autoneg_wait_to_complete);
55 + ixgbe_link_speed speed,
56 + bool autoneg,
57 + bool autoneg_wait_to_complete);
60 58 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
61 -s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
62 -void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
63 -s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
64 59 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
65 -s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
66 - u32 vind, bool vlan_on);
67 60 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
68 -s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
69 -s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
70 -s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
71 - u8 *eeprom_data);
72 -u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
73 -s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
74 -void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
75 -void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
61 +static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
62 + u32 headroom, int strategy);
76 63
77 64 /**
78 65 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
79 66 * @hw: pointer to the HW structure
80 67 *
81 68 * The defaults for 82598 should be in the range of 50us to 50ms,
82 69 * however the hardware default for these parts is 500us to 1ms which is less
83 70 * than the 10ms recommended by the pci-e spec. To address this we need to
84 71 * increase the value to either 10ms to 250ms for capability version 1 config,
85 72 * or 16ms to 55ms for version 2.
86 73 **/
87 74 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
88 75 {
89 76 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
90 77 u16 pcie_devctl2;
91 78
92 79 /* only take action if timeout value is defaulted to 0 */
93 80 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
94 81 goto out;
95 82
96 83 /*
97 84 * if capababilities version is type 1 we can write the
98 85 * timeout of 10ms to 250ms through the GCR register
99 86 */
100 87 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
101 88 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
102 89 goto out;
103 90 }
104 91
105 92 /*
106 93 * for version 2 capabilities we need to write the config space
107 94 * directly in order to set the completion timeout value for
108 95 * 16ms to 55ms
109 96 */
|
↓ open down ↓ |
24 lines elided |
↑ open up ↑ |
110 97 pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
111 98 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
112 99 IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
113 100 out:
114 101 /* disable completion timeout resend */
115 102 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
116 103 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
117 104 }
118 105
119 106 /**
120 - * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
121 - * @hw: pointer to hardware structure
122 - *
123 - * Read PCIe configuration space, and get the MSI-X vector count from
124 - * the capabilities table.
125 - **/
126 -u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
127 -{
128 - u32 msix_count = 18;
129 -
130 - DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");
131 -
132 - if (hw->mac.msix_vectors_from_pcie) {
133 - msix_count = IXGBE_READ_PCIE_WORD(hw,
134 - IXGBE_PCIE_MSIX_82598_CAPS);
135 - msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
136 -
137 - /* MSI-X count is zero-based in HW, so increment to give
138 - * proper value */
139 - msix_count++;
140 - }
141 - return msix_count;
142 -}
143 -
144 -/**
145 107 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
146 108 * @hw: pointer to hardware structure
147 109 *
148 110 * Initialize the function pointers and assign the MAC type for 82598.
149 111 * Does not touch the hardware.
150 112 **/
151 113 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
152 114 {
153 115 struct ixgbe_mac_info *mac = &hw->mac;
154 116 struct ixgbe_phy_info *phy = &hw->phy;
155 117 s32 ret_val;
156 118
157 119 DEBUGFUNC("ixgbe_init_ops_82598");
158 120
159 121 ret_val = ixgbe_init_phy_ops_generic(hw);
160 122 ret_val = ixgbe_init_ops_generic(hw);
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
161 123
162 124 /* PHY */
163 125 phy->ops.init = &ixgbe_init_phy_ops_82598;
164 126
165 127 /* MAC */
166 128 mac->ops.start_hw = &ixgbe_start_hw_82598;
167 129 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
168 130 mac->ops.reset_hw = &ixgbe_reset_hw_82598;
169 131 mac->ops.get_media_type = &ixgbe_get_media_type_82598;
170 132 mac->ops.get_supported_physical_layer =
171 - &ixgbe_get_supported_physical_layer_82598;
133 + &ixgbe_get_supported_physical_layer_82598;
172 134 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
173 135 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
174 136 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
175 137
176 138 /* RAR, Multicast, VLAN */
177 139 mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
178 140 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
179 141 mac->ops.set_vfta = &ixgbe_set_vfta_82598;
142 + mac->ops.set_vlvf = NULL;
180 143 mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
181 144
182 145 /* Flow Control */
183 146 mac->ops.fc_enable = &ixgbe_fc_enable_82598;
184 147
185 - mac->mcft_size = 128;
186 - mac->vft_size = 128;
187 - mac->num_rar_entries = 16;
188 - mac->rx_pb_size = 512;
189 - mac->max_tx_queues = 32;
190 - mac->max_rx_queues = 64;
191 - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
148 + mac->mcft_size = 128;
149 + mac->vft_size = 128;
150 + mac->num_rar_entries = 16;
151 + mac->rx_pb_size = 512;
152 + mac->max_tx_queues = 32;
153 + mac->max_rx_queues = 64;
154 + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
192 155
193 156 /* SFP+ Module */
194 157 phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
195 158
196 159 /* Link */
197 160 mac->ops.check_link = &ixgbe_check_mac_link_82598;
198 161 mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
199 162 mac->ops.flap_tx_laser = NULL;
200 - mac->ops.get_link_capabilities =
201 - &ixgbe_get_link_capabilities_82598;
163 + mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
164 + mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
202 165
166 + /* Manageability interface */
167 + mac->ops.set_fw_drv_ver = NULL;
168 +
203 169 return ret_val;
204 170 }
205 171
206 172 /**
207 173 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
208 174 * @hw: pointer to hardware structure
209 175 *
210 176 * Initialize any function pointers that were not able to be
211 177 * set during init_shared_code because the PHY/SFP type was
212 178 * not known. Perform the SFP init if necessary.
213 179 *
214 180 **/
215 181 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
216 182 {
217 183 struct ixgbe_mac_info *mac = &hw->mac;
218 184 struct ixgbe_phy_info *phy = &hw->phy;
219 185 s32 ret_val = IXGBE_SUCCESS;
220 186 u16 list_offset, data_offset;
|
↓ open down ↓ |
8 lines elided |
↑ open up ↑ |
221 187
222 188 DEBUGFUNC("ixgbe_init_phy_ops_82598");
223 189
224 190 /* Identify the PHY */
225 191 phy->ops.identify(hw);
226 192
227 193 /* Overwrite the link function pointers if copper PHY */
228 194 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
229 195 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
230 196 mac->ops.get_link_capabilities =
231 - &ixgbe_get_copper_link_capabilities_generic;
197 + &ixgbe_get_copper_link_capabilities_generic;
232 198 }
233 199
234 200 switch (hw->phy.type) {
235 201 case ixgbe_phy_tn:
236 202 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
237 203 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
238 204 phy->ops.get_firmware_version =
239 - &ixgbe_get_phy_firmware_version_tnx;
205 + &ixgbe_get_phy_firmware_version_tnx;
240 206 break;
241 - case ixgbe_phy_aq:
242 - phy->ops.get_firmware_version =
243 - &ixgbe_get_phy_firmware_version_generic;
244 - break;
245 207 case ixgbe_phy_nl:
246 208 phy->ops.reset = &ixgbe_reset_phy_nl;
247 209
248 210 /* Call SFP+ identify routine to get the SFP+ module type */
249 211 ret_val = phy->ops.identify_sfp(hw);
250 212 if (ret_val != IXGBE_SUCCESS)
251 213 goto out;
252 214 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
253 215 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
254 216 goto out;
255 217 }
256 218
257 219 /* Check to see if SFP+ module is supported */
258 220 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
259 - &list_offset,
260 - &data_offset);
221 + &list_offset,
222 + &data_offset);
261 223 if (ret_val != IXGBE_SUCCESS) {
262 224 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
263 225 goto out;
264 226 }
265 227 break;
266 228 default:
267 229 break;
268 230 }
269 231
270 232 out:
271 233 return ret_val;
272 234 }
273 235
274 236 /**
275 237 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
276 238 * @hw: pointer to hardware structure
277 239 *
278 240 * Starts the hardware using the generic start_hw function.
279 241 * Disables relaxed ordering Then set pcie completion timeout
280 242 *
281 243 **/
282 244 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
283 245 {
284 246 u32 regval;
285 247 u32 i;
|
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
286 248 s32 ret_val = IXGBE_SUCCESS;
287 249
288 250 DEBUGFUNC("ixgbe_start_hw_82598");
289 251
290 252 ret_val = ixgbe_start_hw_generic(hw);
291 253
292 254 /* Disable relaxed ordering */
293 255 for (i = 0; ((i < hw->mac.max_tx_queues) &&
294 256 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
295 257 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
296 - regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
258 + regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
297 259 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
298 260 }
299 261
300 262 for (i = 0; ((i < hw->mac.max_rx_queues) &&
301 263 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
302 264 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
303 - regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
304 - IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
265 + regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
266 + IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
305 267 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
306 268 }
307 269
308 270 /* set the completion timeout for interface */
309 271 if (ret_val == IXGBE_SUCCESS)
310 272 ixgbe_set_pcie_completion_timeout(hw);
311 273
312 274 return ret_val;
313 275 }
314 276
315 277 /**
316 278 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
317 279 * @hw: pointer to hardware structure
318 280 * @speed: pointer to link speed
319 281 * @autoneg: boolean auto-negotiation value
320 282 *
321 283 * Determines the link capabilities by reading the AUTOC register.
322 284 **/
323 285 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
324 - ixgbe_link_speed *speed,
325 - bool *autoneg)
286 + ixgbe_link_speed *speed,
287 + bool *autoneg)
326 288 {
327 289 s32 status = IXGBE_SUCCESS;
328 290 u32 autoc = 0;
329 291
330 292 DEBUGFUNC("ixgbe_get_link_capabilities_82598");
331 293
332 294 /*
333 295 * Determine link capabilities based on the stored value of AUTOC,
334 296 * which represents EEPROM defaults. If AUTOC value has not been
335 297 * stored, use the current register value.
336 298 */
337 299 if (hw->mac.orig_link_settings_stored)
338 300 autoc = hw->mac.orig_autoc;
339 301 else
340 302 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
341 303
342 304 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
343 305 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
344 306 *speed = IXGBE_LINK_SPEED_1GB_FULL;
345 307 *autoneg = FALSE;
346 308 break;
347 309
348 310 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
349 311 *speed = IXGBE_LINK_SPEED_10GB_FULL;
350 312 *autoneg = FALSE;
351 313 break;
352 314
353 315 case IXGBE_AUTOC_LMS_1G_AN:
354 316 *speed = IXGBE_LINK_SPEED_1GB_FULL;
355 317 *autoneg = TRUE;
356 318 break;
357 319
358 320 case IXGBE_AUTOC_LMS_KX4_AN:
359 321 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
360 322 *speed = IXGBE_LINK_SPEED_UNKNOWN;
361 323 if (autoc & IXGBE_AUTOC_KX4_SUPP)
362 324 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
363 325 if (autoc & IXGBE_AUTOC_KX_SUPP)
364 326 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
365 327 *autoneg = TRUE;
366 328 break;
367 329
368 330 default:
369 331 status = IXGBE_ERR_LINK_SETUP;
370 332 break;
371 333 }
372 334
373 335 return status;
374 336 }
375 337
376 338 /**
377 339 * ixgbe_get_media_type_82598 - Determines media type
378 340 * @hw: pointer to hardware structure
379 341 *
380 342 * Returns the media type (fiber, copper, backplane)
381 343 **/
|
↓ open down ↓ |
46 lines elided |
↑ open up ↑ |
382 344 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
383 345 {
384 346 enum ixgbe_media_type media_type;
385 347
386 348 DEBUGFUNC("ixgbe_get_media_type_82598");
387 349
388 350 /* Detect if there is a copper PHY attached. */
389 351 switch (hw->phy.type) {
390 352 case ixgbe_phy_cu_unknown:
391 353 case ixgbe_phy_tn:
392 - case ixgbe_phy_aq:
393 354 media_type = ixgbe_media_type_copper;
394 355 goto out;
395 356 default:
396 357 break;
397 358 }
398 359
399 360 /* Media type for I82598 is based on device ID */
400 361 switch (hw->device_id) {
401 362 case IXGBE_DEV_ID_82598:
402 363 case IXGBE_DEV_ID_82598_BX:
403 364 /* Default device ID is mezzanine card KX/KX4 */
404 365 media_type = ixgbe_media_type_backplane;
405 366 break;
406 367 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
407 368 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
408 369 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
409 370 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
410 371 case IXGBE_DEV_ID_82598EB_XF_LR:
411 372 case IXGBE_DEV_ID_82598EB_SFP_LOM:
412 373 media_type = ixgbe_media_type_fiber;
413 374 break;
414 375 case IXGBE_DEV_ID_82598EB_CX4:
415 376 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
416 377 media_type = ixgbe_media_type_cx4;
417 378 break;
418 379 case IXGBE_DEV_ID_82598AT:
419 380 case IXGBE_DEV_ID_82598AT2:
420 381 media_type = ixgbe_media_type_copper;
421 382 break;
422 383 default:
|
↓ open down ↓ |
20 lines elided |
↑ open up ↑ |
423 384 media_type = ixgbe_media_type_unknown;
424 385 break;
425 386 }
426 387 out:
427 388 return media_type;
428 389 }
429 390
430 391 /**
431 392 * ixgbe_fc_enable_82598 - Enable flow control
432 393 * @hw: pointer to hardware structure
433 - * @packetbuf_num: packet buffer number (0-7)
434 394 *
435 395 * Enable flow control according to the current settings.
436 396 **/
437 -s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
397 +s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
438 398 {
439 399 s32 ret_val = IXGBE_SUCCESS;
440 400 u32 fctrl_reg;
441 401 u32 rmcs_reg;
442 402 u32 reg;
443 - u32 rx_pba_size;
403 + u32 fcrtl, fcrth;
444 404 u32 link_speed = 0;
405 + int i;
445 406 bool link_up;
446 407
447 408 DEBUGFUNC("ixgbe_fc_enable_82598");
448 409
410 + /* Validate the water mark configuration */
411 + if (!hw->fc.pause_time) {
412 + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
413 + goto out;
414 + }
415 +
416 + /* Low water mark of zero causes XOFF floods */
417 + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
418 + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
419 + hw->fc.high_water[i]) {
420 + if (!hw->fc.low_water[i] ||
421 + hw->fc.low_water[i] >= hw->fc.high_water[i]) {
422 + DEBUGOUT("Invalid water mark configuration\n");
423 + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
424 + goto out;
425 + }
426 + }
427 + }
428 +
449 429 /*
450 430 * On 82598 having Rx FC on causes resets while doing 1G
451 431 * so if it's on turn it off once we know link_speed. For
452 432 * more details see 82598 Specification update.
453 433 */
454 434 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
455 435 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
456 436 switch (hw->fc.requested_mode) {
457 437 case ixgbe_fc_full:
458 438 hw->fc.requested_mode = ixgbe_fc_tx_pause;
459 439 break;
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
460 440 case ixgbe_fc_rx_pause:
461 441 hw->fc.requested_mode = ixgbe_fc_none;
462 442 break;
463 443 default:
464 444 /* no change */
465 445 break;
466 446 }
467 447 }
468 448
469 449 /* Negotiate the fc mode to use */
470 - ret_val = ixgbe_fc_autoneg(hw);
471 - if (ret_val == IXGBE_ERR_FLOW_CONTROL)
472 - goto out;
450 + ixgbe_fc_autoneg(hw);
473 451
474 452 /* Disable any previous flow control settings */
475 453 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
476 454 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
477 455
478 456 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
479 457 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
480 458
481 459 /*
482 460 * The possible values of fc.current_mode are:
483 461 * 0: Flow control is completely disabled
484 462 * 1: Rx flow control is enabled (we can receive pause frames,
485 463 * but not send pause frames).
486 464 * 2: Tx flow control is enabled (we can send pause frames but
487 465 * we do not support receiving pause frames).
488 466 * 3: Both Rx and Tx flow control (symmetric) are enabled.
489 467 * other: Invalid.
490 468 */
491 469 switch (hw->fc.current_mode) {
492 470 case ixgbe_fc_none:
493 471 /*
494 472 * Flow control is disabled by software override or autoneg.
495 473 * The code below will actually disable it in the HW.
496 474 */
497 475 break;
498 476 case ixgbe_fc_rx_pause:
499 477 /*
500 478 * Rx Flow control is enabled and Tx Flow control is
501 479 * disabled by software override. Since there really
502 480 * isn't a way to advertise that we are capable of RX
503 481 * Pause ONLY, we will advertise that we support both
504 482 * symmetric and asymmetric Rx PAUSE. Later, we will
505 483 * disable the adapter's ability to send PAUSE frames.
506 484 */
507 485 fctrl_reg |= IXGBE_FCTRL_RFCE;
508 486 break;
509 487 case ixgbe_fc_tx_pause:
510 488 /*
511 489 * Tx Flow control is enabled, and Rx Flow control is
512 490 * disabled by software override.
513 491 */
514 492 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
515 493 break;
516 494 case ixgbe_fc_full:
517 495 /* Flow control (both Rx and Tx) is enabled by SW override. */
518 496 fctrl_reg |= IXGBE_FCTRL_RFCE;
519 497 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
520 498 break;
521 499 default:
522 500 DEBUGOUT("Flow control param set incorrectly\n");
|
↓ open down ↓ |
40 lines elided |
↑ open up ↑ |
523 501 ret_val = IXGBE_ERR_CONFIG;
524 502 goto out;
525 503 }
526 504
527 505 /* Set 802.3x based flow control settings. */
528 506 fctrl_reg |= IXGBE_FCTRL_DPF;
529 507 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
530 508 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
531 509
532 510 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
533 - if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
534 - rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
535 - rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
511 + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
512 + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
513 + hw->fc.high_water[i]) {
514 + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
515 + fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
516 + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
517 + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
518 + } else {
519 + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
520 + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
521 + }
536 522
537 - reg = (rx_pba_size - hw->fc.low_water) << 6;
538 - if (hw->fc.send_xon)
539 - reg |= IXGBE_FCRTL_XONE;
540 -
541 - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
542 -
543 - reg = (rx_pba_size - hw->fc.high_water) << 6;
544 - reg |= IXGBE_FCRTH_FCEN;
545 -
546 - IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
547 523 }
548 524
549 525 /* Configure pause time (2 TCs per register) */
550 - reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
551 - if ((packetbuf_num & 1) == 0)
552 - reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
553 - else
554 - reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
555 - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
526 + reg = hw->fc.pause_time * 0x00010001;
527 + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
528 + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
556 529
557 - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
530 + /* Configure flow control refresh threshold value */
531 + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
558 532
559 533 out:
560 534 return ret_val;
561 535 }
562 536
563 537 /**
564 538 * ixgbe_start_mac_link_82598 - Configures MAC link settings
565 539 * @hw: pointer to hardware structure
566 540 *
567 541 * Configures link settings based on values in the ixgbe_hw struct.
568 542 * Restarts the link. Performs autonegotiation if needed.
569 543 **/
570 544 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
571 - bool autoneg_wait_to_complete)
545 + bool autoneg_wait_to_complete)
572 546 {
573 547 u32 autoc_reg;
574 548 u32 links_reg;
575 549 u32 i;
576 550 s32 status = IXGBE_SUCCESS;
577 551
578 552 DEBUGFUNC("ixgbe_start_mac_link_82598");
579 553
580 554 /* Restart link */
581 555 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
582 556 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
583 557 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
584 558
585 559 /* Only poll for autoneg to complete if specified to do so */
586 560 if (autoneg_wait_to_complete) {
587 561 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
588 562 IXGBE_AUTOC_LMS_KX4_AN ||
589 563 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
590 564 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
591 565 links_reg = 0; /* Just in case Autoneg time = 0 */
592 566 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
593 567 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
594 568 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
595 569 break;
596 570 msec_delay(100);
597 571 }
598 572 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
599 573 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
600 574 DEBUGOUT("Autonegotiation did not complete.\n");
601 575 }
602 576 }
603 577 }
604 578
605 579 /* Add delay to filter out noises during initial link setup */
606 580 msec_delay(50);
607 581
608 582 return status;
609 583 }
610 584
611 585 /**
612 586 * ixgbe_validate_link_ready - Function looks for phy link
613 587 * @hw: pointer to hardware structure
614 588 *
615 589 * Function indicates success when phy link is available. If phy is not ready
616 590 * within 5 seconds of MAC indicating link, the function returns error.
617 591 **/
618 592 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
|
↓ open down ↓ |
37 lines elided |
↑ open up ↑ |
619 593 {
620 594 u32 timeout;
621 595 u16 an_reg;
622 596
623 597 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
624 598 return IXGBE_SUCCESS;
625 599
626 600 for (timeout = 0;
627 601 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
628 602 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
629 - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
603 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
630 604
631 605 if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
632 606 (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
633 607 break;
634 608
635 609 msec_delay(100);
636 610 }
637 611
638 612 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
639 613 DEBUGOUT("Link was indicated but link is down\n");
640 614 return IXGBE_ERR_LINK_SETUP;
641 615 }
642 616
643 617 return IXGBE_SUCCESS;
644 618 }
645 619
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
646 620 /**
647 621 * ixgbe_check_mac_link_82598 - Get link/speed status
648 622 * @hw: pointer to hardware structure
649 623 * @speed: pointer to link speed
650 624 * @link_up: TRUE is link is up, FALSE otherwise
651 625 * @link_up_wait_to_complete: bool used to wait for link up or not
652 626 *
653 627 * Reads the links register to determine if link is up and the current speed
654 628 **/
655 629 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
656 - ixgbe_link_speed *speed, bool *link_up,
657 - bool link_up_wait_to_complete)
630 + ixgbe_link_speed *speed, bool *link_up,
631 + bool link_up_wait_to_complete)
658 632 {
659 633 u32 links_reg;
660 634 u32 i;
661 635 u16 link_reg, adapt_comp_reg;
662 636
663 637 DEBUGFUNC("ixgbe_check_mac_link_82598");
664 638
665 639 /*
666 640 * SERDES PHY requires us to read link status from undocumented
667 641 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
668 642 * indicates link down. OxC00C is read to check that the XAUI lanes
669 643 * are active. Bit 0 clear indicates active; set indicates inactive.
670 644 */
671 645 if (hw->phy.type == ixgbe_phy_nl) {
672 646 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
673 647 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
674 648 hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
675 - &adapt_comp_reg);
649 + &adapt_comp_reg);
676 650 if (link_up_wait_to_complete) {
677 651 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
678 652 if ((link_reg & 1) &&
679 653 ((adapt_comp_reg & 1) == 0)) {
680 654 *link_up = TRUE;
681 655 break;
682 656 } else {
683 657 *link_up = FALSE;
684 658 }
685 659 msec_delay(100);
686 660 hw->phy.ops.read_reg(hw, 0xC79F,
687 - IXGBE_TWINAX_DEV,
688 - &link_reg);
661 + IXGBE_TWINAX_DEV,
662 + &link_reg);
689 663 hw->phy.ops.read_reg(hw, 0xC00C,
690 - IXGBE_TWINAX_DEV,
691 - &adapt_comp_reg);
664 + IXGBE_TWINAX_DEV,
665 + &adapt_comp_reg);
692 666 }
693 667 } else {
694 668 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
695 669 *link_up = TRUE;
696 670 else
697 671 *link_up = FALSE;
698 672 }
699 673
700 674 if (*link_up == FALSE)
701 675 goto out;
702 676 }
703 677
704 678 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
705 679 if (link_up_wait_to_complete) {
706 680 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
707 681 if (links_reg & IXGBE_LINKS_UP) {
708 682 *link_up = TRUE;
709 683 break;
710 684 } else {
711 685 *link_up = FALSE;
712 686 }
713 687 msec_delay(100);
714 688 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
715 689 }
716 690 } else {
717 691 if (links_reg & IXGBE_LINKS_UP)
718 692 *link_up = TRUE;
719 693 else
720 694 *link_up = FALSE;
721 695 }
|
↓ open down ↓ |
20 lines elided |
↑ open up ↑ |
722 696
723 697 if (links_reg & IXGBE_LINKS_SPEED)
724 698 *speed = IXGBE_LINK_SPEED_10GB_FULL;
725 699 else
726 700 *speed = IXGBE_LINK_SPEED_1GB_FULL;
727 701
728 702 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
729 703 (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
730 704 *link_up = FALSE;
731 705
732 - /* if link is down, zero out the current_mode */
733 - if (*link_up == FALSE) {
734 - hw->fc.current_mode = ixgbe_fc_none;
735 - hw->fc.fc_was_autonegged = FALSE;
736 - }
737 706 out:
738 707 return IXGBE_SUCCESS;
739 708 }
740 709
741 710 /**
742 711 * ixgbe_setup_mac_link_82598 - Set MAC link speed
743 712 * @hw: pointer to hardware structure
744 713 * @speed: new link speed
745 714 * @autoneg: TRUE if autonegotiation enabled
746 715 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
747 716 *
748 717 * Set the link speed in the AUTOC register and restarts link.
749 718 **/
750 719 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
751 - ixgbe_link_speed speed, bool autoneg,
752 - bool autoneg_wait_to_complete)
720 + ixgbe_link_speed speed, bool autoneg,
721 + bool autoneg_wait_to_complete)
753 722 {
754 - s32 status = IXGBE_SUCCESS;
723 + s32 status = IXGBE_SUCCESS;
755 724 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
756 - u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
757 - u32 autoc = curr_autoc;
758 - u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
725 + u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
726 + u32 autoc = curr_autoc;
727 + u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
759 728
760 729 DEBUGFUNC("ixgbe_setup_mac_link_82598");
761 730
762 731 /* Check to see if speed passed in is supported. */
763 - (void) ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
732 + ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
764 733 speed &= link_capabilities;
765 734
766 735 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
767 736 status = IXGBE_ERR_LINK_SETUP;
768 737
769 738 /* Set KX4/KX support according to speed requested */
770 739 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
771 - link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
740 + link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
772 741 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
773 742 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
774 743 autoc |= IXGBE_AUTOC_KX4_SUPP;
775 744 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
776 745 autoc |= IXGBE_AUTOC_KX_SUPP;
777 746 if (autoc != curr_autoc)
778 747 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
779 748 }
780 749
781 750 if (status == IXGBE_SUCCESS) {
782 751 /*
783 752 * Setup and restart the link based on the new values in
784 753 * ixgbe_hw This will write the AUTOC register based on the new
785 754 * stored values
786 755 */
787 756 status = ixgbe_start_mac_link_82598(hw,
788 - autoneg_wait_to_complete);
757 + autoneg_wait_to_complete);
789 758 }
790 759
791 760 return status;
792 761 }
793 762
794 763
795 764 /**
796 765 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
797 766 * @hw: pointer to hardware structure
798 767 * @speed: new link speed
799 768 * @autoneg: TRUE if autonegotiation enabled
800 769 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
801 770 *
802 771 * Sets the link speed in the AUTOC register in the MAC and restarts link.
803 772 **/
804 773 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
805 - ixgbe_link_speed speed,
806 - bool autoneg,
807 - bool autoneg_wait_to_complete)
774 + ixgbe_link_speed speed,
775 + bool autoneg,
776 + bool autoneg_wait_to_complete)
808 777 {
809 778 s32 status;
810 779
811 780 DEBUGFUNC("ixgbe_setup_copper_link_82598");
812 781
813 782 /* Setup the PHY according to input speed */
814 783 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
815 - autoneg_wait_to_complete);
784 + autoneg_wait_to_complete);
816 785 /* Set up MAC */
817 - (void) ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
786 + ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
818 787
819 788 return status;
820 789 }
821 790
/**
 * ixgbe_reset_hw_82598 - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
 * reset.
 *
 * Returns IXGBE_SUCCESS, the status of a failed adapter stop, a PHY init
 * error, or IXGBE_ERR_RESET_FAILED if the reset bit never self-cleared.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	s32 phy_status = IXGBE_SUCCESS;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8  analog_val;

	DEBUGFUNC("ixgbe_reset_hw_82598");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		/* Clear the 10G lane power-down quads */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		/* Clear the 1G lane power-down quads */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		/* Clear the AN lane power-down quads */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		/* No SFP module: skip the PHY reset but still reset the MAC */
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC. This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	/* Stall so pending HW events can settle before continuing */
	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions. Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		/* Clear the flag first so the second pass falls through */
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet. Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = TRUE;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	/* A PHY init failure takes precedence over the MAC reset status */
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}
967 928
968 929 /**
969 930 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
970 931 * @hw: pointer to hardware struct
971 932 * @rar: receive address register index to associate with a VMDq index
972 933 * @vmdq: VMDq set index
973 934 **/
974 935 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
975 936 {
976 937 u32 rar_high;
977 938 u32 rar_entries = hw->mac.num_rar_entries;
978 939
979 940 DEBUGFUNC("ixgbe_set_vmdq_82598");
980 941
981 942 /* Make sure we are using a valid rar index range */
982 943 if (rar >= rar_entries) {
983 944 DEBUGOUT1("RAR index %d is out of range.\n", rar);
984 945 return IXGBE_ERR_INVALID_ARGUMENT;
985 946 }
986 947
987 948 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
988 949 rar_high &= ~IXGBE_RAH_VIND_MASK;
989 950 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
990 951 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
991 952 return IXGBE_SUCCESS;
992 953 }
993 954
994 955 /**
|
↓ open down ↓ |
50 lines elided |
↑ open up ↑ |
995 956 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
996 957 * @hw: pointer to hardware struct
997 958 * @rar: receive address register index to associate with a VMDq index
998 959 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
999 960 **/
1000 961 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
1001 962 {
1002 963 u32 rar_high;
1003 964 u32 rar_entries = hw->mac.num_rar_entries;
1004 965
1005 - UNREFERENCED_PARAMETER(vmdq);
966 + UNREFERENCED_1PARAMETER(vmdq);
1006 967
1007 968 /* Make sure we are using a valid rar index range */
1008 969 if (rar >= rar_entries) {
1009 970 DEBUGOUT1("RAR index %d is out of range.\n", rar);
1010 971 return IXGBE_ERR_INVALID_ARGUMENT;
1011 972 }
1012 973
1013 974 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
1014 975 if (rar_high & IXGBE_RAH_VIND_MASK) {
1015 976 rar_high &= ~IXGBE_RAH_VIND_MASK;
1016 977 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
1017 978 }
1018 979
1019 980 return IXGBE_SUCCESS;
1020 981 }
1021 982
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
1022 983 /**
1023 984 * ixgbe_set_vfta_82598 - Set VLAN filter table
1024 985 * @hw: pointer to hardware structure
1025 986 * @vlan: VLAN id to write to VLAN filter
1026 987 * @vind: VMDq output index that maps queue to VLAN id in VFTA
1027 988 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
1028 989 *
1029 990 * Turn on/off specified VLAN in the VLAN filter table.
1030 991 **/
1031 992 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1032 - bool vlan_on)
993 + bool vlan_on)
1033 994 {
1034 995 u32 regindex;
1035 996 u32 bitindex;
1036 997 u32 bits;
1037 998 u32 vftabyte;
1038 999
1039 1000 DEBUGFUNC("ixgbe_set_vfta_82598");
1040 1001
1041 1002 if (vlan > 4095)
1042 1003 return IXGBE_ERR_PARAM;
1043 1004
1044 1005 /* Determine 32-bit word position in array */
1045 1006 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
1046 1007
1047 1008 /* Determine the location of the (VMD) queue index */
1048 1009 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1049 1010 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
1050 1011
1051 1012 /* Set the nibble for VMD queue index */
1052 1013 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1053 1014 bits &= (~(0x0F << bitindex));
1054 1015 bits |= (vind << bitindex);
1055 1016 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1056 1017
1057 1018 /* Determine the location of the bit for this VLAN id */
1058 1019 bitindex = vlan & 0x1F; /* lower five bits */
1059 1020
1060 1021 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1061 1022 if (vlan_on)
1062 1023 /* Turn on this VLAN id */
1063 1024 bits |= (1 << bitindex);
1064 1025 else
1065 1026 /* Turn off this VLAN id */
1066 1027 bits &= ~(1 << bitindex);
1067 1028 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1068 1029
1069 1030 return IXGBE_SUCCESS;
1070 1031 }
1071 1032
1072 1033 /**
1073 1034 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1074 1035 * @hw: pointer to hardware structure
1075 1036 *
1076 1037 * Clears the VLAN filer table, and the VMDq index associated with the filter
1077 1038 **/
1078 1039 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1079 1040 {
1080 1041 u32 offset;
|
↓ open down ↓ |
38 lines elided |
↑ open up ↑ |
1081 1042 u32 vlanbyte;
1082 1043
1083 1044 DEBUGFUNC("ixgbe_clear_vfta_82598");
1084 1045
1085 1046 for (offset = 0; offset < hw->mac.vft_size; offset++)
1086 1047 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1087 1048
1088 1049 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1089 1050 for (offset = 0; offset < hw->mac.vft_size; offset++)
1090 1051 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1091 - 0);
1052 + 0);
1092 1053
1093 1054 return IXGBE_SUCCESS;
1094 1055 }
1095 1056
1096 1057 /**
1097 1058 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1098 1059 * @hw: pointer to hardware structure
1099 1060 * @reg: analog register to read
1100 1061 * @val: read value
1101 1062 *
1102 1063 * Performs read operation to Atlas analog register specified.
1103 1064 **/
1104 1065 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1105 1066 {
1106 1067 u32 atlas_ctl;
1107 1068
1108 1069 DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1109 1070
1110 1071 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1111 - IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1072 + IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1112 1073 IXGBE_WRITE_FLUSH(hw);
1113 1074 usec_delay(10);
1114 1075 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1115 1076 *val = (u8)atlas_ctl;
1116 1077
1117 1078 return IXGBE_SUCCESS;
1118 1079 }
1119 1080
1120 1081 /**
1121 1082 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1122 1083 * @hw: pointer to hardware structure
1123 1084 * @reg: atlas register to write
1124 1085 * @val: value to write
1125 1086 *
1126 1087 * Performs write operation to Atlas analog register specified.
1127 1088 **/
1128 1089 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1129 1090 {
1130 1091 u32 atlas_ctl;
1131 1092
1132 1093 DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1133 1094
1134 1095 atlas_ctl = (reg << 8) | val;
1135 1096 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1136 1097 IXGBE_WRITE_FLUSH(hw);
1137 1098 usec_delay(10);
1138 1099
1139 1100 return IXGBE_SUCCESS;
1140 1101 }
|
↓ open down ↓ |
19 lines elided |
↑ open up ↑ |
1141 1102
1142 1103 /**
1143 1104 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1144 1105 * @hw: pointer to hardware structure
1145 1106 * @byte_offset: EEPROM byte offset to read
1146 1107 * @eeprom_data: value read
1147 1108 *
1148 1109 * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
1149 1110 **/
1150 1111 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1151 - u8 *eeprom_data)
1112 + u8 *eeprom_data)
1152 1113 {
1153 1114 s32 status = IXGBE_SUCCESS;
1154 1115 u16 sfp_addr = 0;
1155 1116 u16 sfp_data = 0;
1156 1117 u16 sfp_stat = 0;
1157 1118 u32 i;
1158 1119
1159 1120 DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
1160 1121
1161 1122 if (hw->phy.type == ixgbe_phy_nl) {
1162 1123 /*
1163 1124 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1164 1125 * 0xC30D. These registers are used to talk to the SFP+
1165 1126 * module's EEPROM through the SDA/SCL (I2C) interface.
1166 1127 */
1167 1128 sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1168 1129 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1169 1130 hw->phy.ops.write_reg(hw,
1170 - IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1171 - IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1172 - sfp_addr);
1131 + IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1132 + IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1133 + sfp_addr);
1173 1134
1174 1135 /* Poll status */
1175 1136 for (i = 0; i < 100; i++) {
1176 1137 hw->phy.ops.read_reg(hw,
1177 - IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1178 - IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1179 - &sfp_stat);
1138 + IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1139 + IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1140 + &sfp_stat);
1180 1141 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1181 1142 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1182 1143 break;
1183 1144 msec_delay(10);
1184 1145 }
1185 1146
1186 1147 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1187 1148 DEBUGOUT("EEPROM read did not pass.\n");
1188 1149 status = IXGBE_ERR_SFP_NOT_PRESENT;
1189 1150 goto out;
1190 1151 }
1191 1152
1192 1153 /* Read data */
1193 1154 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1194 - IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1155 + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1195 1156
1196 1157 *eeprom_data = (u8)(sfp_data >> 8);
1197 1158 } else {
1198 1159 status = IXGBE_ERR_PHY;
1199 1160 goto out;
1200 1161 }
1201 1162
1202 1163 out:
1203 1164 return status;
1204 1165 }
1205 1166
/**
 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 *
 * Returns a bitmask of IXGBE_PHYSICAL_LAYER_* flags; the checks below are
 * ordered so that later, more specific matches override earlier ones.
 **/
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		/* Read the PHY's extended-ability bits to see which
		 * BASE-T rates the copper PHY advertises */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	/* Non-copper: derive the layer from the AUTOC link mode select
	 * and PMA/PMD fields */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	/* For the NetLogic PHY, the detected SFP module type decides the
	 * physical layer, overriding the AUTOC-derived value above */
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	/* Certain device ids have fixed media; these override everything */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}
1308 1268
1309 1269 /**
1310 1270 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1311 1271 * port devices.
1312 1272 * @hw: pointer to the HW structure
1313 1273 *
1314 1274 * Calls common function and corrects issue with some single port devices
1315 1275 * that enable LAN1 but not LAN0.
1316 1276 **/
1317 1277 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1318 1278 {
1319 1279 struct ixgbe_bus_info *bus = &hw->bus;
1320 1280 u16 pci_gen = 0;
1321 1281 u16 pci_ctrl2 = 0;
1322 1282
1323 1283 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1324 1284
1325 1285 ixgbe_set_lan_id_multi_port_pcie(hw);
1326 1286
1327 1287 /* check if LAN0 is disabled */
1328 1288 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1329 1289 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1330 1290
1331 1291 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1332 1292
1333 1293 /* if LAN0 is completely disabled force function to 0 */
1334 1294 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1335 1295 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1336 1296 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1337 1297
1338 1298 bus->func = 0;
1339 1299 }
1340 1300 }
1341 1301 }
1342 1302
1343 1303 /**
1344 1304 * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1345 1305 * @hw: pointer to hardware structure
1346 1306 *
1347 1307 **/
1348 1308 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
|
↓ open down ↓ |
110 lines elided |
↑ open up ↑ |
1349 1309 {
1350 1310 u32 regval;
1351 1311 u32 i;
1352 1312
1353 1313 DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1354 1314
1355 1315 /* Enable relaxed ordering */
1356 1316 for (i = 0; ((i < hw->mac.max_tx_queues) &&
1357 1317 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1358 1318 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1359 - regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1319 + regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1360 1320 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1361 1321 }
1362 1322
1363 1323 for (i = 0; ((i < hw->mac.max_rx_queues) &&
1364 1324 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1365 1325 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1366 - regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
1367 - IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
1326 + regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
1327 + IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
1368 1328 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1369 1329 }
1370 1330
1331 +}
1332 +
1333 +/**
1334 + * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1335 + * @hw: pointer to hardware structure
1336 + * @num_pb: number of packet buffers to allocate
1337 + * @headroom: reserve n KB of headroom
1338 + * @strategy: packet buffer allocation strategy
1339 + **/
1340 +static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1341 + u32 headroom, int strategy)
1342 +{
1343 + u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1344 + u8 i = 0;
1345 + UNREFERENCED_1PARAMETER(headroom);
1346 +
1347 + if (!num_pb)
1348 + return;
1349 +
1350 + /* Setup Rx packet buffer sizes */
1351 + switch (strategy) {
1352 + case PBA_STRATEGY_WEIGHTED:
1353 + /* Setup the first four at 80KB */
1354 + rxpktsize = IXGBE_RXPBSIZE_80KB;
1355 + for (; i < 4; i++)
1356 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1357 + /* Setup the last four at 48KB...don't re-init i */
1358 + rxpktsize = IXGBE_RXPBSIZE_48KB;
1359 + /* Fall Through */
1360 + case PBA_STRATEGY_EQUAL:
1361 + default:
1362 + /* Divide the remaining Rx packet buffer evenly among the TCs */
1363 + for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1364 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1365 + break;
1366 + }
1367 +
1368 + /* Setup Tx packet buffer sizes */
1369 + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1370 + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1371 +
1372 + return;
1371 1373 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX