415 /* Start the HW */
416 status = hw->mac.ops.start_hw(hw);
417 }
418
419 return status;
420 }
421
422 /**
423 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
424 * @hw: pointer to hardware structure
425 *
426 * Clears all hardware statistics counters by reading them from the hardware
427 * Statistics counters are clear on read.
428 **/
429 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
430 {
431 u16 i = 0;
432
433 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
434
435 IXGBE_READ_REG(hw, IXGBE_CRCERRS);
436 IXGBE_READ_REG(hw, IXGBE_ILLERRC);
437 IXGBE_READ_REG(hw, IXGBE_ERRBC);
438 IXGBE_READ_REG(hw, IXGBE_MSPDC);
439 for (i = 0; i < 8; i++)
440 IXGBE_READ_REG(hw, IXGBE_MPC(i));
441
442 IXGBE_READ_REG(hw, IXGBE_MLFC);
443 IXGBE_READ_REG(hw, IXGBE_MRFC);
444 IXGBE_READ_REG(hw, IXGBE_RLEC);
445 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
446 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
447 if (hw->mac.type >= ixgbe_mac_82599EB) {
448 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
449 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
450 } else {
451 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
452 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
453 }
454
455 for (i = 0; i < 8; i++) {
456 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
457 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
458 if (hw->mac.type >= ixgbe_mac_82599EB) {
459 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
460 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
461 } else {
462 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
463 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
464 }
465 }
466 if (hw->mac.type >= ixgbe_mac_82599EB)
467 for (i = 0; i < 8; i++)
468 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
469 IXGBE_READ_REG(hw, IXGBE_PRC64);
470 IXGBE_READ_REG(hw, IXGBE_PRC127);
471 IXGBE_READ_REG(hw, IXGBE_PRC255);
472 IXGBE_READ_REG(hw, IXGBE_PRC511);
473 IXGBE_READ_REG(hw, IXGBE_PRC1023);
474 IXGBE_READ_REG(hw, IXGBE_PRC1522);
475 IXGBE_READ_REG(hw, IXGBE_GPRC);
476 IXGBE_READ_REG(hw, IXGBE_BPRC);
477 IXGBE_READ_REG(hw, IXGBE_MPRC);
478 IXGBE_READ_REG(hw, IXGBE_GPTC);
479 IXGBE_READ_REG(hw, IXGBE_GORCL);
480 IXGBE_READ_REG(hw, IXGBE_GORCH);
481 IXGBE_READ_REG(hw, IXGBE_GOTCL);
482 IXGBE_READ_REG(hw, IXGBE_GOTCH);
483 if (hw->mac.type == ixgbe_mac_82598EB)
484 for (i = 0; i < 8; i++)
485 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
486 IXGBE_READ_REG(hw, IXGBE_RUC);
487 IXGBE_READ_REG(hw, IXGBE_RFC);
488 IXGBE_READ_REG(hw, IXGBE_ROC);
489 IXGBE_READ_REG(hw, IXGBE_RJC);
490 IXGBE_READ_REG(hw, IXGBE_MNGPRC);
491 IXGBE_READ_REG(hw, IXGBE_MNGPDC);
492 IXGBE_READ_REG(hw, IXGBE_MNGPTC);
493 IXGBE_READ_REG(hw, IXGBE_TORL);
494 IXGBE_READ_REG(hw, IXGBE_TORH);
495 IXGBE_READ_REG(hw, IXGBE_TPR);
496 IXGBE_READ_REG(hw, IXGBE_TPT);
497 IXGBE_READ_REG(hw, IXGBE_PTC64);
498 IXGBE_READ_REG(hw, IXGBE_PTC127);
499 IXGBE_READ_REG(hw, IXGBE_PTC255);
500 IXGBE_READ_REG(hw, IXGBE_PTC511);
501 IXGBE_READ_REG(hw, IXGBE_PTC1023);
502 IXGBE_READ_REG(hw, IXGBE_PTC1522);
503 IXGBE_READ_REG(hw, IXGBE_MPTC);
504 IXGBE_READ_REG(hw, IXGBE_BPTC);
505 for (i = 0; i < 16; i++) {
506 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
507 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
508 if (hw->mac.type >= ixgbe_mac_82599EB) {
509 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
510 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
511 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
512 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
513 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
514 } else {
515 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
516 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
517 }
518 }
519
520 if (hw->mac.type == ixgbe_mac_X540) {
521 if (hw->phy.id == 0)
522 ixgbe_identify_phy(hw);
523 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
524 IXGBE_MDIO_PCS_DEV_TYPE, &i);
525 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
526 IXGBE_MDIO_PCS_DEV_TYPE, &i);
527 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
528 IXGBE_MDIO_PCS_DEV_TYPE, &i);
529 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
530 IXGBE_MDIO_PCS_DEV_TYPE, &i);
531 }
532
533 return IXGBE_SUCCESS;
534 }
535
536 /**
537 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
538 * @hw: pointer to hardware structure
539 * @pba_num: stores the part number string from the EEPROM
540 * @pba_num_size: part number string buffer length
541 *
542 * Reads the part number string from the EEPROM.
799 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
800 {
801 u32 reg_val;
802 u16 i;
803
804 DEBUGFUNC("ixgbe_stop_adapter_generic");
805
806 /*
807 * Set the adapter_stopped flag so other driver functions stop touching
808 * the hardware
809 */
810 hw->adapter_stopped = TRUE;
811
812 /* Disable the receive unit */
813 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
814
815 /* Clear interrupt mask to stop interrupts from being generated */
816 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
817
818 /* Clear any pending interrupts, flush previous writes */
819 IXGBE_READ_REG(hw, IXGBE_EICR);
820
821 /* Disable the transmit unit. Each queue must be disabled. */
822 for (i = 0; i < hw->mac.max_tx_queues; i++)
823 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
824
825 /* Disable the receive unit by stopping each queue */
826 for (i = 0; i < hw->mac.max_rx_queues; i++) {
827 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
828 reg_val &= ~IXGBE_RXDCTL_ENABLE;
829 reg_val |= IXGBE_RXDCTL_SWFLSH;
830 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
831 }
832
833 /* flush all queues disables */
834 IXGBE_WRITE_FLUSH(hw);
835 msec_delay(2);
836
837 /*
838 * Prevent the PCI-E bus from hanging by disabling PCI-E master
839 * access and verify no pending requests
952 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
953
954 hw->eeprom.ops.init_params(hw);
955
956 if (words == 0) {
957 status = IXGBE_ERR_INVALID_ARGUMENT;
958 goto out;
959 }
960
961 if (offset + words > hw->eeprom.word_size) {
962 status = IXGBE_ERR_EEPROM;
963 goto out;
964 }
965
966 /*
967 * The EEPROM page size cannot be queried from the chip. We do lazy
968 * initialization. It is worth to do that when we write large buffer.
969 */
970 if ((hw->eeprom.word_page_size == 0) &&
971 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
972 ixgbe_detect_eeprom_page_size_generic(hw, offset);
973
974 /*
975 * We cannot hold synchronization semaphores for too long
976 * to avoid other entity starvation. However it is more efficient
977 * to read in bursts than synchronizing access for each word.
978 */
979 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
980 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
981 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
982 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
983 count, &data[i]);
984
985 if (status != IXGBE_SUCCESS)
986 break;
987 }
988
989 out:
990 return status;
991 }
992
2119 }
2120 hw->addr_ctrl.overflow_promisc = 0;
2121
2122 hw->addr_ctrl.rar_used_count = 1;
2123
2124 /* Zero out the other receive addresses. */
2125 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2126 for (i = 1; i < rar_entries; i++) {
2127 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2128 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2129 }
2130
2131 /* Clear the MTA */
2132 hw->addr_ctrl.mta_in_use = 0;
2133 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2134
2135 DEBUGOUT(" Clearing MTA\n");
2136 for (i = 0; i < hw->mac.mcft_size; i++)
2137 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2138
2139 ixgbe_init_uta_tables(hw);
2140
2141 return IXGBE_SUCCESS;
2142 }
2143
2144 /**
2145 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2146 * @hw: pointer to hardware structure
2147 * @addr: new address
2148 *
2149 * Adds it to unused receive address register or goes into promiscuous mode.
2150 **/
2151 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2152 {
2153 u32 rar_entries = hw->mac.num_rar_entries;
2154 u32 rar;
2155
2156 DEBUGFUNC("ixgbe_add_uc_addr");
2157
2158 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2159 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2160
2161 /*
2333 **/
2334 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2335 u32 mc_addr_count, ixgbe_mc_addr_itr next,
2336 bool clear)
2337 {
2338 u32 i;
2339 u32 vmdq;
2340
2341 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2342
2343 /*
2344 * Set the new number of MC addresses that we are being requested to
2345 * use.
2346 */
2347 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2348 hw->addr_ctrl.mta_in_use = 0;
2349
2350 /* Clear mta_shadow */
2351 if (clear) {
2352 DEBUGOUT(" Clearing MTA\n");
2353 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2354 }
2355
2356 /* Update mta_shadow */
2357 for (i = 0; i < mc_addr_count; i++) {
2358 DEBUGOUT(" Adding the multicast addresses:\n");
2359 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2360 }
2361
2362 /* Enable mta */
2363 for (i = 0; i < hw->mac.mcft_size; i++)
2364 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2365 hw->mac.mta_shadow[i]);
2366
2367 if (hw->addr_ctrl.mta_in_use > 0)
2368 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2369 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2370
2371 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2372 return IXGBE_SUCCESS;
2373 }
2853
2854 ixgbe_release_eeprom_semaphore(hw);
2855 return IXGBE_SUCCESS;
2856 }
2857
2858 /**
2859 * ixgbe_release_swfw_sync - Release SWFW semaphore
2860 * @hw: pointer to hardware structure
2861 * @mask: Mask to specify which semaphore to release
2862 *
2863 * Releases the SWFW semaphore through the GSSR register for the specified
2864 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2865 **/
2866 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2867 {
2868 u32 gssr;
2869 u32 swmask = mask;
2870
2871 DEBUGFUNC("ixgbe_release_swfw_sync");
2872
2873 ixgbe_get_eeprom_semaphore(hw);
2874
2875 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2876 gssr &= ~swmask;
2877 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2878
2879 ixgbe_release_eeprom_semaphore(hw);
2880 }
2881
2882 /**
2883 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
2884 * @hw: pointer to hardware structure
2885 *
2886 * Stops the receive data path and waits for the HW to internally empty
2887 * the Rx security block
2888 **/
2889 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
2890 {
2891 #define IXGBE_MAX_SECRX_POLL 40
2892
2893 int i;
3041 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3042 * @hw: pointer to hardware structure
3043 * @san_mac_addr: SAN MAC address
3044 *
3045 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3046 * per-port, so set_lan_id() must be called before reading the addresses.
3047 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3048 * upon for non-SFP connections, so we must call it here.
3049 **/
3050 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3051 {
3052 u16 san_mac_data, san_mac_offset;
3053 u8 i;
3054
3055 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3056
3057 /*
3058 * First read the EEPROM pointer to see if the MAC addresses are
3059 * available. If they're not, no point in calling set_lan_id() here.
3060 */
3061 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3062
3063 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3064 /*
3065 * No addresses available in this EEPROM. It's not an
3066 * error though, so just wipe the local address and return.
3067 */
3068 for (i = 0; i < 6; i++)
3069 san_mac_addr[i] = 0xFF;
3070
3071 goto san_mac_addr_out;
3072 }
3073
3074 /* make sure we know which port we need to program */
3075 hw->mac.ops.set_lan_id(hw);
3076 /* apply the port offset to the address offset */
3077 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3078 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3079 for (i = 0; i < 3; i++) {
3080 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
3081 san_mac_addr[i * 2] = (u8)(san_mac_data);
3086 san_mac_addr_out:
3087 return IXGBE_SUCCESS;
3088 }
3089
3090 /**
3091 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3092 * @hw: pointer to hardware structure
3093 * @san_mac_addr: SAN MAC address
3094 *
3095 * Write a SAN MAC address to the EEPROM.
3096 **/
3097 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3098 {
3099 s32 status = IXGBE_SUCCESS;
3100 u16 san_mac_data, san_mac_offset;
3101 u8 i;
3102
3103 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3104
3105 /* Look for SAN mac address pointer. If not defined, return */
3106 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3107
3108 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3109 status = IXGBE_ERR_NO_SAN_ADDR_PTR;
3110 goto san_mac_addr_out;
3111 }
3112
3113 /* Make sure we know which port we need to write */
3114 hw->mac.ops.set_lan_id(hw);
3115 /* Apply the port offset to the address offset */
3116 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3117 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3118
3119 for (i = 0; i < 3; i++) {
3120 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3121 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3122 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3123 san_mac_offset++;
3124 }
3125
3126 san_mac_addr_out:
3196 * Either find the mac_id in rar or find the first empty space.
3197 * rar_highwater points to just after the highest currently used
3198 * rar in order to shorten the search. It grows when we add a new
3199 * rar to the top.
3200 */
3201 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3202 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3203
3204 if (((IXGBE_RAH_AV & rar_high) == 0)
3205 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3206 first_empty_rar = rar;
3207 } else if ((rar_high & 0xFFFF) == addr_high) {
3208 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3209 if (rar_low == addr_low)
3210 break; /* found it already in the rars */
3211 }
3212 }
3213
3214 if (rar < hw->mac.rar_highwater) {
3215 /* already there so just add to the pool bits */
3216 ixgbe_set_vmdq(hw, rar, vmdq);
3217 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3218 /* stick it into first empty RAR slot we found */
3219 rar = first_empty_rar;
3220 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3221 } else if (rar == hw->mac.rar_highwater) {
3222 /* add it to the top of the list and inc the highwater mark */
3223 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3224 hw->mac.rar_highwater++;
3225 } else if (rar >= hw->mac.num_rar_entries) {
3226 return IXGBE_ERR_INVALID_MAC_ADDR;
3227 }
3228
3229 /*
3230 * If we found rar[0], make sure the default pool bit (we use pool 0)
3231 * remains cleared to be sure default pool packets will get delivered
3232 */
3233 if (rar == 0)
3234 ixgbe_clear_vmdq(hw, rar, 0);
3235
3236 return rar;
3237 }
3238
3239 /**
3240 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3241 * @hw: pointer to hardware struct
3242 * @rar: receive address register index to disassociate
3243 * @vmdq: VMDq pool index to remove from the rar
3244 **/
3245 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3246 {
3247 u32 mpsar_lo, mpsar_hi;
3248 u32 rar_entries = hw->mac.num_rar_entries;
3249
3250 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3251
3252 /* Make sure we are using a valid rar index range */
3253 if (rar >= rar_entries) {
3254 DEBUGOUT1("RAR index %d is out of range.\n", rar);
4020 != IXGBE_SUCCESS) {
4021 ret_val = IXGBE_ERR_SWFW_SYNC;
4022 goto out;
4023 }
4024
4025 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4026 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4027 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4028 fw_cmd.port_num = (u8)hw->bus.func;
4029 fw_cmd.ver_maj = maj;
4030 fw_cmd.ver_min = min;
4031 fw_cmd.ver_build = build;
4032 fw_cmd.ver_sub = sub;
4033 fw_cmd.hdr.checksum = 0;
4034 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4035 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4036 fw_cmd.pad = 0;
4037 fw_cmd.pad2 = 0;
4038
4039 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4040 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4041 sizeof(fw_cmd));
4042 if (ret_val != IXGBE_SUCCESS)
4043 continue;
4044
4045 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4046 FW_CEM_RESP_STATUS_SUCCESS)
4047 ret_val = IXGBE_SUCCESS;
4048 else
4049 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4050
4051 break;
4052 }
4053
4054 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4055 out:
4056 return ret_val;
4057 }
4058
4059 /**
4073 /* Reserve headroom */
4074 pbsize -= headroom;
4075
4076 if (!num_pb)
4077 num_pb = 1;
4078
4079 /* Divide remaining packet buffer space amongst the number of packet
4080 * buffers requested using supplied strategy.
4081 */
4082 switch (strategy) {
4083 case PBA_STRATEGY_WEIGHTED:
4084 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4085 * buffer with 5/8 of the packet buffer space.
4086 */
4087 rxpktsize = (pbsize * 5) / (num_pb * 4);
4088 pbsize -= rxpktsize * (num_pb / 2);
4089 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4090 for (; i < (num_pb / 2); i++)
4091 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4092 /* Fall through to configure remaining packet buffers */
4093 case PBA_STRATEGY_EQUAL:
4094 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4095 for (; i < num_pb; i++)
4096 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4097 break;
4098 default:
4099 break;
4100 }
4101
4102 /* Only support an equally distributed Tx packet buffer strategy. */
4103 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4104 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4105 for (i = 0; i < num_pb; i++) {
4106 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4107 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4108 }
4109
4110 /* Clear unused TCs, if any, to zero buffer size*/
4111 for (; i < IXGBE_MAX_PB; i++) {
4112 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
|
415 /* Start the HW */
416 status = hw->mac.ops.start_hw(hw);
417 }
418
419 return status;
420 }
421
/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware.
 * Statistics counters are clear on read, so every read value below is
 * intentionally discarded; the (void) casts keep lint quiet about it.
 *
 * Returns IXGBE_SUCCESS always.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;	/* loop index; also reused as MDIO scratch below */

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	(void) IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	(void) IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	(void) IXGBE_READ_REG(hw, IXGBE_ERRBC);
	(void) IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		(void) IXGBE_READ_REG(hw, IXGBE_MPC(i));

	(void) IXGBE_READ_REG(hw, IXGBE_MLFC);
	(void) IXGBE_READ_REG(hw, IXGBE_MRFC);
	(void) IXGBE_READ_REG(hw, IXGBE_RLEC);
	(void) IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	(void) IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	/* 82599 and newer expose the link XON/XOFF rx counts as *CNT regs */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		(void) IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		(void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		(void) IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		(void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority (8 TC) flow control counters */
	for (i = 0; i < 8; i++) {
		(void) IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		(void) IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			(void) IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			(void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			(void) IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			(void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			(void) IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Receive size-bucket and good-packet/byte counters */
	(void) IXGBE_READ_REG(hw, IXGBE_PRC64);
	(void) IXGBE_READ_REG(hw, IXGBE_PRC127);
	(void) IXGBE_READ_REG(hw, IXGBE_PRC255);
	(void) IXGBE_READ_REG(hw, IXGBE_PRC511);
	(void) IXGBE_READ_REG(hw, IXGBE_PRC1023);
	(void) IXGBE_READ_REG(hw, IXGBE_PRC1522);
	(void) IXGBE_READ_REG(hw, IXGBE_GPRC);
	(void) IXGBE_READ_REG(hw, IXGBE_BPRC);
	(void) IXGBE_READ_REG(hw, IXGBE_MPRC);
	(void) IXGBE_READ_REG(hw, IXGBE_GPTC);
	(void) IXGBE_READ_REG(hw, IXGBE_GORCL);
	(void) IXGBE_READ_REG(hw, IXGBE_GORCH);
	(void) IXGBE_READ_REG(hw, IXGBE_GOTCL);
	(void) IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			(void) IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	(void) IXGBE_READ_REG(hw, IXGBE_RUC);
	(void) IXGBE_READ_REG(hw, IXGBE_RFC);
	(void) IXGBE_READ_REG(hw, IXGBE_ROC);
	(void) IXGBE_READ_REG(hw, IXGBE_RJC);
	(void) IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	(void) IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	(void) IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	(void) IXGBE_READ_REG(hw, IXGBE_TORL);
	(void) IXGBE_READ_REG(hw, IXGBE_TORH);
	(void) IXGBE_READ_REG(hw, IXGBE_TPR);
	(void) IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Transmit size-bucket counters */
	(void) IXGBE_READ_REG(hw, IXGBE_PTC64);
	(void) IXGBE_READ_REG(hw, IXGBE_PTC127);
	(void) IXGBE_READ_REG(hw, IXGBE_PTC255);
	(void) IXGBE_READ_REG(hw, IXGBE_PTC511);
	(void) IXGBE_READ_REG(hw, IXGBE_PTC1023);
	(void) IXGBE_READ_REG(hw, IXGBE_PTC1522);
	(void) IXGBE_READ_REG(hw, IXGBE_MPTC);
	(void) IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue counters (16 queues) */
	for (i = 0; i < 16; i++) {
		(void) IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		(void) IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			(void) IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			(void) IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			(void) IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			(void) IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			(void) IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			(void) IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			(void) IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	if (hw->mac.type == ixgbe_mac_X540) {
		/* lazily identify the PHY before the first MDIO access */
		if (hw->phy.id == 0)
			(void) ixgbe_identify_phy(hw);
		/*
		 * X540 keeps additional error counters in the PHY; read and
		 * discard them as well (presumably clear-on-read too —
		 * the results are simply dumped into i as scratch).
		 */
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
		    IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
		    IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
		    IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
		    IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
535
536 /**
537 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
538 * @hw: pointer to hardware structure
539 * @pba_num: stores the part number string from the EEPROM
540 * @pba_num_size: part number string buffer length
541 *
542 * Reads the part number string from the EEPROM.
799 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
800 {
801 u32 reg_val;
802 u16 i;
803
804 DEBUGFUNC("ixgbe_stop_adapter_generic");
805
806 /*
807 * Set the adapter_stopped flag so other driver functions stop touching
808 * the hardware
809 */
810 hw->adapter_stopped = TRUE;
811
812 /* Disable the receive unit */
813 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
814
815 /* Clear interrupt mask to stop interrupts from being generated */
816 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
817
818 /* Clear any pending interrupts, flush previous writes */
819 (void) IXGBE_READ_REG(hw, IXGBE_EICR);
820
821 /* Disable the transmit unit. Each queue must be disabled. */
822 for (i = 0; i < hw->mac.max_tx_queues; i++)
823 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
824
825 /* Disable the receive unit by stopping each queue */
826 for (i = 0; i < hw->mac.max_rx_queues; i++) {
827 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
828 reg_val &= ~IXGBE_RXDCTL_ENABLE;
829 reg_val |= IXGBE_RXDCTL_SWFLSH;
830 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
831 }
832
833 /* flush all queues disables */
834 IXGBE_WRITE_FLUSH(hw);
835 msec_delay(2);
836
837 /*
838 * Prevent the PCI-E bus from hanging by disabling PCI-E master
839 * access and verify no pending requests
952 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
953
954 hw->eeprom.ops.init_params(hw);
955
956 if (words == 0) {
957 status = IXGBE_ERR_INVALID_ARGUMENT;
958 goto out;
959 }
960
961 if (offset + words > hw->eeprom.word_size) {
962 status = IXGBE_ERR_EEPROM;
963 goto out;
964 }
965
966 /*
967 * The EEPROM page size cannot be queried from the chip. We do lazy
968 * initialization. It is worth to do that when we write large buffer.
969 */
970 if ((hw->eeprom.word_page_size == 0) &&
971 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
972 status = ixgbe_detect_eeprom_page_size_generic(hw, offset);
973 if (status != IXGBE_SUCCESS)
974 goto out;
975
976 /*
977 * We cannot hold synchronization semaphores for too long
978 * to avoid other entity starvation. However it is more efficient
979 * to read in bursts than synchronizing access for each word.
980 */
981 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
982 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
983 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
984 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
985 count, &data[i]);
986
987 if (status != IXGBE_SUCCESS)
988 break;
989 }
990
991 out:
992 return status;
993 }
994
2121 }
2122 hw->addr_ctrl.overflow_promisc = 0;
2123
2124 hw->addr_ctrl.rar_used_count = 1;
2125
2126 /* Zero out the other receive addresses. */
2127 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2128 for (i = 1; i < rar_entries; i++) {
2129 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2130 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2131 }
2132
2133 /* Clear the MTA */
2134 hw->addr_ctrl.mta_in_use = 0;
2135 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2136
2137 DEBUGOUT(" Clearing MTA\n");
2138 for (i = 0; i < hw->mac.mcft_size; i++)
2139 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2140
2141 /* Should always be IXGBE_SUCCESS. */
2142 return ixgbe_init_uta_tables(hw);
2143 }
2144
2145 /**
2146 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2147 * @hw: pointer to hardware structure
2148 * @addr: new address
2149 *
2150 * Adds it to unused receive address register or goes into promiscuous mode.
2151 **/
2152 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2153 {
2154 u32 rar_entries = hw->mac.num_rar_entries;
2155 u32 rar;
2156
2157 DEBUGFUNC("ixgbe_add_uc_addr");
2158
2159 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2160 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2161
2162 /*
2334 **/
2335 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2336 u32 mc_addr_count, ixgbe_mc_addr_itr next,
2337 bool clear)
2338 {
2339 u32 i;
2340 u32 vmdq;
2341
2342 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2343
2344 /*
2345 * Set the new number of MC addresses that we are being requested to
2346 * use.
2347 */
2348 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2349 hw->addr_ctrl.mta_in_use = 0;
2350
2351 /* Clear mta_shadow */
2352 if (clear) {
2353 DEBUGOUT(" Clearing MTA\n");
2354 (void) memset(&hw->mac.mta_shadow, 0,
2355 sizeof(hw->mac.mta_shadow));
2356 }
2357
2358 /* Update mta_shadow */
2359 for (i = 0; i < mc_addr_count; i++) {
2360 DEBUGOUT(" Adding the multicast addresses:\n");
2361 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2362 }
2363
2364 /* Enable mta */
2365 for (i = 0; i < hw->mac.mcft_size; i++)
2366 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2367 hw->mac.mta_shadow[i]);
2368
2369 if (hw->addr_ctrl.mta_in_use > 0)
2370 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2371 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2372
2373 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2374 return IXGBE_SUCCESS;
2375 }
2855
2856 ixgbe_release_eeprom_semaphore(hw);
2857 return IXGBE_SUCCESS;
2858 }
2859
2860 /**
2861 * ixgbe_release_swfw_sync - Release SWFW semaphore
2862 * @hw: pointer to hardware structure
2863 * @mask: Mask to specify which semaphore to release
2864 *
2865 * Releases the SWFW semaphore through the GSSR register for the specified
2866 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2867 **/
2868 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2869 {
2870 u32 gssr;
2871 u32 swmask = mask;
2872
2873 DEBUGFUNC("ixgbe_release_swfw_sync");
2874
2875 (void) ixgbe_get_eeprom_semaphore(hw);
2876
2877 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2878 gssr &= ~swmask;
2879 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2880
2881 ixgbe_release_eeprom_semaphore(hw);
2882 }
2883
2884 /**
2885 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
2886 * @hw: pointer to hardware structure
2887 *
2888 * Stops the receive data path and waits for the HW to internally empty
2889 * the Rx security block
2890 **/
2891 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
2892 {
2893 #define IXGBE_MAX_SECRX_POLL 40
2894
2895 int i;
3043 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3044 * @hw: pointer to hardware structure
3045 * @san_mac_addr: SAN MAC address
3046 *
3047 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3048 * per-port, so set_lan_id() must be called before reading the addresses.
3049 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3050 * upon for non-SFP connections, so we must call it here.
3051 **/
3052 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3053 {
3054 u16 san_mac_data, san_mac_offset;
3055 u8 i;
3056
3057 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3058
3059 /*
3060 * First read the EEPROM pointer to see if the MAC addresses are
3061 * available. If they're not, no point in calling set_lan_id() here.
3062 */
3063 (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3064
3065 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3066 /*
3067 * No addresses available in this EEPROM. It's not an
3068 * error though, so just wipe the local address and return.
3069 */
3070 for (i = 0; i < 6; i++)
3071 san_mac_addr[i] = 0xFF;
3072
3073 goto san_mac_addr_out;
3074 }
3075
3076 /* make sure we know which port we need to program */
3077 hw->mac.ops.set_lan_id(hw);
3078 /* apply the port offset to the address offset */
3079 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3080 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3081 for (i = 0; i < 3; i++) {
3082 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
3083 san_mac_addr[i * 2] = (u8)(san_mac_data);
3088 san_mac_addr_out:
3089 return IXGBE_SUCCESS;
3090 }
3091
3092 /**
3093 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3094 * @hw: pointer to hardware structure
3095 * @san_mac_addr: SAN MAC address
3096 *
3097 * Write a SAN MAC address to the EEPROM.
3098 **/
3099 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3100 {
3101 s32 status = IXGBE_SUCCESS;
3102 u16 san_mac_data, san_mac_offset;
3103 u8 i;
3104
3105 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3106
3107 /* Look for SAN mac address pointer. If not defined, return */
3108 (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3109
3110 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3111 status = IXGBE_ERR_NO_SAN_ADDR_PTR;
3112 goto san_mac_addr_out;
3113 }
3114
3115 /* Make sure we know which port we need to write */
3116 hw->mac.ops.set_lan_id(hw);
3117 /* Apply the port offset to the address offset */
3118 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3119 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3120
3121 for (i = 0; i < 3; i++) {
3122 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3123 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3124 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3125 san_mac_offset++;
3126 }
3127
3128 san_mac_addr_out:
3198 * Either find the mac_id in rar or find the first empty space.
3199 * rar_highwater points to just after the highest currently used
3200 * rar in order to shorten the search. It grows when we add a new
3201 * rar to the top.
3202 */
3203 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3204 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3205
3206 if (((IXGBE_RAH_AV & rar_high) == 0)
3207 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3208 first_empty_rar = rar;
3209 } else if ((rar_high & 0xFFFF) == addr_high) {
3210 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3211 if (rar_low == addr_low)
3212 break; /* found it already in the rars */
3213 }
3214 }
3215
3216 if (rar < hw->mac.rar_highwater) {
3217 /* already there so just add to the pool bits */
3218 (void) ixgbe_set_vmdq(hw, rar, vmdq);
3219 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3220 /* stick it into first empty RAR slot we found */
3221 rar = first_empty_rar;
3222 (void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3223 } else if (rar == hw->mac.rar_highwater) {
3224 /* add it to the top of the list and inc the highwater mark */
3225 (void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3226 hw->mac.rar_highwater++;
3227 } else if (rar >= hw->mac.num_rar_entries) {
3228 return IXGBE_ERR_INVALID_MAC_ADDR;
3229 }
3230
3231 /*
3232 * If we found rar[0], make sure the default pool bit (we use pool 0)
3233 * remains cleared to be sure default pool packets will get delivered
3234 */
3235 if (rar == 0)
3236 (void) ixgbe_clear_vmdq(hw, rar, 0);
3237
3238 return rar;
3239 }
3240
3241 /**
3242 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3243 * @hw: pointer to hardware struct
3244 * @rar: receive address register index to disassociate
3245 * @vmdq: VMDq pool index to remove from the rar
3246 **/
3247 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3248 {
3249 u32 mpsar_lo, mpsar_hi;
3250 u32 rar_entries = hw->mac.num_rar_entries;
3251
3252 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3253
3254 /* Make sure we are using a valid rar index range */
3255 if (rar >= rar_entries) {
3256 DEBUGOUT1("RAR index %d is out of range.\n", rar);
4022 != IXGBE_SUCCESS) {
4023 ret_val = IXGBE_ERR_SWFW_SYNC;
4024 goto out;
4025 }
4026
4027 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4028 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4029 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4030 fw_cmd.port_num = (u8)hw->bus.func;
4031 fw_cmd.ver_maj = maj;
4032 fw_cmd.ver_min = min;
4033 fw_cmd.ver_build = build;
4034 fw_cmd.ver_sub = sub;
4035 fw_cmd.hdr.checksum = 0;
4036 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4037 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4038 fw_cmd.pad = 0;
4039 fw_cmd.pad2 = 0;
4040
4041 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4042 /* LINTED */
4043 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4044 sizeof(fw_cmd));
4045 if (ret_val != IXGBE_SUCCESS)
4046 continue;
4047
4048 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4049 FW_CEM_RESP_STATUS_SUCCESS)
4050 ret_val = IXGBE_SUCCESS;
4051 else
4052 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4053
4054 break;
4055 }
4056
4057 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4058 out:
4059 return ret_val;
4060 }
4061
4062 /**
4076 /* Reserve headroom */
4077 pbsize -= headroom;
4078
4079 if (!num_pb)
4080 num_pb = 1;
4081
4082 /* Divide remaining packet buffer space amongst the number of packet
4083 * buffers requested using supplied strategy.
4084 */
4085 switch (strategy) {
4086 case PBA_STRATEGY_WEIGHTED:
4087 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4088 * buffer with 5/8 of the packet buffer space.
4089 */
4090 rxpktsize = (pbsize * 5) / (num_pb * 4);
4091 pbsize -= rxpktsize * (num_pb / 2);
4092 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4093 for (; i < (num_pb / 2); i++)
4094 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4095 /* Fall through to configure remaining packet buffers */
4096 /* FALLTHRU */
4097 case PBA_STRATEGY_EQUAL:
4098 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4099 for (; i < num_pb; i++)
4100 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4101 break;
4102 default:
4103 break;
4104 }
4105
4106 /* Only support an equally distributed Tx packet buffer strategy. */
4107 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4108 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4109 for (i = 0; i < num_pb; i++) {
4110 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4111 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4112 }
4113
4114 /* Clear unused TCs, if any, to zero buffer size*/
4115 for (; i < IXGBE_MAX_PB; i++) {
4116 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
|