226 } else if (phy_id) {
227 hw->phy.id = phy_id;
228 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
229 goto out;
230 }
231
232 /* In case the PHY needs to be in mdio slow mode,
233 * set slow mode and try to get the PHY id again.
234 */
235 if (hw->mac.type < e1000_pch_lpt) {
236 hw->phy.ops.release(hw);
237 ret_val = e1000_set_mdio_slow_mode_hv(hw);
238 if (!ret_val)
239 ret_val = e1000_get_phy_id(hw);
240 hw->phy.ops.acquire(hw);
241 }
242
243 if (ret_val)
244 return FALSE;
245 out:
246 if ((hw->mac.type == e1000_pch_lpt) ||
247 (hw->mac.type == e1000_pch_spt)) {
248 /* Only unforce SMBus if ME is not active */
249 if (!(E1000_READ_REG(hw, E1000_FWSM) &
250 E1000_ICH_FWSM_FW_VALID)) {
251 /* Unforce SMBus mode in PHY */
252 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
253 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
254 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
255
256 /* Unforce SMBus mode in MAC */
257 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
258 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
259 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
260 }
261 }
262
263 return TRUE;
264 }
265
266 /**
267 * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
328
329 /* It is not possible to be certain of the current state of ULP
330 * so forcibly disable it.
331 */
332 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
333 e1000_disable_ulp_lpt_lp(hw, TRUE);
334
335 ret_val = hw->phy.ops.acquire(hw);
336 if (ret_val) {
337 DEBUGOUT("Failed to initialize PHY flow\n");
338 goto out;
339 }
340
341 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
342 * inaccessible and resetting the PHY is not blocked, toggle the
343 * LANPHYPC Value bit to force the interconnect to PCIe mode.
344 */
345 switch (hw->mac.type) {
346 case e1000_pch_lpt:
347 case e1000_pch_spt:
348 if (e1000_phy_is_accessible_pchlan(hw))
349 break;
350
351 /* Before toggling LANPHYPC, see if PHY is accessible by
352 * forcing MAC to SMBus mode first.
353 */
354 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
355 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
356 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
357
358		/* Wait 50 milliseconds so the MAC can finish any retries
359		 * it may still be performing from previous attempts to
360		 * acknowledge PHY read requests.
361 */
362 msec_delay(50);
363
364 /* fall-through */
365 case e1000_pch2lan:
366 if (e1000_phy_is_accessible_pchlan(hw))
367 break;
476 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
477
478 phy->id = e1000_phy_unknown;
479
480 ret_val = e1000_init_phy_workarounds_pchlan(hw);
481 if (ret_val)
482 return ret_val;
483
484 if (phy->id == e1000_phy_unknown)
485 switch (hw->mac.type) {
486 default:
487 ret_val = e1000_get_phy_id(hw);
488 if (ret_val)
489 return ret_val;
490 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
491 break;
492 /* fall-through */
493 case e1000_pch2lan:
494 case e1000_pch_lpt:
495 case e1000_pch_spt:
496 /* In case the PHY needs to be in mdio slow mode,
497 * set slow mode and try to get the PHY id again.
498 */
499 ret_val = e1000_set_mdio_slow_mode_hv(hw);
500 if (ret_val)
501 return ret_val;
502 ret_val = e1000_get_phy_id(hw);
503 if (ret_val)
504 return ret_val;
505 break;
506 }
507 phy->type = e1000_get_phy_type_from_id(phy->id);
508
509 switch (phy->type) {
510 case e1000_phy_82577:
511 case e1000_phy_82579:
512 case e1000_phy_i217:
513 phy->ops.check_polarity = e1000_check_polarity_82577;
514 phy->ops.force_speed_duplex =
515 e1000_phy_force_speed_duplex_82577;
624
625 /**
626 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
627 * @hw: pointer to the HW structure
628 *
629 * Initialize family-specific NVM parameters and function
630 * pointers.
631 **/
632 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
633 {
634 struct e1000_nvm_info *nvm = &hw->nvm;
635 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
636 u32 gfpreg, sector_base_addr, sector_end_addr;
637 u16 i;
638 u32 nvm_size;
639
640 DEBUGFUNC("e1000_init_nvm_params_ich8lan");
641
642 nvm->type = e1000_nvm_flash_sw;
643
644 if (hw->mac.type == e1000_pch_spt) {
645 /* in SPT, gfpreg doesn't exist. NVM size is taken from the
646 * STRAP register. This is because in SPT the GbE Flash region
647 * is no longer accessed through the flash registers. Instead,
648 * the mechanism has changed, and the Flash region access
649 * registers are now implemented in GbE memory space.
650 */
651 nvm->flash_base_addr = 0;
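		/* Bits 5:1 of E1000_STRAP, plus one and scaled by
		 * NVM_SIZE_MULTIPLIER, give the total NVM size in bytes; the
		 * flash holds two banks, so each bank is half of that
		 * (converted to 16-bit words below).
		 */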
652 nvm_size =
653 (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
654 * NVM_SIZE_MULTIPLIER;
655 nvm->flash_bank_size = nvm_size / 2;
656 /* Adjust to word count */
657 nvm->flash_bank_size /= sizeof(u16);
658 /* Set the base address for flash register access */
659 hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
660 } else {
661 /* Can't read flash registers if register set isn't mapped. */
662 if (!hw->flash_address) {
663 DEBUGOUT("ERROR: Flash registers not mapped\n");
664 return -E1000_ERR_CONFIG;
684 << FLASH_SECTOR_ADDR_SHIFT);
685 nvm->flash_bank_size /= 2;
686 /* Adjust to word count */
687 nvm->flash_bank_size /= sizeof(u16);
688 }
689
690 nvm->word_size = E1000_SHADOW_RAM_WORDS;
691
692 /* Clear shadow ram */
693 for (i = 0; i < nvm->word_size; i++) {
694 dev_spec->shadow_ram[i].modified = FALSE;
695 dev_spec->shadow_ram[i].value = 0xFFFF;
696 }
697
698 E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
699 E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
700
701 /* Function Pointers */
702 nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
703 nvm->ops.release = e1000_release_nvm_ich8lan;
704 if (hw->mac.type == e1000_pch_spt) {
705 nvm->ops.read = e1000_read_nvm_spt;
706 nvm->ops.update = e1000_update_nvm_checksum_spt;
707 } else {
708 nvm->ops.read = e1000_read_nvm_ich8lan;
709 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
710 }
711 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
712 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
713 nvm->ops.write = e1000_write_nvm_ich8lan;
714
715 return E1000_SUCCESS;
716 }
717
718 /**
719 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
720 * @hw: pointer to the HW structure
721 *
722 * Initialize family-specific MAC parameters and function
723 * pointers.
724 **/
777 /* check management mode */
778 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
779 /* ID LED init */
780 mac->ops.id_led_init = e1000_id_led_init_generic;
781 /* blink LED */
782 mac->ops.blink_led = e1000_blink_led_generic;
783 /* setup LED */
784 mac->ops.setup_led = e1000_setup_led_generic;
785 /* cleanup LED */
786 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
787 /* turn on/off LED */
788 mac->ops.led_on = e1000_led_on_ich8lan;
789 mac->ops.led_off = e1000_led_off_ich8lan;
790 break;
791 case e1000_pch2lan:
792 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
793 mac->ops.rar_set = e1000_rar_set_pch2lan;
794 /* fall-through */
795 case e1000_pch_lpt:
796 case e1000_pch_spt:
797 /* multicast address update for pch2 */
798 mac->ops.update_mc_addr_list =
799 e1000_update_mc_addr_list_pch2lan;
800 /* fall-through */
801 case e1000_pchlan:
802 /* check management mode */
803 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
804 /* ID LED init */
805 mac->ops.id_led_init = e1000_id_led_init_pchlan;
806 /* setup LED */
807 mac->ops.setup_led = e1000_setup_led_pchlan;
808 /* cleanup LED */
809 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
810 /* turn on/off LED */
811 mac->ops.led_on = e1000_led_on_pchlan;
812 mac->ops.led_off = e1000_led_off_pchlan;
813 break;
814 default:
815 break;
816 }
817
818 if ((mac->type == e1000_pch_lpt) ||
819 (mac->type == e1000_pch_spt)) {
820 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
821 mac->ops.rar_set = e1000_rar_set_pch_lpt;
822 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
823 mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
824 }
825
826 /* Enable PCS Lock-loss workaround for ICH8 */
827 if (mac->type == e1000_ich8lan)
828 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
829
830 return E1000_SUCCESS;
831 }
832
833 /**
834 * __e1000_access_emi_reg_locked - Read/write EMI register
835 * @hw: pointer to the HW structure
836 * @addr: EMI address to program
837 * @data: pointer to value to read/write from/to the EMI address
838 * @read: boolean flag to indicate read or write
839 *
1559 return E1000_SUCCESS;
1560
1561 /* First we want to see if the MII Status Register reports
1562 * link. If so, then we want to get the current speed/duplex
1563 * of the PHY.
1564 */
1565 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1566 if (ret_val)
1567 return ret_val;
1568
1569 if (hw->mac.type == e1000_pchlan) {
1570 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1571 if (ret_val)
1572 return ret_val;
1573 }
1574
1575 /* When connected at 10Mbps half-duplex, some parts are excessively
1576 * aggressive resulting in many collisions. To avoid this, increase
1577 * the IPG and reduce Rx latency in the PHY.
1578 */
1579 if (((hw->mac.type == e1000_pch2lan) ||
1580 (hw->mac.type == e1000_pch_lpt) ||
1581 (hw->mac.type == e1000_pch_spt)) && link) {
1582 u16 speed, duplex;
1583
1584 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1585 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1586 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1587
1588 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1589 tipg_reg |= 0xFF;
1590 /* Reduce Rx latency in analog PHY */
1591 emi_val = 0;
1592 } else if (hw->mac.type == e1000_pch_spt &&
1593 duplex == FULL_DUPLEX && speed != SPEED_1000) {
1594 tipg_reg |= 0xC;
1595 emi_val = 1;
1596 } else {
1597			/* Restore the default values */
1598 tipg_reg |= 0x08;
1599 emi_val = 1;
1600 }
1601
1602 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1603
1604 ret_val = hw->phy.ops.acquire(hw);
1605 if (ret_val)
1606 return ret_val;
1607
1608 if (hw->mac.type == e1000_pch2lan)
1609 emi_addr = I82579_RX_CONFIG;
1610 else
1611 emi_addr = I217_RX_CONFIG;
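		/* emi_val selects the Rx latency profile written to the PHY's
		 * Rx configuration EMI register: 0 reduces Rx latency for
		 * 10 Mb/s half-duplex, 1 restores the default behavior.
		 */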
1612 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1613
1614 if (hw->mac.type == e1000_pch_lpt ||
1615 hw->mac.type == e1000_pch_spt) {
1616 u16 phy_reg;
1617
1618 hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1619 &phy_reg);
1620 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1621 if (speed == SPEED_100 || speed == SPEED_10)
1622 phy_reg |= 0x3E8;
1623 else
1624 phy_reg |= 0xFA;
1625 hw->phy.ops.write_reg_locked(hw,
1626 I217_PLL_CLOCK_GATE_REG,
1627 phy_reg);
1628
1629 if (speed == SPEED_1000) {
1630 hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1631 &phy_reg);
1632
1633 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1634
1635 hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1636 phy_reg);
1637 }
1638 }
1639 hw->phy.ops.release(hw);
1640
1641 if (ret_val)
1642 return ret_val;
1643
1644 if (hw->mac.type == e1000_pch_spt) {
1645 u16 data;
1646 u16 ptr_gap;
1647
1648 if (speed == SPEED_1000) {
1649 ret_val = hw->phy.ops.acquire(hw);
1650 if (ret_val)
1651 return ret_val;
1652
1653 ret_val = hw->phy.ops.read_reg_locked(hw,
1654 PHY_REG(776, 20),
1655 &data);
1656 if (ret_val) {
1657 hw->phy.ops.release(hw);
1658 return ret_val;
1659 }
1660
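				/* Bits 11:2 of PHY register 776.20 hold a
				 * pointer gap value; enforce a minimum of
				 * 0x18 when linked at 1000 Mb/s.
				 */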
1661 ptr_gap = (data & (0x3FF << 2)) >> 2;
1662 if (ptr_gap < 0x18) {
1663 data &= ~(0x3FF << 2);
1664 data |= (0x18 << 2);
1673 ret_val = hw->phy.ops.acquire(hw);
1674 if (ret_val)
1675 return ret_val;
1676
1677 ret_val = hw->phy.ops.write_reg_locked(hw,
1678 PHY_REG(776, 20),
1679 0xC023);
1680 hw->phy.ops.release(hw);
1681 if (ret_val)
1682 return ret_val;
1683
1684 }
1685 }
1686 }
1687
1688 /* I217 Packet Loss issue:
1689 * ensure that FEXTNVM4 Beacon Duration is set correctly
1690 * on power up.
1691 * Set the Beacon Duration for I217 to 8 usec
1692 */
1693 if ((hw->mac.type == e1000_pch_lpt) ||
1694 (hw->mac.type == e1000_pch_spt)) {
1695 u32 mac_reg;
1696
1697 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1698 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1699 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1700 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1701 }
1702
1703 /* Work-around I218 hang issue */
1704 if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1705 (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1706 (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1707 (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1708 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1709 if (ret_val)
1710 return ret_val;
1711 }
1712 if ((hw->mac.type == e1000_pch_lpt) ||
1713 (hw->mac.type == e1000_pch_spt)) {
1714 /* Set platform power management values for
1715 * Latency Tolerance Reporting (LTR)
1716 * Optimized Buffer Flush/Fill (OBFF)
1717 */
1718 ret_val = e1000_platform_pm_pch_lpt(hw, link);
1719 if (ret_val)
1720 return ret_val;
1721 }
1722
1723 /* Clear link partner's EEE ability */
1724 hw->dev_spec.ich8lan.eee_lp_ability = 0;
1725
1726 /* FEXTNVM6 K1-off workaround */
1727 if (hw->mac.type == e1000_pch_spt) {
1728 u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1729 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1730
1731 if ((pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) &&
1732 (hw->dev_spec.ich8lan.disable_k1_off == FALSE))
1733 fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1734 else
1735 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1736
1737 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1738 }
1739
1740 if (!link)
1741 return E1000_SUCCESS; /* No link detected */
1742
1743 mac->get_link_status = FALSE;
1744
1745 switch (hw->mac.type) {
1746 case e1000_pch2lan:
1814 * @hw: pointer to the HW structure
1815 *
1816 * Initialize family-specific function pointers for PHY, MAC, and NVM.
1817 **/
1818 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1819 {
1820 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1821
1822 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1823 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1824 switch (hw->mac.type) {
1825 case e1000_ich8lan:
1826 case e1000_ich9lan:
1827 case e1000_ich10lan:
1828 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1829 break;
1830 case e1000_pchlan:
1831 case e1000_pch2lan:
1832 case e1000_pch_lpt:
1833 case e1000_pch_spt:
1834 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1835 break;
1836 default:
1837 break;
1838 }
1839 }
1840
1841 /**
1842 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1843 * @hw: pointer to the HW structure
1844 *
1845 * Acquires the mutex for performing NVM operations.
1846 **/
1847 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1848 {
1849 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1850
1851 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1852
1853 return E1000_SUCCESS;
2278 * is needed due to an issue where the NVM configuration is
2279 * not properly autoloaded after power transitions.
2280 * Therefore, after each PHY reset, we will load the
2281 * configuration data out of the NVM manually.
2282 */
2283 switch (hw->mac.type) {
2284 case e1000_ich8lan:
2285 if (phy->type != e1000_phy_igp_3)
2286 return ret_val;
2287
2288 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2289 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2290 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2291 break;
2292 }
2293		/* fall-through */
2294 case e1000_pchlan:
2295 case e1000_pch2lan:
2296 case e1000_pch_lpt:
2297 case e1000_pch_spt:
2298 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2299 break;
2300 default:
2301 return ret_val;
2302 }
2303
2304 ret_val = hw->phy.ops.acquire(hw);
2305 if (ret_val)
2306 return ret_val;
2307
2308 data = E1000_READ_REG(hw, E1000_FEXTNVM);
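	/* Nothing to do unless the NVM image requests a software-driven
	 * configuration (the SW_CONFIG bit in FEXTNVM).
	 */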
2309 if (!(data & sw_cfg_mask))
2310 goto release;
2311
2312 /* Make sure HW does not configure LCD from PHY
2313 * extended configuration before SW configuration
2314 */
2315 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2316 if ((hw->mac.type < e1000_pch2lan) &&
2317 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
3395 * @hw: pointer to the HW structure
3396 * @bank: pointer to the variable that returns the active bank
3397 *
3398 * Reads signature byte from the NVM using the flash access registers.
3399 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3400 **/
3401 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3402 {
3403 u32 eecd;
3404 struct e1000_nvm_info *nvm = &hw->nvm;
3405 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3406 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
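	/* Defaults are byte-based for pre-SPT parts: the signature lives in
	 * the upper byte of NVM word 0x13 (E1000_ICH_NVM_SIG_WORD), hence
	 * word offset * 2 + 1.  The SPT case below overrides both offsets.
	 */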
3407 u32 nvm_dword = 0;
3408 u8 sig_byte = 0;
3409 s32 ret_val;
3410
3411 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3412
3413 switch (hw->mac.type) {
3414 case e1000_pch_spt:
3415 bank1_offset = nvm->flash_bank_size;
3416 act_offset = E1000_ICH_NVM_SIG_WORD;
3417
3418 /* set bank to 0 in case flash read fails */
3419 *bank = 0;
3420
3421 /* Check bank 0 */
3422 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3423 &nvm_dword);
3424 if (ret_val)
3425 return ret_val;
3426 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3427 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3428 E1000_ICH_NVM_SIG_VALUE) {
3429 *bank = 0;
3430 return E1000_SUCCESS;
3431 }
3432
3433 /* Check bank 1 */
3434 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3654 * can be started.
3655 **/
3656 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3657 {
3658 union ich8_hws_flash_status hsfsts;
3659 s32 ret_val = -E1000_ERR_NVM;
3660
3661 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3662
3663 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3664
3665 /* Check if the flash descriptor is valid */
3666 if (!hsfsts.hsf_status.fldesvalid) {
3667 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
3668 return -E1000_ERR_NVM;
3669 }
3670
3671 /* Clear FCERR and DAEL in hw status by writing 1 */
3672 hsfsts.hsf_status.flcerr = 1;
3673 hsfsts.hsf_status.dael = 1;
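	/* On SPT the flash registers are mapped into GbE memory space and
	 * only 32-bit accesses are supported, so HSFSTS is written as the
	 * low half of a dword; older parts use a plain 16-bit write.
	 */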
3674 if (hw->mac.type == e1000_pch_spt)
3675 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3676 hsfsts.regval & 0xFFFF);
3677 else
3678 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3679
3680 /* Either we should have a hardware SPI cycle in progress
3681 * bit to check against, in order to start a new cycle or
3682 * FDONE bit should be changed in the hardware so that it
3683 * is 1 after hardware reset, which can then be used as an
3684 * indication whether a cycle is in progress or has been
3685 * completed.
3686 */
3687
3688 if (!hsfsts.hsf_status.flcinprog) {
3689 /* There is no cycle running at present,
3690 * so we can start a cycle.
3691 * Begin by setting Flash Cycle Done.
3692 */
3693 hsfsts.hsf_status.flcdone = 1;
3694 if (hw->mac.type == e1000_pch_spt)
3695 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3696 hsfsts.regval & 0xFFFF);
3697 else
3698 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3699 hsfsts.regval);
3700 ret_val = E1000_SUCCESS;
3701 } else {
3702 s32 i;
3703
3704		/* Otherwise poll for some time so the current
3705 * cycle has a chance to end before giving up.
3706 */
3707 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3708 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3709 ICH_FLASH_HSFSTS);
3710 if (!hsfsts.hsf_status.flcinprog) {
3711 ret_val = E1000_SUCCESS;
3712 break;
3713 }
3714 usec_delay(1);
3715 }
3716 if (ret_val == E1000_SUCCESS) {
3717			/* The previous cycle finished before our poll timed out,
3718			 * so now set the Flash Cycle Done bit.
3719 */
3720 hsfsts.hsf_status.flcdone = 1;
3721 if (hw->mac.type == e1000_pch_spt)
3722 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3723 hsfsts.regval & 0xFFFF);
3724 else
3725 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3726 hsfsts.regval);
3727 } else {
3728 DEBUGOUT("Flash controller busy, cannot get access\n");
3729 }
3730 }
3731
3732 return ret_val;
3733 }
3734
3735 /**
3736 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3737 * @hw: pointer to the HW structure
3738 * @timeout: maximum time to wait for completion
3739 *
3740 * This function starts a flash cycle and waits for its completion.
3741 **/
3742 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3743 {
3744 union ich8_hws_flash_ctrl hsflctl;
3745 union ich8_hws_flash_status hsfsts;
3746 u32 i = 0;
3747
3748 DEBUGFUNC("e1000_flash_cycle_ich8lan");
3749
3750 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3751 if (hw->mac.type == e1000_pch_spt)
3752 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3753 else
3754 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3755 hsflctl.hsf_ctrl.flcgo = 1;
3756
3757 if (hw->mac.type == e1000_pch_spt)
3758 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3759 hsflctl.regval << 16);
3760 else
3761 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3762
3763 /* wait till FDONE bit is set to 1 */
3764 do {
3765 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3766 if (hsfsts.hsf_status.flcdone)
3767 break;
3768 usec_delay(1);
3769 } while (i++ < timeout);
3770
3771 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3772 return E1000_SUCCESS;
3773
3774 return -E1000_ERR_NVM;
3775 }
3776
3777 /**
3820 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3821 }
3822
3823 /**
3824 * e1000_read_flash_byte_ich8lan - Read byte from flash
3825 * @hw: pointer to the HW structure
3826 * @offset: The offset of the byte to read.
3827 * @data: Pointer to a byte to store the value read.
3828 *
3829 * Reads a single byte from the NVM using the flash access registers.
3830 **/
3831 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3832 u8 *data)
3833 {
3834 s32 ret_val;
3835 u16 word = 0;
3836
3837	/* In SPT, only 32-bit flash access is supported,
3838	 * so this function should not be called.
3839 */
3840 if (hw->mac.type == e1000_pch_spt)
3841 return -E1000_ERR_NVM;
3842 else
3843 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3844
3845 if (ret_val)
3846 return ret_val;
3847
3848 *data = (u8)word;
3849
3850 return E1000_SUCCESS;
3851 }
3852
3853 /**
3854 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
3855 * @hw: pointer to the HW structure
3856 * @offset: The offset (in bytes) of the byte or word to read.
3857 * @size: Size of data to read, 1=byte 2=word
3858 * @data: Pointer to the word to store the value read.
3859 *
3860 * Reads a byte or word from the NVM using the flash access registers.
3928
3929 /**
3930 * e1000_read_flash_data32_ich8lan - Read dword from NVM
3931 * @hw: pointer to the HW structure
3932 * @offset: The offset (in bytes) of the dword to read.
3933 * @data: Pointer to the dword to store the value read.
3934 *
3935 *  Reads a dword from the NVM using the flash access registers.
3936 **/
3937 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3938 u32 *data)
3939 {
3940 union ich8_hws_flash_status hsfsts;
3941 union ich8_hws_flash_ctrl hsflctl;
3942 u32 flash_linear_addr;
3943 s32 ret_val = -E1000_ERR_NVM;
3944 u8 count = 0;
3945
3946	DEBUGFUNC("e1000_read_flash_data32_ich8lan");
3947
3948 if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3949 hw->mac.type != e1000_pch_spt)
3950 return -E1000_ERR_NVM;
3951 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3952 hw->nvm.flash_base_addr);
3953
3954 do {
3955 usec_delay(1);
3956 /* Steps */
3957 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3958 if (ret_val != E1000_SUCCESS)
3959 break;
3960		/* In SPT, this register is in LAN memory space, not flash.
3961		 * Therefore, only 32-bit access is supported.
3962 */
3963 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3964
3965		/* fldbcount holds the byte count minus one; 3 selects a dword. */
3966 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3967 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3968		/* In SPT, this register is in LAN memory space, not flash.
3969		 * Therefore, only 32-bit access is supported.
4370 *  If the bit is 0, the EEPROM was modified but the checksum was not
4371 *  recalculated, in which case we calculate the checksum and set bit 6.
4372 **/
4373 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4374 {
4375 s32 ret_val;
4376 u16 data;
4377 u16 word;
4378 u16 valid_csum_mask;
4379
4380 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4381
4382 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
4383 * the checksum needs to be fixed. This bit is an indication that
4384 * the NVM was prepared by OEM software and did not calculate
4385 * the checksum...a likely scenario.
4386 */
4387 switch (hw->mac.type) {
4388 case e1000_pch_lpt:
4389 case e1000_pch_spt:
4390 word = NVM_COMPAT;
4391 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4392 break;
4393 default:
4394 word = NVM_FUTURE_INIT_WORD1;
4395 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4396 break;
4397 }
4398
4399 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4400 if (ret_val)
4401 return ret_val;
4402
4403 if (!(data & valid_csum_mask)) {
4404 data |= valid_csum_mask;
4405 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4406 if (ret_val)
4407 return ret_val;
4408 ret_val = hw->nvm.ops.update(hw);
4409 if (ret_val)
4417 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4418 * @hw: pointer to the HW structure
4419 * @offset: The offset (in bytes) of the byte/word to read.
4420 * @size: Size of data to read, 1=byte 2=word
4421 * @data: The byte(s) to write to the NVM.
4422 *
4423 * Writes one/two bytes to the NVM using the flash access registers.
4424 **/
4425 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4426 u8 size, u16 data)
4427 {
4428 union ich8_hws_flash_status hsfsts;
4429 union ich8_hws_flash_ctrl hsflctl;
4430 u32 flash_linear_addr;
4431 u32 flash_data = 0;
4432 s32 ret_val;
4433 u8 count = 0;
4434
4435	DEBUGFUNC("e1000_write_flash_data_ich8lan");
4436
4437 if (hw->mac.type == e1000_pch_spt) {
4438 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4439 return -E1000_ERR_NVM;
4440 } else {
4441 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4442 return -E1000_ERR_NVM;
4443 }
4444
4445 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4446 hw->nvm.flash_base_addr);
4447
4448 do {
4449 usec_delay(1);
4450 /* Steps */
4451 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4452 if (ret_val != E1000_SUCCESS)
4453 break;
4454		/* In SPT, this register is in LAN memory space, not
4455		 * flash. Therefore, only 32-bit access is supported.
4456 */
4457 if (hw->mac.type == e1000_pch_spt)
4458 hsflctl.regval =
4459 E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4460 else
4461 hsflctl.regval =
4462 E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4463
4464 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4465 hsflctl.hsf_ctrl.fldbcount = size - 1;
4466 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4467		/* In SPT, this register is in LAN memory space,
4468		 * not flash. Therefore, only 32-bit access is
4469		 * supported.
4470 */
4471 if (hw->mac.type == e1000_pch_spt)
4472 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4473 hsflctl.regval << 16);
4474 else
4475 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4476 hsflctl.regval);
4477
4478 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4479
4480 if (size == 1)
4481 flash_data = (u32)data & 0x00FF;
4482 else
4483 flash_data = (u32)data;
4484
4485 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4486
4487		/* If FCERR is set, clear it and retry the whole
4488		 * sequence a few more times; otherwise we are done.
4489 */
4490 ret_val =
4491 e1000_flash_cycle_ich8lan(hw,
4513
4514 /**
4515 * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4516 * @hw: pointer to the HW structure
4517 * @offset: The offset (in bytes) of the dwords to read.
4518 * @data: The 4 bytes to write to the NVM.
4519 *
4520 *  Writes a dword to the NVM using the flash access registers.
4521 **/
4522 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4523 u32 data)
4524 {
4525 union ich8_hws_flash_status hsfsts;
4526 union ich8_hws_flash_ctrl hsflctl;
4527 u32 flash_linear_addr;
4528 s32 ret_val;
4529 u8 count = 0;
4530
4531 DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4532
4533 if (hw->mac.type == e1000_pch_spt) {
4534 if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4535 return -E1000_ERR_NVM;
4536 }
4537 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4538 hw->nvm.flash_base_addr);
4539 do {
4540 usec_delay(1);
4541 /* Steps */
4542 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4543 if (ret_val != E1000_SUCCESS)
4544 break;
4545
4546		/* In SPT, this register is in LAN memory space, not
4547		 * flash. Therefore, only 32-bit access is supported.
4548 */
4549 if (hw->mac.type == e1000_pch_spt)
4550 hsflctl.regval = E1000_READ_FLASH_REG(hw,
4551 ICH_FLASH_HSFSTS)
4552 >> 16;
4553 else
4554 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4555 ICH_FLASH_HSFCTL);
4556
4557 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4558 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4559
4560		/* In SPT, this register is in LAN memory space,
4561		 * not flash. Therefore, only 32-bit access is
4562		 * supported.
4563 */
4564 if (hw->mac.type == e1000_pch_spt)
4565 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4566 hsflctl.regval << 16);
4567 else
4568 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4569 hsflctl.regval);
4570
4571 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4572
4573 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4574
4575		/* If FCERR is set, clear it and retry the whole
4576		 * sequence a few more times; otherwise we are done.
4577 */
4578 ret_val = e1000_flash_cycle_ich8lan(hw,
4579 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4580
4581 if (ret_val == E1000_SUCCESS)
4582 break;
4583
4584 /* If we're here, then things are most likely
4746 default:
4747 return -E1000_ERR_NVM;
4748 }
4749
4750 /* Start with the base address, then add the sector offset. */
4751 flash_linear_addr = hw->nvm.flash_base_addr;
4752 flash_linear_addr += (bank) ? flash_bank_size : 0;
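	/* Bank 1 starts one bank-size above the flash base address. */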
4753
4754 for (j = 0; j < iteration; j++) {
4755 do {
4756 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4757
4758 /* Steps */
4759 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4760 if (ret_val)
4761 return ret_val;
4762
4763			/* Write the value 11b (block erase) into the Flash
4764			 * Cycle field of the HW flash control register
4765 */
4766 if (hw->mac.type == e1000_pch_spt)
4767 hsflctl.regval =
4768 E1000_READ_FLASH_REG(hw,
4769 ICH_FLASH_HSFSTS)>>16;
4770 else
4771 hsflctl.regval =
4772 E1000_READ_FLASH_REG16(hw,
4773 ICH_FLASH_HSFCTL);
4774
4775 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4776 if (hw->mac.type == e1000_pch_spt)
4777 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4778 hsflctl.regval << 16);
4779 else
4780 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4781 hsflctl.regval);
4782
4783 /* Write the last 24 bits of an index within the
4784 * block into Flash Linear address field in Flash
4785 * Address.
4786 */
4787 flash_linear_addr += (j * sector_size);
4788 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4789 flash_linear_addr);
4790
4791 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4792 if (ret_val == E1000_SUCCESS)
4793 break;
4794
4795 /* Check if FCERR is set to 1. If 1,
4796 * clear it and try the whole sequence
5194 if (hw->mac.type == e1000_ich8lan) {
5195 reg = E1000_READ_REG(hw, E1000_STATUS);
5196 reg &= ~(1UL << 31);
5197 E1000_WRITE_REG(hw, E1000_STATUS, reg);
5198 }
5199
5200	/* Work around a descriptor data corruption issue seen with NFS v2 UDP
5201	 * traffic by simply disabling the NFS filtering capability.
5202 */
5203 reg = E1000_READ_REG(hw, E1000_RFCTL);
5204 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5205
5206 /* Disable IPv6 extension header parsing because some malformed
5207 * IPv6 headers can hang the Rx.
5208 */
5209 if (hw->mac.type == e1000_ich8lan)
5210 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5211 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5212
5213	/* Enable ECC on Lynx Point (LPT) and Sunrise Point (SPT) */
5214 if ((hw->mac.type == e1000_pch_lpt) ||
5215 (hw->mac.type == e1000_pch_spt)) {
5216 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5217 reg |= E1000_PBECCSTS_ECC_ENABLE;
5218 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5219
5220 reg = E1000_READ_REG(hw, E1000_CTRL);
5221 reg |= E1000_CTRL_MEHE;
5222 E1000_WRITE_REG(hw, E1000_CTRL, reg);
5223 }
5224
5225 return;
5226 }
5227
5228 /**
5229 * e1000_setup_link_ich8lan - Setup flow control and link settings
5230 * @hw: pointer to the HW structure
5231 *
5232 * Determines which flow control settings to use, then configures flow
5233 * control. Calls the appropriate media-specific link configuration
5234 * function. Assuming the adapter has a valid link partner, a valid link
5235 * should be established. Assumes the hardware has previously been reset
5628 * than 10Mbps w/o EEE.
5629 **/
5630 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5631 {
5632 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5633 u32 phy_ctrl;
5634 s32 ret_val;
5635
5636 DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5637
5638 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5639 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5640
5641 if (hw->phy.type == e1000_phy_i217) {
5642 u16 phy_reg, device_id = hw->device_id;
5643
5644 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5645 (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5646 (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5647 (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5648 (hw->mac.type == e1000_pch_spt)) {
5649 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5650
5651 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5652 fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5653 }
5654
5655 ret_val = hw->phy.ops.acquire(hw);
5656 if (ret_val)
5657 goto out;
5658
5659 if (!dev_spec->eee_disable) {
5660 u16 eee_advert;
5661
5662 ret_val =
5663 e1000_read_emi_reg_locked(hw,
5664 I217_EEE_ADVERTISEMENT,
5665 &eee_advert);
5666 if (ret_val)
5667 goto release;
5668
226 } else if (phy_id) {
227 hw->phy.id = phy_id;
228 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
229 goto out;
230 }
231
232 /* In case the PHY needs to be in mdio slow mode,
233 * set slow mode and try to get the PHY id again.
234 */
235 if (hw->mac.type < e1000_pch_lpt) {
236 hw->phy.ops.release(hw);
237 ret_val = e1000_set_mdio_slow_mode_hv(hw);
238 if (!ret_val)
239 ret_val = e1000_get_phy_id(hw);
240 hw->phy.ops.acquire(hw);
241 }
242
243 if (ret_val)
244 return FALSE;
245 out:
246 if (hw->mac.type >= e1000_pch_lpt) {
247 /* Only unforce SMBus if ME is not active */
248 if (!(E1000_READ_REG(hw, E1000_FWSM) &
249 E1000_ICH_FWSM_FW_VALID)) {
250 /* Unforce SMBus mode in PHY */
251 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
252 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
253 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
254
255 /* Unforce SMBus mode in MAC */
256 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
257 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
258 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
259 }
260 }
261
262 return TRUE;
263 }
264
265 /**
266 * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
327
328 /* It is not possible to be certain of the current state of ULP
329 * so forcibly disable it.
330 */
331 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
332 e1000_disable_ulp_lpt_lp(hw, TRUE);
333
334 ret_val = hw->phy.ops.acquire(hw);
335 if (ret_val) {
336 DEBUGOUT("Failed to initialize PHY flow\n");
337 goto out;
338 }
339
340 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
341 * inaccessible and resetting the PHY is not blocked, toggle the
342 * LANPHYPC Value bit to force the interconnect to PCIe mode.
343 */
344 switch (hw->mac.type) {
345 case e1000_pch_lpt:
346 case e1000_pch_spt:
347 case e1000_pch_cnp:
348 if (e1000_phy_is_accessible_pchlan(hw))
349 break;
350
351 /* Before toggling LANPHYPC, see if PHY is accessible by
352 * forcing MAC to SMBus mode first.
353 */
354 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
355 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
356 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
357
358		/* Wait 50 milliseconds so the MAC can finish any retries
359		 * it may still be performing from previous attempts to
360		 * acknowledge PHY read requests.
361 */
362 msec_delay(50);
363
364 /* fall-through */
365 case e1000_pch2lan:
366 if (e1000_phy_is_accessible_pchlan(hw))
367 break;
476 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
477
478 phy->id = e1000_phy_unknown;
479
480 ret_val = e1000_init_phy_workarounds_pchlan(hw);
481 if (ret_val)
482 return ret_val;
483
484 if (phy->id == e1000_phy_unknown)
485 switch (hw->mac.type) {
486 default:
487 ret_val = e1000_get_phy_id(hw);
488 if (ret_val)
489 return ret_val;
490 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
491 break;
492 /* fall-through */
493 case e1000_pch2lan:
494 case e1000_pch_lpt:
495 case e1000_pch_spt:
496 case e1000_pch_cnp:
497 /* In case the PHY needs to be in mdio slow mode,
498 * set slow mode and try to get the PHY id again.
499 */
500 ret_val = e1000_set_mdio_slow_mode_hv(hw);
501 if (ret_val)
502 return ret_val;
503 ret_val = e1000_get_phy_id(hw);
504 if (ret_val)
505 return ret_val;
506 break;
507 }
508 phy->type = e1000_get_phy_type_from_id(phy->id);
509
510 switch (phy->type) {
511 case e1000_phy_82577:
512 case e1000_phy_82579:
513 case e1000_phy_i217:
514 phy->ops.check_polarity = e1000_check_polarity_82577;
515 phy->ops.force_speed_duplex =
516 e1000_phy_force_speed_duplex_82577;
625
626 /**
627 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
628 * @hw: pointer to the HW structure
629 *
630 * Initialize family-specific NVM parameters and function
631 * pointers.
632 **/
633 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
634 {
635 struct e1000_nvm_info *nvm = &hw->nvm;
636 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
637 u32 gfpreg, sector_base_addr, sector_end_addr;
638 u16 i;
639 u32 nvm_size;
640
641 DEBUGFUNC("e1000_init_nvm_params_ich8lan");
642
643 nvm->type = e1000_nvm_flash_sw;
644
645 if (hw->mac.type >= e1000_pch_spt) {
646 /* in SPT, gfpreg doesn't exist. NVM size is taken from the
647 * STRAP register. This is because in SPT the GbE Flash region
648 * is no longer accessed through the flash registers. Instead,
649 * the mechanism has changed, and the Flash region access
650 * registers are now implemented in GbE memory space.
651 */
652 nvm->flash_base_addr = 0;
653 nvm_size =
654 (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
655 * NVM_SIZE_MULTIPLIER;
656 nvm->flash_bank_size = nvm_size / 2;
657 /* Adjust to word count */
658 nvm->flash_bank_size /= sizeof(u16);
659 /* Set the base address for flash register access */
660 hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
661 } else {
662 /* Can't read flash registers if register set isn't mapped. */
663 if (!hw->flash_address) {
664 DEBUGOUT("ERROR: Flash registers not mapped\n");
665 return -E1000_ERR_CONFIG;
685 << FLASH_SECTOR_ADDR_SHIFT);
686 nvm->flash_bank_size /= 2;
687 /* Adjust to word count */
688 nvm->flash_bank_size /= sizeof(u16);
689 }
690
691 nvm->word_size = E1000_SHADOW_RAM_WORDS;
692
693 /* Clear shadow ram */
694 for (i = 0; i < nvm->word_size; i++) {
695 dev_spec->shadow_ram[i].modified = FALSE;
696 dev_spec->shadow_ram[i].value = 0xFFFF;
697 }
698
699 E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
700 E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
701
702 /* Function Pointers */
703 nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
704 nvm->ops.release = e1000_release_nvm_ich8lan;
705 if (hw->mac.type >= e1000_pch_spt) {
706 nvm->ops.read = e1000_read_nvm_spt;
707 nvm->ops.update = e1000_update_nvm_checksum_spt;
708 } else {
709 nvm->ops.read = e1000_read_nvm_ich8lan;
710 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
711 }
712 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
713 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
714 nvm->ops.write = e1000_write_nvm_ich8lan;
715
716 return E1000_SUCCESS;
717 }
718
719 /**
720 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
721 * @hw: pointer to the HW structure
722 *
723 * Initialize family-specific MAC parameters and function
724 * pointers.
725 **/
778 /* check management mode */
779 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
780 /* ID LED init */
781 mac->ops.id_led_init = e1000_id_led_init_generic;
782 /* blink LED */
783 mac->ops.blink_led = e1000_blink_led_generic;
784 /* setup LED */
785 mac->ops.setup_led = e1000_setup_led_generic;
786 /* cleanup LED */
787 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
788 /* turn on/off LED */
789 mac->ops.led_on = e1000_led_on_ich8lan;
790 mac->ops.led_off = e1000_led_off_ich8lan;
791 break;
792 case e1000_pch2lan:
793 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
794 mac->ops.rar_set = e1000_rar_set_pch2lan;
795 /* fall-through */
796 case e1000_pch_lpt:
797 case e1000_pch_spt:
798 case e1000_pch_cnp:
799 /* multicast address update for pch2 */
800 mac->ops.update_mc_addr_list =
801 e1000_update_mc_addr_list_pch2lan;
802 /* fall-through */
803 case e1000_pchlan:
804 /* check management mode */
805 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
806 /* ID LED init */
807 mac->ops.id_led_init = e1000_id_led_init_pchlan;
808 /* setup LED */
809 mac->ops.setup_led = e1000_setup_led_pchlan;
810 /* cleanup LED */
811 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
812 /* turn on/off LED */
813 mac->ops.led_on = e1000_led_on_pchlan;
814 mac->ops.led_off = e1000_led_off_pchlan;
815 break;
816 default:
817 break;
818 }
819
820 if (mac->type >= e1000_pch_lpt) {
821 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
822 mac->ops.rar_set = e1000_rar_set_pch_lpt;
823 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
824 mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
825 }
826
827 /* Enable PCS Lock-loss workaround for ICH8 */
828 if (mac->type == e1000_ich8lan)
829 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
830
831 return E1000_SUCCESS;
832 }
833
834 /**
835 * __e1000_access_emi_reg_locked - Read/write EMI register
836 * @hw: pointer to the HW structure
837 * @addr: EMI address to program
838 * @data: pointer to value to read/write from/to the EMI address
839 * @read: boolean flag to indicate read or write
840 *
1560 return E1000_SUCCESS;
1561
1562 /* First we want to see if the MII Status Register reports
1563 * link. If so, then we want to get the current speed/duplex
1564 * of the PHY.
1565 */
1566 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1567 if (ret_val)
1568 return ret_val;
1569
1570 if (hw->mac.type == e1000_pchlan) {
1571 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1572 if (ret_val)
1573 return ret_val;
1574 }
1575
1576 /* When connected at 10Mbps half-duplex, some parts are excessively
1577 * aggressive resulting in many collisions. To avoid this, increase
1578 * the IPG and reduce Rx latency in the PHY.
1579 */
1580 if ((hw->mac.type >= e1000_pch2lan) && link) {
1581 u16 speed, duplex;
1582
1583 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1584 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1585 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1586
1587 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1588 tipg_reg |= 0xFF;
1589 /* Reduce Rx latency in analog PHY */
1590 emi_val = 0;
1591 } else if (hw->mac.type >= e1000_pch_spt &&
1592 duplex == FULL_DUPLEX && speed != SPEED_1000) {
1593 tipg_reg |= 0xC;
1594 emi_val = 1;
1595 } else {
1596			/* Restore the default values */
1597 tipg_reg |= 0x08;
1598 emi_val = 1;
1599 }
1600
1601 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1602
1603 ret_val = hw->phy.ops.acquire(hw);
1604 if (ret_val)
1605 return ret_val;
1606
1607 if (hw->mac.type == e1000_pch2lan)
1608 emi_addr = I82579_RX_CONFIG;
1609 else
1610 emi_addr = I217_RX_CONFIG;
1611 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1612
1613 if (hw->mac.type >= e1000_pch_lpt) {
1614 u16 phy_reg;
1615
1616 hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1617 &phy_reg);
1618 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1619 if (speed == SPEED_100 || speed == SPEED_10)
1620 phy_reg |= 0x3E8;
1621 else
1622 phy_reg |= 0xFA;
1623 hw->phy.ops.write_reg_locked(hw,
1624 I217_PLL_CLOCK_GATE_REG,
1625 phy_reg);
1626
1627 if (speed == SPEED_1000) {
1628 hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1629 &phy_reg);
1630
1631 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1632
1633 hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1634 phy_reg);
1635 }
1636 }
1637 hw->phy.ops.release(hw);
1638
1639 if (ret_val)
1640 return ret_val;
1641
1642 if (hw->mac.type >= e1000_pch_spt) {
1643 u16 data;
1644 u16 ptr_gap;
1645
1646 if (speed == SPEED_1000) {
1647 ret_val = hw->phy.ops.acquire(hw);
1648 if (ret_val)
1649 return ret_val;
1650
1651 ret_val = hw->phy.ops.read_reg_locked(hw,
1652 PHY_REG(776, 20),
1653 &data);
1654 if (ret_val) {
1655 hw->phy.ops.release(hw);
1656 return ret_val;
1657 }
1658
1659 ptr_gap = (data & (0x3FF << 2)) >> 2;
1660 if (ptr_gap < 0x18) {
1661 data &= ~(0x3FF << 2);
1662 data |= (0x18 << 2);
1671 ret_val = hw->phy.ops.acquire(hw);
1672 if (ret_val)
1673 return ret_val;
1674
1675 ret_val = hw->phy.ops.write_reg_locked(hw,
1676 PHY_REG(776, 20),
1677 0xC023);
1678 hw->phy.ops.release(hw);
1679 if (ret_val)
1680 return ret_val;
1681
1682 }
1683 }
1684 }
1685
1686 /* I217 Packet Loss issue:
1687 * ensure that FEXTNVM4 Beacon Duration is set correctly
1688 * on power up.
1689 * Set the Beacon Duration for I217 to 8 usec
1690 */
1691 if (hw->mac.type >= e1000_pch_lpt) {
1692 u32 mac_reg;
1693
1694 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1695 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1696 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1697 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1698 }
1699
1700 /* Work-around I218 hang issue */
1701 if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1702 (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1703 (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1704 (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1705 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1706 if (ret_val)
1707 return ret_val;
1708 }
1709 if (hw->mac.type >= e1000_pch_lpt) {
1710 /* Set platform power management values for
1711 * Latency Tolerance Reporting (LTR)
1712 * Optimized Buffer Flush/Fill (OBFF)
1713 */
1714 ret_val = e1000_platform_pm_pch_lpt(hw, link);
1715 if (ret_val)
1716 return ret_val;
1717 }
1718
1719 /* Clear link partner's EEE ability */
1720 hw->dev_spec.ich8lan.eee_lp_ability = 0;
1721
1722 /* FEXTNVM6 K1-off workaround - for SPT only */
1723 if (hw->mac.type == e1000_pch_spt) {
1724 u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1725 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1726
1727 if ((pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) &&
1728 (hw->dev_spec.ich8lan.disable_k1_off == FALSE))
1729 fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1730 else
1731 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1732
1733 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1734 }
1735
1736 if (!link)
1737 return E1000_SUCCESS; /* No link detected */
1738
1739 mac->get_link_status = FALSE;
1740
1741 switch (hw->mac.type) {
1742 case e1000_pch2lan:
1810 * @hw: pointer to the HW structure
1811 *
1812 * Initialize family-specific function pointers for PHY, MAC, and NVM.
1813 **/
1814 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1815 {
1816 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1817
1818 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1819 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1820 switch (hw->mac.type) {
1821 case e1000_ich8lan:
1822 case e1000_ich9lan:
1823 case e1000_ich10lan:
1824 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1825 break;
1826 case e1000_pchlan:
1827 case e1000_pch2lan:
1828 case e1000_pch_lpt:
1829 case e1000_pch_spt:
1830 case e1000_pch_cnp:
1831 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1832 break;
1833 default:
1834 break;
1835 }
1836 }
1837
1838 /**
1839 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1840 * @hw: pointer to the HW structure
1841 *
1842 * Acquires the mutex for performing NVM operations.
1843 **/
1844 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1845 {
1846 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1847
1848 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1849
1850 return E1000_SUCCESS;
2275 * is needed due to an issue where the NVM configuration is
2276 * not properly autoloaded after power transitions.
2277 * Therefore, after each PHY reset, we will load the
2278 * configuration data out of the NVM manually.
2279 */
2280 switch (hw->mac.type) {
2281 case e1000_ich8lan:
2282 if (phy->type != e1000_phy_igp_3)
2283 return ret_val;
2284
2285 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2286 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2287 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2288 break;
2289 }
2290		/* fall-through */
2291 case e1000_pchlan:
2292 case e1000_pch2lan:
2293 case e1000_pch_lpt:
2294 case e1000_pch_spt:
2295 case e1000_pch_cnp:
2296 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2297 break;
2298 default:
2299 return ret_val;
2300 }
2301
2302 ret_val = hw->phy.ops.acquire(hw);
2303 if (ret_val)
2304 return ret_val;
2305
2306 data = E1000_READ_REG(hw, E1000_FEXTNVM);
2307 if (!(data & sw_cfg_mask))
2308 goto release;
2309
2310 /* Make sure HW does not configure LCD from PHY
2311 * extended configuration before SW configuration
2312 */
2313 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2314 if ((hw->mac.type < e1000_pch2lan) &&
2315 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
3393 * @hw: pointer to the HW structure
3394 * @bank: pointer to the variable that returns the active bank
3395 *
3396 * Reads signature byte from the NVM using the flash access registers.
3397 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3398 **/
3399 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3400 {
3401 u32 eecd;
3402 struct e1000_nvm_info *nvm = &hw->nvm;
3403 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3404 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3405 u32 nvm_dword = 0;
3406 u8 sig_byte = 0;
3407 s32 ret_val;
3408
3409 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3410
3411 switch (hw->mac.type) {
3412 case e1000_pch_spt:
3413 case e1000_pch_cnp:
3414 bank1_offset = nvm->flash_bank_size;
3415 act_offset = E1000_ICH_NVM_SIG_WORD;
3416
3417 /* set bank to 0 in case flash read fails */
3418 *bank = 0;
3419
3420 /* Check bank 0 */
3421 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3422 &nvm_dword);
3423 if (ret_val)
3424 return ret_val;
3425 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3426 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3427 E1000_ICH_NVM_SIG_VALUE) {
3428 *bank = 0;
3429 return E1000_SUCCESS;
3430 }
3431
3432 /* Check bank 1 */
3433 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3653 * can be started.
3654 **/
3655 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3656 {
3657 union ich8_hws_flash_status hsfsts;
3658 s32 ret_val = -E1000_ERR_NVM;
3659
3660 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3661
3662 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3663
3664 /* Check if the flash descriptor is valid */
3665 if (!hsfsts.hsf_status.fldesvalid) {
3666 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
3667 return -E1000_ERR_NVM;
3668 }
3669
3670 /* Clear FCERR and DAEL in hw status by writing 1 */
3671 hsfsts.hsf_status.flcerr = 1;
3672 hsfsts.hsf_status.dael = 1;
3673 if (hw->mac.type >= e1000_pch_spt)
3674 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3675 hsfsts.regval & 0xFFFF);
3676 else
3677 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3678
3679 /* Either we should have a hardware SPI cycle in progress
3680 * bit to check against, in order to start a new cycle or
3681 * FDONE bit should be changed in the hardware so that it
3682 * is 1 after hardware reset, which can then be used as an
3683 * indication whether a cycle is in progress or has been
3684 * completed.
3685 */
3686
3687 if (!hsfsts.hsf_status.flcinprog) {
3688 /* There is no cycle running at present,
3689 * so we can start a cycle.
3690 * Begin by setting Flash Cycle Done.
3691 */
3692 hsfsts.hsf_status.flcdone = 1;
3693 if (hw->mac.type >= e1000_pch_spt)
3694 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3695 hsfsts.regval & 0xFFFF);
3696 else
3697 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3698 hsfsts.regval);
3699 ret_val = E1000_SUCCESS;
3700 } else {
3701 s32 i;
3702
3703		/* Otherwise poll for some time so the current
3704 * cycle has a chance to end before giving up.
3705 */
3706 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3707 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3708 ICH_FLASH_HSFSTS);
3709 if (!hsfsts.hsf_status.flcinprog) {
3710 ret_val = E1000_SUCCESS;
3711 break;
3712 }
3713 usec_delay(1);
3714 }
3715 if (ret_val == E1000_SUCCESS) {
3716			/* The previous cycle finished before our poll timed out,
3717			 * so now set the Flash Cycle Done bit.
3718 */
3719 hsfsts.hsf_status.flcdone = 1;
3720 if (hw->mac.type >= e1000_pch_spt)
3721 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3722 hsfsts.regval & 0xFFFF);
3723 else
3724 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3725 hsfsts.regval);
3726 } else {
3727 DEBUGOUT("Flash controller busy, cannot get access\n");
3728 }
3729 }
3730
3731 return ret_val;
3732 }
3733
3734 /**
3735 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3736 * @hw: pointer to the HW structure
3737 * @timeout: maximum time to wait for completion
3738 *
3739 * This function starts a flash cycle and waits for its completion.
3740 **/
3741 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3742 {
3743 union ich8_hws_flash_ctrl hsflctl;
3744 union ich8_hws_flash_status hsfsts;
3745 u32 i = 0;
3746
3747 DEBUGFUNC("e1000_flash_cycle_ich8lan");
3748
3749 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3750 if (hw->mac.type >= e1000_pch_spt)
3751 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3752 else
3753 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3754 hsflctl.hsf_ctrl.flcgo = 1;
3755
3756 if (hw->mac.type >= e1000_pch_spt)
3757 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3758 hsflctl.regval << 16);
3759 else
3760 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3761
3762 /* wait till FDONE bit is set to 1 */
3763 do {
3764 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3765 if (hsfsts.hsf_status.flcdone)
3766 break;
3767 usec_delay(1);
3768 } while (i++ < timeout);
3769
3770 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3771 return E1000_SUCCESS;
3772
3773 return -E1000_ERR_NVM;
3774 }
3775
3776 /**
3819 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3820 }
3821
3822 /**
3823 * e1000_read_flash_byte_ich8lan - Read byte from flash
3824 * @hw: pointer to the HW structure
3825 * @offset: The offset of the byte to read.
3826 * @data: Pointer to a byte to store the value read.
3827 *
3828 * Reads a single byte from the NVM using the flash access registers.
3829 **/
3830 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3831 u8 *data)
3832 {
3833 s32 ret_val;
3834 u16 word = 0;
3835
3836	/* In SPT, only 32-bit flash access is supported,
3837	 * so this function should not be called.
3838 */
3839 if (hw->mac.type >= e1000_pch_spt)
3840 return -E1000_ERR_NVM;
3841 else
3842 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3843
3844 if (ret_val)
3845 return ret_val;
3846
3847 *data = (u8)word;
3848
3849 return E1000_SUCCESS;
3850 }
3851
3852 /**
3853 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
3854 * @hw: pointer to the HW structure
3855 * @offset: The offset (in bytes) of the byte or word to read.
3856 * @size: Size of data to read, 1=byte 2=word
3857 * @data: Pointer to the word to store the value read.
3858 *
3859 * Reads a byte or word from the NVM using the flash access registers.
3927
3928 /**
3929 * e1000_read_flash_data32_ich8lan - Read dword from NVM
3930 * @hw: pointer to the HW structure
3931 * @offset: The offset (in bytes) of the dword to read.
3932 * @data: Pointer to the dword to store the value read.
3933 *
3934 *  Reads a dword from the NVM using the flash access registers.
3935 **/
3936 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3937 u32 *data)
3938 {
3939 union ich8_hws_flash_status hsfsts;
3940 union ich8_hws_flash_ctrl hsflctl;
3941 u32 flash_linear_addr;
3942 s32 ret_val = -E1000_ERR_NVM;
3943 u8 count = 0;
3944
3945	DEBUGFUNC("e1000_read_flash_data32_ich8lan");
3946
3947	if (offset > ICH_FLASH_LINEAR_ADDR_MASK || hw->mac.type < e1000_pch_spt)
3948 return -E1000_ERR_NVM;
3949 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3950 hw->nvm.flash_base_addr);
3951
3952 do {
3953 usec_delay(1);
3954 /* Steps */
3955 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3956 if (ret_val != E1000_SUCCESS)
3957 break;
3958 /* In SPT, this register is in LAN memory space, not flash.
3959 * Therefore, only 32-bit access is supported.
3960 */
3961 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3962
3963 /* fldbcount is the transfer size minus one; 3 selects a dword. */
3964 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3965 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3966 /* In SPT, this register is in LAN memory space, not flash.
3967 * Therefore, only 32-bit access is supported
4368 * If the bit is 0, the EEPROM has been modified but the checksum was not
4369 * recalculated, in which case we need to calculate the checksum and set bit 6.
4370 **/
4371 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4372 {
4373 s32 ret_val;
4374 u16 data;
4375 u16 word;
4376 u16 valid_csum_mask;
4377
4378 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4379
4380 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
4381 * the checksum needs to be fixed. This bit is an indication that
4382 * the NVM was prepared by OEM software and did not calculate
4383 * the checksum...a likely scenario.
4384 */
4385 switch (hw->mac.type) {
4386 case e1000_pch_lpt:
4387 case e1000_pch_spt:
4388 case e1000_pch_cnp:
4389 word = NVM_COMPAT;
4390 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4391 break;
4392 default:
4393 word = NVM_FUTURE_INIT_WORD1;
4394 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4395 break;
4396 }
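	/* For example, an image prepared by an OEM flashing tool may leave
	 * the valid-checksum bit clear in NVM_COMPAT (LPT and newer) or in
	 * NVM_FUTURE_INIT_WORD1 (older parts); the code below then sets the
	 * bit, writes the word back and commits it with hw->nvm.ops.update().
	 */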
4397
4398 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4399 if (ret_val)
4400 return ret_val;
4401
4402 if (!(data & valid_csum_mask)) {
4403 data |= valid_csum_mask;
4404 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4405 if (ret_val)
4406 return ret_val;
4407 ret_val = hw->nvm.ops.update(hw);
4408 if (ret_val)
4416 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4417 * @hw: pointer to the HW structure
4418 * @offset: The offset (in bytes) of the byte/word to write.
4419 * @size: Size of data to write, 1=byte 2=word
4420 * @data: The byte(s) to write to the NVM.
4421 *
4422 * Writes one/two bytes to the NVM using the flash access registers.
4423 **/
4424 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4425 u8 size, u16 data)
4426 {
4427 union ich8_hws_flash_status hsfsts;
4428 union ich8_hws_flash_ctrl hsflctl;
4429 u32 flash_linear_addr;
4430 u32 flash_data = 0;
4431 s32 ret_val;
4432 u8 count = 0;
4433
4434 DEBUGFUNC("e1000_write_flash_data_ich8lan");
4435
4436 if (hw->mac.type >= e1000_pch_spt) {
4437 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4438 return -E1000_ERR_NVM;
4439 } else {
4440 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4441 return -E1000_ERR_NVM;
4442 }
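	/* For example, a word write such as
	 *	e1000_write_flash_data_ich8lan(hw, offset, 2, 0x1234);
	 * passes the check above on pre-SPT parts, while SPT-class parts
	 * only accept 4-byte requests here and dword writes would normally
	 * go through e1000_write_flash_data32_ich8lan() below.
	 */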
4443
4444 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4445 hw->nvm.flash_base_addr);
4446
4447 do {
4448 usec_delay(1);
4449 /* Steps */
4450 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4451 if (ret_val != E1000_SUCCESS)
4452 break;
4453 /* In SPT, this register is in LAN memory space, not
4454 * flash. Therefore, only 32-bit access is supported.
4455 */
4456 if (hw->mac.type >= e1000_pch_spt)
4457 hsflctl.regval =
4458 E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
4459 else
4460 hsflctl.regval =
4461 E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4462
4463 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4464 hsflctl.hsf_ctrl.fldbcount = size - 1;
4465 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4466 /* In SPT, this register is in LAN memory space,
4467 * not flash. Therefore, only 32-bit access is
4468 * supported.
4469 */
4470 if (hw->mac.type >= e1000_pch_spt)
4471 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4472 hsflctl.regval << 16);
4473 else
4474 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4475 hsflctl.regval);
4476
4477 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4478
4479 if (size == 1)
4480 flash_data = (u32)data & 0x00FF;
4481 else
4482 flash_data = (u32)data;
4483
4484 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4485
4486 /* Check if FCERR is set to 1. If it is, clear it and try the
4487 * whole sequence a few more times; otherwise we are done.
4488 */
4489 ret_val =
4490 e1000_flash_cycle_ich8lan(hw,
4512
4513 /**
4514 * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4515 * @hw: pointer to the HW structure
4516 * @offset: The offset (in bytes) of the dword to write.
4517 * @data: The 4 bytes to write to the NVM.
4518 *
4519 * Writes a dword (4 bytes) to the NVM using the flash access registers.
4520 **/
4521 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4522 u32 data)
4523 {
4524 union ich8_hws_flash_status hsfsts;
4525 union ich8_hws_flash_ctrl hsflctl;
4526 u32 flash_linear_addr;
4527 s32 ret_val;
4528 u8 count = 0;
4529
4530 DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4531
4532 if (hw->mac.type >= e1000_pch_spt) {
4533 if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4534 return -E1000_ERR_NVM;
4535 }
4536 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4537 hw->nvm.flash_base_addr);
4538 do {
4539 usec_delay(1);
4540 /* Steps */
4541 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4542 if (ret_val != E1000_SUCCESS)
4543 break;
4544
4545 /* In SPT, this register is in LAN memory space, not
4546 * flash. Therefore, only 32-bit access is supported.
4547 */
4548 if (hw->mac.type >= e1000_pch_spt)
4549 hsflctl.regval = E1000_READ_FLASH_REG(hw,
4550 ICH_FLASH_HSFSTS)
4551 >> 16;
4552 else
4553 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4554 ICH_FLASH_HSFCTL);
4555
4556 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4557 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4558
4559 /* In SPT, this register is in LAN memory space,
4560 * not flash. Therefore, only 32-bit access is
4561 * supported.
4562 */
4563 if (hw->mac.type >= e1000_pch_spt)
4564 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4565 hsflctl.regval << 16);
4566 else
4567 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4568 hsflctl.regval);
4569
4570 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4571
4572 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4573
4574 /* Check if FCERR is set to 1. If it is, clear it and try the
4575 * whole sequence a few more times; otherwise we are done.
4576 */
4577 ret_val = e1000_flash_cycle_ich8lan(hw,
4578 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4579
4580 if (ret_val == E1000_SUCCESS)
4581 break;
4582
4583 /* If we're here, then things are most likely
4745 default:
4746 return -E1000_ERR_NVM;
4747 }
4748
4749 /* Start with the base address, then add the sector offset. */
4750 flash_linear_addr = hw->nvm.flash_base_addr;
4751 flash_linear_addr += (bank) ? flash_bank_size : 0;
4752
4753 for (j = 0; j < iteration; j++) {
4754 do {
4755 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4756
4757 /* Steps */
4758 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4759 if (ret_val)
4760 return ret_val;
4761
4762 /* Write a value of 11b (block erase) to the Flash
4763 * Cycle field in the HW flash control register.
4764 */
4765 if (hw->mac.type >= e1000_pch_spt)
4766 hsflctl.regval =
4767 E1000_READ_FLASH_REG(hw,
4768 ICH_FLASH_HSFSTS)>>16;
4769 else
4770 hsflctl.regval =
4771 E1000_READ_FLASH_REG16(hw,
4772 ICH_FLASH_HSFCTL);
4773
4774 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4775 if (hw->mac.type >= e1000_pch_spt)
4776 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4777 hsflctl.regval << 16);
4778 else
4779 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4780 hsflctl.regval);
4781
4782 /* Write the last 24 bits of an index within the
4783 * block into Flash Linear address field in Flash
4784 * Address.
4785 */
4786 flash_linear_addr += (j * sector_size);
4787 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4788 flash_linear_addr);
4789
4790 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4791 if (ret_val == E1000_SUCCESS)
4792 break;
4793
4794 /* Check if FCERR is set to 1. If it is,
4795 * clear it and try the whole sequence
5193 if (hw->mac.type == e1000_ich8lan) {
5194 reg = E1000_READ_REG(hw, E1000_STATUS);
5195 reg &= ~(1UL << 31);
5196 E1000_WRITE_REG(hw, E1000_STATUS, reg);
5197 }
5198
5199 /* Work around a descriptor data corruption issue seen with NFSv2 UDP
5200 * traffic by simply disabling the NFS filtering capability.
5201 */
5202 reg = E1000_READ_REG(hw, E1000_RFCTL);
5203 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5204
5205 /* Disable IPv6 extension header parsing because some malformed
5206 * IPv6 headers can hang the Rx.
5207 */
5208 if (hw->mac.type == e1000_ich8lan)
5209 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5210 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5211
5212 /* Enable ECC on Lynx Point and newer PCHs */
5213 if (hw->mac.type >= e1000_pch_lpt) {
5214 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5215 reg |= E1000_PBECCSTS_ECC_ENABLE;
5216 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5217
5218 reg = E1000_READ_REG(hw, E1000_CTRL);
5219 reg |= E1000_CTRL_MEHE;
5220 E1000_WRITE_REG(hw, E1000_CTRL, reg);
5221 }
5222
5223 return;
5224 }
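/* The register updates above all follow the same read-modify-write pattern.
 * A minimal sketch of that pattern as a hypothetical helper (the driver
 * itself open-codes each update; this assumes the E1000_READ_REG and
 * E1000_WRITE_REG macros accept the register offset as an ordinary value,
 * as their uses above suggest):
 */
#if 0	/* illustrative sketch only */
static void e1000_example_set_reg_bits(struct e1000_hw *hw, u32 reg_offset,
				       u32 bits)
{
	u32 reg = E1000_READ_REG(hw, reg_offset);

	reg |= bits;
	E1000_WRITE_REG(hw, reg_offset, reg);
}

/* e.g. the ECC enable above could then be written as:
 *	e1000_example_set_reg_bits(hw, E1000_PBECCSTS, E1000_PBECCSTS_ECC_ENABLE);
 */
#endif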
5225
5226 /**
5227 * e1000_setup_link_ich8lan - Setup flow control and link settings
5228 * @hw: pointer to the HW structure
5229 *
5230 * Determines which flow control settings to use, then configures flow
5231 * control. Calls the appropriate media-specific link configuration
5232 * function. Assuming the adapter has a valid link partner, a valid link
5233 * should be established. Assumes the hardware has previously been reset
5626 * than 10Mbps w/o EEE.
5627 **/
5628 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5629 {
5630 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5631 u32 phy_ctrl;
5632 s32 ret_val;
5633
5634 DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5635
5636 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5637 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5638
5639 if (hw->phy.type == e1000_phy_i217) {
5640 u16 phy_reg, device_id = hw->device_id;
5641
5642 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5643 (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5644 (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5645 (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5646 (hw->mac.type >= e1000_pch_spt)) {
5647 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5648
5649 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5650 fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5651 }
5652
5653 ret_val = hw->phy.ops.acquire(hw);
5654 if (ret_val)
5655 goto out;
5656
5657 if (!dev_spec->eee_disable) {
5658 u16 eee_advert;
5659
5660 ret_val =
5661 e1000_read_emi_reg_locked(hw,
5662 I217_EEE_ADVERTISEMENT,
5663 &eee_advert);
5664 if (ret_val)
5665 goto release;
5666