935 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
936 * @hw: pointer to hardware structure
937 * @speed: new link speed
938 * @autoneg: TRUE if autonegotiation enabled
939 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
940 *
941 * Restarts link on PHY and MAC based on settings passed in.
942 **/
943 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
944 ixgbe_link_speed speed,
945 bool autoneg,
946 bool autoneg_wait_to_complete)
947 {
948 s32 status;
949
950 DEBUGFUNC("ixgbe_setup_copper_link_82599");
951
952 /* Setup the PHY according to input speed */
953 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
954 autoneg_wait_to_complete);
955 if (status == IXGBE_SUCCESS) {
956 /* Set up MAC */
957 status =
958 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
959 }
960
961 return status;
962 }
963
964 /**
965 * ixgbe_reset_hw_82599 - Perform hardware reset
966 * @hw: pointer to hardware structure
967 *
968 * Resets the hardware by resetting the transmit and receive units, masks
969 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
970 * reset.
971 **/
972 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
973 {
974 ixgbe_link_speed link_speed;
975 s32 status;
976 u32 ctrl, i, autoc, autoc2;
977 bool link_up = FALSE;
978
979 DEBUGFUNC("ixgbe_reset_hw_82599");
1160 */
1161 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1162 IXGBE_WRITE_FLUSH(hw);
1163
1164 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1165 IXGBE_WRITE_FLUSH(hw);
1166
1167 /* Poll init-done after we write FDIRCTRL register */
1168 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1169 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1170 IXGBE_FDIRCTRL_INIT_DONE)
1171 break;
1172 usec_delay(10);
1173 }
1174 if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1175 DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1176 return IXGBE_ERR_FDIR_REINIT_FAILED;
1177 }
1178
1179 /* Clear FDIR statistics registers (read to clear) */
1180 (void) IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1181 (void) IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1182 (void) IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1183 (void) IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1184 (void) IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1185
1186 return IXGBE_SUCCESS;
1187 }
1188
1189 /**
1190 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1191 * @hw: pointer to hardware structure
1192 * @fdirctrl: value to write to flow director control register
1193 **/
1194 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1195 {
1196 int i;
1197
1198 DEBUGFUNC("ixgbe_fdir_enable_82599");
1199
1200 /* Prime the keys for hashing */
1201 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1202 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1203
1204 /*
1275 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1276 IXGBE_FDIRCTRL_REPORT_STATUS |
1277 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1278 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1279 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1280 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1281
1282 /* write hashes and fdirctrl register, poll for completion */
1283 ixgbe_fdir_enable_82599(hw, fdirctrl);
1284
1285 return IXGBE_SUCCESS;
1286 }
1287
1288 /*
1289 * These defines allow us to quickly generate all of the necessary instructions
1290 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1291 * for values 0 through 15
1292 */
1293 #define IXGBE_ATR_COMMON_HASH_KEY \
1294 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1295 #if lint
1296 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n)
1297 #else
1298 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1299 do { \
1300 u32 n = (_n); \
1301 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1302 common_hash ^= lo_hash_dword >> n; \
1303 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1304 bucket_hash ^= lo_hash_dword >> n; \
1305 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
1306 sig_hash ^= lo_hash_dword << (16 - n); \
1307 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
1308 common_hash ^= hi_hash_dword >> n; \
1309 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1310 bucket_hash ^= hi_hash_dword >> n; \
1311 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1312 sig_hash ^= hi_hash_dword << (16 - n); \
1313 } while (0);
1314 #endif
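/*
 * Illustrative note (not from the original source): the hash routine below
 * is expected to expand the macro once for every key bit position, e.g.
 *
 *	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
 *	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
 *	...
 *	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
 *
 * Because the keys are compile-time constants, the compiler can discard the
 * conditionals and emit only the XOR/shift operations for set key bits,
 * accumulating sig_hash, bucket_hash and common_hash in a single pass.
 */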
1315
1316 /**
1317 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1318 * @input: unique input dword
1319 * @common: compressed common input dword
1320 * This function is almost identical to the function above but contains
1321 * several optimizations such as unwinding all of the loops, letting the
1322 * compiler work out all of the conditional ifs since the keys are static
1323 * defines, and computing two keys at once since the hashed dword stream
1324 * will be the same for both keys.
1325 **/
1326 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1327 union ixgbe_atr_hash_dword common)
1328 {
1329 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1330 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1331
1332 /* record the flow_vm_vlan bits as they are a key part to the hash */
1333 flow_vm_vlan = IXGBE_NTOHL(input.dword);
1334
1415
1416 /* configure FDIRCMD register */
1417 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1418 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1419 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1420 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1421
1422 /*
1423 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1424 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1425 */
1426 fdirhashcmd = (u64)fdircmd << 32;
1427 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1428 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1429
1430 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1431
1432 return IXGBE_SUCCESS;
1433 }
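/*
 * Usage sketch (illustrative, not part of the original file): a caller of
 * the signature-filter path above would typically zero both hash dwords,
 * set input.formatted.flow_type to one of the IXGBE_ATR_FLOW_TYPE_* values,
 * fold the relevant addresses/ports into the "common" dword, and pass the
 * Rx queue that matching packets should be steered to.  The single 64-bit
 * FDIRHASH write then programs both the hash and the FDIRCMD add command
 * in one access.
 */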
1434
1435 #if lint
1436 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n)
1437 #else
1438 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1439 do { \
1440 u32 n = (_n); \
1441 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1442 bucket_hash ^= lo_hash_dword >> n; \
1443 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1444 bucket_hash ^= hi_hash_dword >> n; \
1445 } while (0);
1446 #endif
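/*
 * Note (added for clarity): unlike the signature variant above, this
 * iteration only folds in bits selected by the bucket key, so only
 * bucket_hash is accumulated; the perfect-filter path stores that value in
 * bkt_hash rather than deriving a separate signature hash.
 */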
1447 /**
1448 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1449 * @input: input bitstream to compute the hash on
1450 * @input_mask: mask for the input bitstream
1451 *
1452 * This function serves two main purposes. First it applies the input_mask
1453 * to the atr_input resulting in a cleaned up atr_input data stream.
1454 * Secondly it computes the hash and stores it in the bkt_hash field at
1455 * the end of the input byte stream. This way it will be available for
1456 * future use without needing to recompute the hash.
1457 **/
1458 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1459 union ixgbe_atr_input *input_mask)
1460 {
1461
1462 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1463 u32 bucket_hash = 0;
1464
1465 /* Apply masks to input data */
1466 input->dword_stream[0] &= input_mask->dword_stream[0];
1577 DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
1578
1579 /*
1580 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1581 * are zero, then assume a full mask for that field. Also assume that
1582 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1583 * cannot be masked out in this implementation.
1584 *
1585 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1586 * point in time.
1587 */
1588
1589 /* verify bucket hash is cleared on hash generation */
1590 if (input_mask->formatted.bkt_hash)
1591 DEBUGOUT(" bucket hash should always be 0 in mask\n");
1592
1593 /* Program FDIRM and verify partial masks */
1594 switch (input_mask->formatted.vm_pool & 0x7F) {
1595 case 0x0:
1596 fdirm |= IXGBE_FDIRM_POOL;
1597 /* FALLTHRU */
1598 case 0x7F:
1599 break;
1600 default:
1601 DEBUGOUT(" Error on vm pool mask\n");
1602 return IXGBE_ERR_CONFIG;
1603 }
1604
1605 switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1606 case 0x0:
1607 fdirm |= IXGBE_FDIRM_L4P;
1608 if (input_mask->formatted.dst_port ||
1609 input_mask->formatted.src_port) {
1610 DEBUGOUT(" Error on src/dst port mask\n");
1611 return IXGBE_ERR_CONFIG;
1612 }
1613 /* FALLTHRU */
1614 case IXGBE_ATR_L4TYPE_MASK:
1615 break;
1616 default:
1617 DEBUGOUT(" Error on flow type mask\n");
1618 return IXGBE_ERR_CONFIG;
1619 }
1620
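/*
 * Note (added for clarity): 0xEFFF keeps the 12 VLAN ID bits (0x0FFF) and
 * the 3 priority bits (0xE000) while ignoring the CFI/DEI bit, so the cases
 * below only need to distinguish which of the two VLAN fields are masked.
 */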
1621 switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1622 case 0x0000:
1623 /* mask VLAN ID, fall through to mask VLAN priority */
1624 fdirm |= IXGBE_FDIRM_VLANID;
1625 /* FALLTHRU */
1626 case 0x0FFF:
1627 /* mask VLAN priority */
1628 fdirm |= IXGBE_FDIRM_VLANP;
1629 break;
1630 case 0xE000:
1631 /* mask VLAN ID only, fall through */
1632 fdirm |= IXGBE_FDIRM_VLANID;
1633 /* FALLTHRU */
1634 case 0xEFFF:
1635 /* no VLAN fields masked */
1636 break;
1637 default:
1638 DEBUGOUT(" Error on VLAN mask\n");
1639 return IXGBE_ERR_CONFIG;
1640 }
1641
1642 switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1643 case 0x0000:
1644 /* Mask Flex Bytes, fall through */
1645 fdirm |= IXGBE_FDIRM_FLEX;
1646 /* FALLTHRU */
1647 case 0xFFFF:
1648 break;
1649 default:
1650 DEBUGOUT(" Error on flexible byte mask\n");
1651 return IXGBE_ERR_CONFIG;
1652 }
1653
1654 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1655 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1656
1657 /* store the TCP/UDP port masks, bit reversed from port layout */
1658 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1659
1660 /* write both the same so that UDP and TCP use the same mask */
1661 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1662 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
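/*
 * Note (added for clarity): the bitwise complement is written so that port
 * bits the caller wants compared (set in input_mask) end up cleared in the
 * hardware mask registers; the same inversion is applied to the IP masks
 * below.
 */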
1663
1664 /* store source and destination IP masks (big-endian) */
1665 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1666 ~input_mask->formatted.src_ip[0]);
1792
1793 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1794
1795 /*
1796 * Check flow_type formatting, and bail out before we touch the hardware
1797 * if there's a configuration issue
1798 */
1799 switch (input->formatted.flow_type) {
1800 case IXGBE_ATR_FLOW_TYPE_IPV4:
1801 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
1802 if (input->formatted.dst_port || input->formatted.src_port) {
1803 DEBUGOUT(" Error on src/dst port\n");
1804 return IXGBE_ERR_CONFIG;
1805 }
1806 break;
1807 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1808 if (input->formatted.dst_port || input->formatted.src_port) {
1809 DEBUGOUT(" Error on src/dst port\n");
1810 return IXGBE_ERR_CONFIG;
1811 }
1812 /* FALLTHRU */
1813 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1814 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1815 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
1816 IXGBE_ATR_L4TYPE_MASK;
1817 break;
1818 default:
1819 DEBUGOUT(" Error on flow type input\n");
1820 return err;
1821 }
1822
1823 /* program input mask into the HW */
1824 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
1825 if (err)
1826 return err;
1827
1828 /* apply mask and compute/store hash */
1829 ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
1830
1831 /* program filters to filter memory */
1832 return ixgbe_fdir_write_perfect_filter_82599(hw, input,