Diffstat (limited to 'drivers/net/niu.c')
-rw-r--r--  drivers/net/niu.c | 1059
1 file changed, 921 insertions(+), 138 deletions(-)
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 0c0b752315c..50c11126a3d 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -1115,6 +1115,130 @@ static int link_status_10g_serdes(struct niu *np, int *link_up_p)
return 0;
}
+static int link_status_mii(struct niu *np, int *link_up_p)
+{
+ struct niu_link_config *lp = &np->link_config;
+ int err;
+ int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
+ int supported, advertising, active_speed, active_duplex;
+
+ err = mii_read(np, np->phy_addr, MII_BMCR);
+ if (unlikely(err < 0))
+ return err;
+ bmcr = err;
+
+ err = mii_read(np, np->phy_addr, MII_BMSR);
+ if (unlikely(err < 0))
+ return err;
+ bmsr = err;
+
+ err = mii_read(np, np->phy_addr, MII_ADVERTISE);
+ if (unlikely(err < 0))
+ return err;
+ advert = err;
+
+ err = mii_read(np, np->phy_addr, MII_LPA);
+ if (unlikely(err < 0))
+ return err;
+ lpa = err;
+
+ if (likely(bmsr & BMSR_ESTATEN)) {
+ err = mii_read(np, np->phy_addr, MII_ESTATUS);
+ if (unlikely(err < 0))
+ return err;
+ estatus = err;
+
+ err = mii_read(np, np->phy_addr, MII_CTRL1000);
+ if (unlikely(err < 0))
+ return err;
+ ctrl1000 = err;
+
+ err = mii_read(np, np->phy_addr, MII_STAT1000);
+ if (unlikely(err < 0))
+ return err;
+ stat1000 = err;
+ } else
+ estatus = ctrl1000 = stat1000 = 0;
+
+ supported = 0;
+ if (bmsr & BMSR_ANEGCAPABLE)
+ supported |= SUPPORTED_Autoneg;
+ if (bmsr & BMSR_10HALF)
+ supported |= SUPPORTED_10baseT_Half;
+ if (bmsr & BMSR_10FULL)
+ supported |= SUPPORTED_10baseT_Full;
+ if (bmsr & BMSR_100HALF)
+ supported |= SUPPORTED_100baseT_Half;
+ if (bmsr & BMSR_100FULL)
+ supported |= SUPPORTED_100baseT_Full;
+ if (estatus & ESTATUS_1000_THALF)
+ supported |= SUPPORTED_1000baseT_Half;
+ if (estatus & ESTATUS_1000_TFULL)
+ supported |= SUPPORTED_1000baseT_Full;
+ lp->supported = supported;
+
+ advertising = 0;
+ if (advert & ADVERTISE_10HALF)
+ advertising |= ADVERTISED_10baseT_Half;
+ if (advert & ADVERTISE_10FULL)
+ advertising |= ADVERTISED_10baseT_Full;
+ if (advert & ADVERTISE_100HALF)
+ advertising |= ADVERTISED_100baseT_Half;
+ if (advert & ADVERTISE_100FULL)
+ advertising |= ADVERTISED_100baseT_Full;
+ if (ctrl1000 & ADVERTISE_1000HALF)
+ advertising |= ADVERTISED_1000baseT_Half;
+ if (ctrl1000 & ADVERTISE_1000FULL)
+ advertising |= ADVERTISED_1000baseT_Full;
+
+ if (bmcr & BMCR_ANENABLE) {
+ int neg, neg1000;
+
+ lp->active_autoneg = 1;
+ advertising |= ADVERTISED_Autoneg;
+
+ neg = advert & lpa;
+ neg1000 = (ctrl1000 << 2) & stat1000;
+
+ if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
+ active_speed = SPEED_1000;
+ else if (neg & LPA_100)
+ active_speed = SPEED_100;
+ else if (neg & (LPA_10HALF | LPA_10FULL))
+ active_speed = SPEED_10;
+ else
+ active_speed = SPEED_INVALID;
+
+ if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
+ active_duplex = DUPLEX_FULL;
+ else if (active_speed != SPEED_INVALID)
+ active_duplex = DUPLEX_HALF;
+ else
+ active_duplex = DUPLEX_INVALID;
+ } else {
+ lp->active_autoneg = 0;
+
+ if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
+ active_speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ active_speed = SPEED_100;
+ else
+ active_speed = SPEED_10;
+
+ if (bmcr & BMCR_FULLDPLX)
+ active_duplex = DUPLEX_FULL;
+ else
+ active_duplex = DUPLEX_HALF;
+ }
+
+ lp->active_advertising = advertising;
+ lp->active_speed = active_speed;
+ lp->active_duplex = active_duplex;
+ *link_up_p = !!(bmsr & BMSR_LSTATUS);
+
+ return 0;
+}
+
static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
struct niu_link_config *lp = &np->link_config;
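
An aside on the autoneg resolution in link_status_mii() above: the 1000BASE-T
abilities advertised in MII_CTRL1000 sit two bit positions below the matching
link-partner bits reported in MII_STAT1000, so (ctrl1000 << 2) & stat1000
yields the negotiated gigabit modes directly. A minimal standalone sketch of
that alignment (illustrative only, not part of the patch):

#include <linux/mii.h>	/* ADVERTISE_1000FULL=0x0200, LPA_1000FULL=0x0800 */

static inline int example_neg1000(int ctrl1000, int stat1000)
{
	/* 0x0200 << 2 == 0x0800, i.e. ADVERTISE_1000FULL lines up with
	 * LPA_1000FULL; the half-duplex bits line up the same way. */
	return (ctrl1000 << 2) & stat1000;
}
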
@@ -1171,6 +1295,22 @@ out:
return err;
}
+static int link_status_1g(struct niu *np, int *link_up_p)
+{
+ struct niu_link_config *lp = &np->link_config;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ err = link_status_mii(np, link_up_p);
+ lp->supported |= SUPPORTED_TP;
+ lp->active_advertising |= ADVERTISED_TP;
+
+ spin_unlock_irqrestore(&np->lock, flags);
+ return err;
+}
+
static int bcm8704_reset(struct niu *np)
{
int err, limit;
@@ -1676,39 +1816,88 @@ static int mii_init_common(struct niu *np)
return err;
}
- /* XXX configurable XXX */
- /* XXX for now don't advertise half-duplex or asym pause... XXX */
- adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
- if (bmsr & BMSR_10FULL)
- adv |= ADVERTISE_10FULL;
- if (bmsr & BMSR_100FULL)
- adv |= ADVERTISE_100FULL;
- err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
- if (err)
- return err;
-
- if (bmsr & BMSR_ESTATEN) {
- u16 ctrl1000 = 0;
-
- if (estat & ESTATUS_1000_TFULL)
- ctrl1000 |= ADVERTISE_1000FULL;
- err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
+ if (lp->autoneg) {
+ u16 ctrl1000;
+
+ adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
+ if ((bmsr & BMSR_10HALF) &&
+ (lp->advertising & ADVERTISED_10baseT_Half))
+ adv |= ADVERTISE_10HALF;
+ if ((bmsr & BMSR_10FULL) &&
+ (lp->advertising & ADVERTISED_10baseT_Full))
+ adv |= ADVERTISE_10FULL;
+ if ((bmsr & BMSR_100HALF) &&
+ (lp->advertising & ADVERTISED_100baseT_Half))
+ adv |= ADVERTISE_100HALF;
+ if ((bmsr & BMSR_100FULL) &&
+ (lp->advertising & ADVERTISED_100baseT_Full))
+ adv |= ADVERTISE_100FULL;
+ err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
if (err)
return err;
+
+ if (likely(bmsr & BMSR_ESTATEN)) {
+ ctrl1000 = 0;
+ if ((estat & ESTATUS_1000_THALF) &&
+ (lp->advertising & ADVERTISED_1000baseT_Half))
+ ctrl1000 |= ADVERTISE_1000HALF;
+ if ((estat & ESTATUS_1000_TFULL) &&
+ (lp->advertising & ADVERTISED_1000baseT_Full))
+ ctrl1000 |= ADVERTISE_1000FULL;
+ err = mii_write(np, np->phy_addr,
+ MII_CTRL1000, ctrl1000);
+ if (err)
+ return err;
+ }
+
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ } else {
+ /* !lp->autoneg */
+ int fulldpx;
+
+ if (lp->duplex == DUPLEX_FULL) {
+ bmcr |= BMCR_FULLDPLX;
+ fulldpx = 1;
+ } else if (lp->duplex == DUPLEX_HALF)
+ fulldpx = 0;
+ else
+ return -EINVAL;
+
+ if (lp->speed == SPEED_1000) {
+ /* if X-full requested while not supported, or
+ X-half requested while not supported... */
+ if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
+ (!fulldpx && !(estat & ESTATUS_1000_THALF)))
+ return -EINVAL;
+ bmcr |= BMCR_SPEED1000;
+ } else if (lp->speed == SPEED_100) {
+ if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
+ (!fulldpx && !(bmsr & BMSR_100HALF)))
+ return -EINVAL;
+ bmcr |= BMCR_SPEED100;
+ } else if (lp->speed == SPEED_10) {
+ if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
+ (!fulldpx && !(bmsr & BMSR_10HALF)))
+ return -EINVAL;
+ } else
+ return -EINVAL;
}
- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
if (err)
return err;
+#if 0
err = mii_read(np, np->phy_addr, MII_BMCR);
if (err < 0)
return err;
+ bmcr = err;
+
err = mii_read(np, np->phy_addr, MII_BMSR);
if (err < 0)
return err;
-#if 0
+ bmsr = err;
+
pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
np->port, bmcr, bmsr);
#endif
@@ -2054,87 +2243,6 @@ static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
return err;
}
-static int link_status_1g(struct niu *np, int *link_up_p)
-{
- struct niu_link_config *lp = &np->link_config;
- u16 current_speed, bmsr;
- unsigned long flags;
- u8 current_duplex;
- int err, link_up;
-
- link_up = 0;
- current_speed = SPEED_INVALID;
- current_duplex = DUPLEX_INVALID;
-
- spin_lock_irqsave(&np->lock, flags);
-
- err = -EINVAL;
- if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
- goto out;
-
- err = mii_read(np, np->phy_addr, MII_BMSR);
- if (err < 0)
- goto out;
-
- bmsr = err;
- if (bmsr & BMSR_LSTATUS) {
- u16 adv, lpa, common, estat;
-
- err = mii_read(np, np->phy_addr, MII_ADVERTISE);
- if (err < 0)
- goto out;
- adv = err;
-
- err = mii_read(np, np->phy_addr, MII_LPA);
- if (err < 0)
- goto out;
- lpa = err;
-
- common = adv & lpa;
-
- err = mii_read(np, np->phy_addr, MII_ESTATUS);
- if (err < 0)
- goto out;
- estat = err;
-
- link_up = 1;
- if (estat & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) {
- current_speed = SPEED_1000;
- if (estat & ESTATUS_1000_TFULL)
- current_duplex = DUPLEX_FULL;
- else
- current_duplex = DUPLEX_HALF;
- } else {
- if (common & ADVERTISE_100BASE4) {
- current_speed = SPEED_100;
- current_duplex = DUPLEX_HALF;
- } else if (common & ADVERTISE_100FULL) {
- current_speed = SPEED_100;
- current_duplex = DUPLEX_FULL;
- } else if (common & ADVERTISE_100HALF) {
- current_speed = SPEED_100;
- current_duplex = DUPLEX_HALF;
- } else if (common & ADVERTISE_10FULL) {
- current_speed = SPEED_10;
- current_duplex = DUPLEX_FULL;
- } else if (common & ADVERTISE_10HALF) {
- current_speed = SPEED_10;
- current_duplex = DUPLEX_HALF;
- } else
- link_up = 0;
- }
- }
- lp->active_speed = current_speed;
- lp->active_duplex = current_duplex;
- err = 0;
-
-out:
- spin_unlock_irqrestore(&np->lock, flags);
-
- *link_up_p = link_up;
- return err;
-}
-
static int niu_link_status(struct niu *np, int *link_up_p)
{
const struct niu_phy_ops *ops = np->phy_ops;
@@ -2284,7 +2392,6 @@ static int serdes_init_10g_serdes(struct niu *np)
struct niu_link_config *lp = &np->link_config;
unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
u64 ctrl_val, test_cfg_val, sig, mask, val;
- int err;
u64 reset_val;
switch (np->port) {
@@ -2337,6 +2444,7 @@ static int serdes_init_10g_serdes(struct niu *np)
/* Initialize all 4 lanes of the SERDES. */
for (i = 0; i < 4; i++) {
u32 rxtx_ctrl, glue0;
+ int err;
err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
if (err)
@@ -2456,7 +2564,7 @@ static int niu_determine_phy_disposition(struct niu *np)
case NIU_FLAGS_10G:
/* 10G copper */
- tp = &phy_template_1g_copper;
+ tp = &phy_template_10g_copper;
break;
case NIU_FLAGS_FIBER:
@@ -2873,7 +2981,6 @@ static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
return 0;
}
-#if 0
static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
int ipv6, u64 protocol_id,
u64 tos_mask, u64 tos_val)
@@ -2901,7 +3008,6 @@ static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
return 0;
}
-#endif
static int tcam_early_init(struct niu *np)
{
@@ -3168,6 +3274,27 @@ static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
return 0;
}
+/* Entries for the ports are interleaved in the TCAM */
+static u16 tcam_get_index(struct niu *np, u16 idx)
+{
+ /* One entry reserved for IP fragment rule */
+ if (idx >= (np->clas.tcam_sz - 1))
+ idx = 0;
+ return (np->clas.tcam_top + ((idx+1) * np->parent->num_ports));
+}
+
+static u16 tcam_get_size(struct niu *np)
+{
+ /* One entry reserved for IP fragment rule */
+ return np->clas.tcam_sz - 1;
+}
+
+static u16 tcam_get_valid_entry_cnt(struct niu *np)
+{
+ /* One entry reserved for IP fragment rule */
+ return np->clas.tcam_valid_entries - 1;
+}
+
static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
u32 offset, u32 size)
{
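
The tcam_get_* helpers above encode the interleaved per-port TCAM layout:
slot clas.tcam_top (the port number) holds the IP-fragment rule, and each
subsequent user entry for that port is spaced num_ports slots apart. A small
standalone sketch of the mapping, using an illustrative 2-port parent
(not part of the patch):

/* With num_ports == 2: port 0 owns slots 0 (frag rule), 2, 4, ...
 * and port 1 owns slots 1 (frag rule), 3, 5, ...
 */
static u16 example_tcam_index(u16 tcam_top, u16 num_ports, u16 idx)
{
	return tcam_top + (idx + 1) * num_ports;	/* mirrors tcam_get_index() */
}
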
@@ -3390,6 +3517,7 @@ static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp)
rp->rx_bytes += skb->len;
skb->protocol = eth_type_trans(skb, np->dev);
+ skb_record_rx_queue(skb, rp->rx_channel);
netif_receive_skb(skb);
return num_rcr;
@@ -3669,7 +3797,7 @@ static int niu_poll(struct napi_struct *napi, int budget)
work_done = niu_poll_core(np, lp, budget);
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
niu_ldg_rearm(np, lp, 1);
}
return work_done;
@@ -4088,12 +4216,12 @@ static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
u64 v0, u64 v1, u64 v2)
{
- if (likely(netif_rx_schedule_prep(&lp->napi))) {
+ if (likely(napi_schedule_prep(&lp->napi))) {
lp->v0 = v0;
lp->v1 = v1;
lp->v2 = v2;
__niu_fastpath_interrupt(np, lp->ldg_num, v0);
- __netif_rx_schedule(&lp->napi);
+ __napi_schedule(&lp->napi);
}
}
@@ -4890,8 +5018,7 @@ static int niu_set_ip_frag_rule(struct niu *np)
struct niu_tcam_entry *tp;
int index, err;
- /* XXX fix this allocation scheme XXX */
- index = cp->tcam_index;
+ index = cp->tcam_top;
tp = &parent->tcam[index];
/* Note that the noport bit is the same in both ipv4 and
@@ -4908,6 +5035,8 @@ static int niu_set_ip_frag_rule(struct niu *np)
err = tcam_assoc_write(np, index, tp->assoc_data);
if (err)
return err;
+ tp->valid = 1;
+ cp->tcam_valid_entries++;
return 0;
}
@@ -5211,10 +5340,10 @@ static void niu_init_xif_xmac(struct niu *np)
if (np->flags & NIU_FLAGS_10G) {
val |= XMAC_CONFIG_MODE_XGMII;
} else {
- if (lp->active_speed == SPEED_100)
- val |= XMAC_CONFIG_MODE_MII;
- else
+ if (lp->active_speed == SPEED_1000)
val |= XMAC_CONFIG_MODE_GMII;
+ else
+ val |= XMAC_CONFIG_MODE_MII;
}
nw64_mac(XMAC_CONFIG, val);
@@ -6446,11 +6575,11 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
ipv6 = ihl = 0;
switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
+ case cpu_to_be16(ETH_P_IP):
ip_proto = ip_hdr(skb)->protocol;
ihl = ip_hdr(skb)->ihl;
break;
- case __constant_htons(ETH_P_IPV6):
+ case cpu_to_be16(ETH_P_IPV6):
ip_proto = ipv6_hdr(skb)->nexthdr;
ihl = (40 >> 2);
ipv6 = 1;
@@ -6702,17 +6831,27 @@ static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
memset(cmd, 0, sizeof(*cmd));
cmd->phy_address = np->phy_addr;
cmd->supported = lp->supported;
- cmd->advertising = lp->advertising;
- cmd->autoneg = lp->autoneg;
+ cmd->advertising = lp->active_advertising;
+ cmd->autoneg = lp->active_autoneg;
cmd->speed = lp->active_speed;
cmd->duplex = lp->active_duplex;
+ cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
+ cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
+ XCVR_EXTERNAL : XCVR_INTERNAL;
return 0;
}
static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- return -EINVAL;
+ struct niu *np = netdev_priv(dev);
+ struct niu_link_config *lp = &np->link_config;
+
+ lp->advertising = cmd->advertising;
+ lp->speed = cmd->speed;
+ lp->duplex = cmd->duplex;
+ lp->autoneg = cmd->autoneg;
+ return niu_init_link(np);
}
static u32 niu_get_msglevel(struct net_device *dev)
@@ -6727,6 +6866,16 @@ static void niu_set_msglevel(struct net_device *dev, u32 value)
np->msg_enable = value;
}
+static int niu_nway_reset(struct net_device *dev)
+{
+ struct niu *np = netdev_priv(dev);
+
+ if (np->link_config.autoneg)
+ return niu_init_link(np);
+
+ return 0;
+}
+
static int niu_get_eeprom_len(struct net_device *dev)
{
struct niu *np = netdev_priv(dev);
@@ -6778,6 +6927,75 @@ static int niu_get_eeprom(struct net_device *dev,
return 0;
}
+static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
+{
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ *pid = IPPROTO_TCP;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ *pid = IPPROTO_UDP;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ *pid = IPPROTO_SCTP;
+ break;
+ case AH_V4_FLOW:
+ case AH_V6_FLOW:
+ *pid = IPPROTO_AH;
+ break;
+ case ESP_V4_FLOW:
+ case ESP_V6_FLOW:
+ *pid = IPPROTO_ESP;
+ break;
+ default:
+ *pid = 0;
+ break;
+ }
+}
+
+static int niu_class_to_ethflow(u64 class, int *flow_type)
+{
+ switch (class) {
+ case CLASS_CODE_TCP_IPV4:
+ *flow_type = TCP_V4_FLOW;
+ break;
+ case CLASS_CODE_UDP_IPV4:
+ *flow_type = UDP_V4_FLOW;
+ break;
+ case CLASS_CODE_AH_ESP_IPV4:
+ *flow_type = AH_V4_FLOW;
+ break;
+ case CLASS_CODE_SCTP_IPV4:
+ *flow_type = SCTP_V4_FLOW;
+ break;
+ case CLASS_CODE_TCP_IPV6:
+ *flow_type = TCP_V6_FLOW;
+ break;
+ case CLASS_CODE_UDP_IPV6:
+ *flow_type = UDP_V6_FLOW;
+ break;
+ case CLASS_CODE_AH_ESP_IPV6:
+ *flow_type = AH_V6_FLOW;
+ break;
+ case CLASS_CODE_SCTP_IPV6:
+ *flow_type = SCTP_V6_FLOW;
+ break;
+ case CLASS_CODE_USER_PROG1:
+ case CLASS_CODE_USER_PROG2:
+ case CLASS_CODE_USER_PROG3:
+ case CLASS_CODE_USER_PROG4:
+ *flow_type = IP_USER_FLOW;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
static int niu_ethflow_to_class(int flow_type, u64 *class)
{
switch (flow_type) {
@@ -6787,7 +7005,8 @@ static int niu_ethflow_to_class(int flow_type, u64 *class)
case UDP_V4_FLOW:
*class = CLASS_CODE_UDP_IPV4;
break;
- case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
*class = CLASS_CODE_AH_ESP_IPV4;
break;
case SCTP_V4_FLOW:
@@ -6799,7 +7018,8 @@ static int niu_ethflow_to_class(int flow_type, u64 *class)
case UDP_V6_FLOW:
*class = CLASS_CODE_UDP_IPV6;
break;
- case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
*class = CLASS_CODE_AH_ESP_IPV6;
break;
case SCTP_V6_FLOW:
@@ -6816,8 +7036,6 @@ static u64 niu_flowkey_to_ethflow(u64 flow_key)
{
u64 ethflow = 0;
- if (flow_key & FLOW_KEY_PORT)
- ethflow |= RXH_DEV_PORT;
if (flow_key & FLOW_KEY_L2DA)
ethflow |= RXH_L2DA;
if (flow_key & FLOW_KEY_VLAN)
@@ -6841,8 +7059,6 @@ static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
{
u64 key = 0;
- if (ethflow & RXH_DEV_PORT)
- key |= FLOW_KEY_PORT;
if (ethflow & RXH_L2DA)
key |= FLOW_KEY_L2DA;
if (ethflow & RXH_VLAN)
@@ -6864,41 +7080,279 @@ static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
}
-static int niu_get_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd)
+static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
{
- struct niu *np = netdev_priv(dev);
u64 class;
- cmd->data = 0;
+ nfc->data = 0;
- if (!niu_ethflow_to_class(cmd->flow_type, &class))
+ if (!niu_ethflow_to_class(nfc->flow_type, &class))
return -EINVAL;
if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
TCAM_KEY_DISC)
- cmd->data = RXH_DISCARD;
+ nfc->data = RXH_DISCARD;
else
-
- cmd->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
+ nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
CLASS_CODE_USER_PROG1]);
return 0;
}
-static int niu_set_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd)
+static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
+ struct ethtool_rx_flow_spec *fsp)
+{
+
+ fsp->h_u.tcp_ip4_spec.ip4src = (tp->key[3] & TCAM_V4KEY3_SADDR) >>
+ TCAM_V4KEY3_SADDR_SHIFT;
+ fsp->h_u.tcp_ip4_spec.ip4dst = (tp->key[3] & TCAM_V4KEY3_DADDR) >>
+ TCAM_V4KEY3_DADDR_SHIFT;
+ fsp->m_u.tcp_ip4_spec.ip4src = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >>
+ TCAM_V4KEY3_SADDR_SHIFT;
+ fsp->m_u.tcp_ip4_spec.ip4dst = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >>
+ TCAM_V4KEY3_DADDR_SHIFT;
+
+ fsp->h_u.tcp_ip4_spec.ip4src =
+ cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4src);
+ fsp->m_u.tcp_ip4_spec.ip4src =
+ cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4src);
+ fsp->h_u.tcp_ip4_spec.ip4dst =
+ cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4dst);
+ fsp->m_u.tcp_ip4_spec.ip4dst =
+ cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4dst);
+
+ fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
+ TCAM_V4KEY2_TOS_SHIFT;
+ fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
+ TCAM_V4KEY2_TOS_SHIFT;
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fsp->h_u.tcp_ip4_spec.psrc =
+ ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
+ fsp->h_u.tcp_ip4_spec.pdst =
+ ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
+ fsp->m_u.tcp_ip4_spec.psrc =
+ ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
+ fsp->m_u.tcp_ip4_spec.pdst =
+ ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
+
+ fsp->h_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(fsp->h_u.tcp_ip4_spec.psrc);
+ fsp->h_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(fsp->h_u.tcp_ip4_spec.pdst);
+ fsp->m_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(fsp->m_u.tcp_ip4_spec.psrc);
+ fsp->m_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(fsp->m_u.tcp_ip4_spec.pdst);
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ fsp->h_u.ah_ip4_spec.spi =
+ (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT;
+ fsp->m_u.ah_ip4_spec.spi =
+ (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT;
+
+ fsp->h_u.ah_ip4_spec.spi =
+ cpu_to_be32(fsp->h_u.ah_ip4_spec.spi);
+ fsp->m_u.ah_ip4_spec.spi =
+ cpu_to_be32(fsp->m_u.ah_ip4_spec.spi);
+ break;
+ case IP_USER_FLOW:
+ fsp->h_u.usr_ip4_spec.l4_4_bytes =
+ (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT;
+ fsp->m_u.usr_ip4_spec.l4_4_bytes =
+ (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT;
+
+ fsp->h_u.usr_ip4_spec.l4_4_bytes =
+ cpu_to_be32(fsp->h_u.usr_ip4_spec.l4_4_bytes);
+ fsp->m_u.usr_ip4_spec.l4_4_bytes =
+ cpu_to_be32(fsp->m_u.usr_ip4_spec.l4_4_bytes);
+
+ fsp->h_u.usr_ip4_spec.proto =
+ (tp->key[2] & TCAM_V4KEY2_PROTO) >>
+ TCAM_V4KEY2_PROTO_SHIFT;
+ fsp->m_u.usr_ip4_spec.proto =
+ (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
+ TCAM_V4KEY2_PROTO_SHIFT;
+
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ break;
+ default:
+ break;
+ }
+}
+
+static int niu_get_ethtool_tcam_entry(struct niu *np,
+ struct ethtool_rxnfc *nfc)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_tcam_entry *tp;
+ struct ethtool_rx_flow_spec *fsp = &nfc->fs;
+ u16 idx;
+ u64 class;
+ int ret = 0;
+
+ idx = tcam_get_index(np, (u16)nfc->fs.location);
+
+ tp = &parent->tcam[idx];
+ if (!tp->valid) {
+ pr_info(PFX "niu%d: %s entry [%d] invalid for idx[%d]\n",
+ parent->index, np->dev->name, (u16)nfc->fs.location, idx);
+ return -EINVAL;
+ }
+
+ /* fill the flow spec entry */
+ class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
+ TCAM_V4KEY0_CLASS_CODE_SHIFT;
+ ret = niu_class_to_ethflow(class, &fsp->flow_type);
+
+ if (ret < 0) {
+ pr_info(PFX "niu%d: %s niu_class_to_ethflow failed\n",
+ parent->index, np->dev->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
+ u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
+ TCAM_V4KEY2_PROTO_SHIFT;
+ if (proto == IPPROTO_ESP) {
+ if (fsp->flow_type == AH_V4_FLOW)
+ fsp->flow_type = ESP_V4_FLOW;
+ else
+ fsp->flow_type = ESP_V6_FLOW;
+ }
+ }
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ niu_get_ip4fs_from_tcam_key(tp, fsp);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ /* Not yet implemented */
+ ret = -EINVAL;
+ break;
+ case IP_USER_FLOW:
+ niu_get_ip4fs_from_tcam_key(tp, fsp);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret < 0)
+ goto out;
+
+ if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
+ TCAM_ASSOCDATA_OFFSET_SHIFT;
+
+ /* put the tcam size here */
+ nfc->data = tcam_get_size(np);
+out:
+ return ret;
+}
+
+static int niu_get_ethtool_tcam_all(struct niu *np,
+ struct ethtool_rxnfc *nfc,
+ u32 *rule_locs)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_tcam_entry *tp;
+ int i, idx, cnt;
+ u16 n_entries;
+ unsigned long flags;
+
+
+ /* put the tcam size here */
+ nfc->data = tcam_get_size(np);
+
+ niu_lock_parent(np, flags);
+ n_entries = nfc->rule_cnt;
+ for (cnt = 0, i = 0; i < nfc->data; i++) {
+ idx = tcam_get_index(np, i);
+ tp = &parent->tcam[idx];
+ if (!tp->valid)
+ continue;
+ rule_locs[cnt] = i;
+ cnt++;
+ }
+ niu_unlock_parent(np, flags);
+
+ if (n_entries != cnt) {
+ /* print warning, this should not happen */
+ pr_info(PFX "niu%d: %s In niu_get_ethtool_tcam_all, "
+ "n_entries[%d] != cnt[%d]!!!\n\n",
+ np->parent->index, np->dev->name, n_entries, cnt);
+ }
+
+ return 0;
+}
+
+static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ void *rule_locs)
{
struct niu *np = netdev_priv(dev);
+ int ret = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXFH:
+ ret = niu_get_hash_opts(np, cmd);
+ break;
+ case ETHTOOL_GRXRINGS:
+ cmd->data = np->num_rx_rings;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = niu_get_ethtool_tcam_entry(np, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = niu_get_ethtool_tcam_all(np, cmd, (u32 *)rule_locs);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
+{
u64 class;
u64 flow_key = 0;
unsigned long flags;
- if (!niu_ethflow_to_class(cmd->flow_type, &class))
+ if (!niu_ethflow_to_class(nfc->flow_type, &class))
return -EINVAL;
if (class < CLASS_CODE_USER_PROG1 ||
class > CLASS_CODE_SCTP_IPV6)
return -EINVAL;
- if (cmd->data & RXH_DISCARD) {
+ if (nfc->data & RXH_DISCARD) {
niu_lock_parent(np, flags);
flow_key = np->parent->tcam_key[class -
CLASS_CODE_USER_PROG1];
@@ -6923,7 +7377,7 @@ static int niu_set_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd)
}
}
- if (!niu_ethflow_to_flowkey(cmd->data, &flow_key))
+ if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
return -EINVAL;
niu_lock_parent(np, flags);
@@ -6934,6 +7388,331 @@ static int niu_set_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd)
return 0;
}
+static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
+ struct niu_tcam_entry *tp,
+ int l2_rdc_tab, u64 class)
+{
+ u8 pid = 0;
+ u32 sip, dip, sipm, dipm, spi, spim;
+ u16 sport, dport, spm, dpm;
+
+ sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
+ sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
+ dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
+ dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
+
+ tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
+ tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
+ tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
+ tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
+
+ tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
+ tp->key[3] |= dip;
+
+ tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
+ tp->key_mask[3] |= dipm;
+
+ tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
+ TCAM_V4KEY2_TOS_SHIFT);
+ tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
+ TCAM_V4KEY2_TOS_SHIFT);
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
+ spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
+ dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
+
+ tp->key[2] |= (((u64)sport << 16) | dport);
+ tp->key_mask[2] |= (((u64)spm << 16) | dpm);
+ niu_ethflow_to_l3proto(fsp->flow_type, &pid);
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
+ spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
+
+ tp->key[2] |= spi;
+ tp->key_mask[2] |= spim;
+ niu_ethflow_to_l3proto(fsp->flow_type, &pid);
+ break;
+ case IP_USER_FLOW:
+ spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
+ spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
+
+ tp->key[2] |= spi;
+ tp->key_mask[2] |= spim;
+ pid = fsp->h_u.usr_ip4_spec.proto;
+ break;
+ default:
+ break;
+ }
+
+ tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
+ if (pid) {
+ tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
+ }
+}
+
+static int niu_add_ethtool_tcam_entry(struct niu *np,
+ struct ethtool_rxnfc *nfc)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_tcam_entry *tp;
+ struct ethtool_rx_flow_spec *fsp = &nfc->fs;
+ struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
+ int l2_rdc_table = rdc_table->first_table_num;
+ u16 idx;
+ u64 class;
+ unsigned long flags;
+ int err, ret;
+
+ ret = 0;
+
+ idx = nfc->fs.location;
+ if (idx >= tcam_get_size(np))
+ return -EINVAL;
+
+ if (fsp->flow_type == IP_USER_FLOW) {
+ int i;
+ int add_usr_cls = 0;
+ int ipv6 = 0;
+ struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
+
+ niu_lock_parent(np, flags);
+
+ for (i = 0; i < NIU_L3_PROG_CLS; i++) {
+ if (parent->l3_cls[i]) {
+ if (uspec->proto == parent->l3_cls_pid[i]) {
+ class = parent->l3_cls[i];
+ parent->l3_cls_refcnt[i]++;
+ add_usr_cls = 1;
+ break;
+ }
+ } else {
+ /* Program new user IP class */
+ switch (i) {
+ case 0:
+ class = CLASS_CODE_USER_PROG1;
+ break;
+ case 1:
+ class = CLASS_CODE_USER_PROG2;
+ break;
+ case 2:
+ class = CLASS_CODE_USER_PROG3;
+ break;
+ case 3:
+ class = CLASS_CODE_USER_PROG4;
+ break;
+ default:
+ break;
+ }
+ if (uspec->ip_ver == ETH_RX_NFC_IP6)
+ ipv6 = 1;
+ ret = tcam_user_ip_class_set(np, class, ipv6,
+ uspec->proto,
+ uspec->tos,
+ umask->tos);
+ if (ret)
+ goto out;
+
+ ret = tcam_user_ip_class_enable(np, class, 1);
+ if (ret)
+ goto out;
+ parent->l3_cls[i] = class;
+ parent->l3_cls_pid[i] = uspec->proto;
+ parent->l3_cls_refcnt[i]++;
+ add_usr_cls = 1;
+ break;
+ }
+ }
+ if (!add_usr_cls) {
+ pr_info(PFX "niu%d: %s niu_add_ethtool_tcam_entry: "
+ "Could not find/insert class for pid %d\n",
+ parent->index, np->dev->name, uspec->proto);
+ ret = -EINVAL;
+ goto out;
+ }
+ niu_unlock_parent(np, flags);
+ } else {
+ if (!niu_ethflow_to_class(fsp->flow_type, &class)) {
+ return -EINVAL;
+ }
+ }
+
+ niu_lock_parent(np, flags);
+
+ idx = tcam_get_index(np, idx);
+ tp = &parent->tcam[idx];
+
+ memset(tp, 0, sizeof(*tp));
+
+ /* fill in the tcam key and mask */
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ /* Not yet implemented */
+ pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
+ "flow %d for IPv6 not implemented\n\n",
+ parent->index, np->dev->name, fsp->flow_type);
+ ret = -EINVAL;
+ goto out;
+ case IP_USER_FLOW:
+ if (fsp->h_u.usr_ip4_spec.ip_ver == ETH_RX_NFC_IP4) {
+ niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table,
+ class);
+ } else {
+ /* Not yet implemented */
+ pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
+ "usr flow for IPv6 not implemented\n\n",
+ parent->index, np->dev->name);
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ default:
+ pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
+ "Unknown flow type %d\n\n",
+ parent->index, np->dev->name, fsp->flow_type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* fill in the assoc data */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ tp->assoc_data = TCAM_ASSOCDATA_DISC;
+ } else {
+ if (fsp->ring_cookie >= np->num_rx_rings) {
+ pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
+ "Invalid RX ring %lld\n\n",
+ parent->index, np->dev->name,
+ (long long) fsp->ring_cookie);
+ ret = -EINVAL;
+ goto out;
+ }
+ tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
+ (fsp->ring_cookie <<
+ TCAM_ASSOCDATA_OFFSET_SHIFT));
+ }
+
+ err = tcam_write(np, idx, tp->key, tp->key_mask);
+ if (err) {
+ ret = -EINVAL;
+ goto out;
+ }
+ err = tcam_assoc_write(np, idx, tp->assoc_data);
+ if (err) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* validate the entry */
+ tp->valid = 1;
+ np->clas.tcam_valid_entries++;
+out:
+ niu_unlock_parent(np, flags);
+
+ return ret;
+}
+
+static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_tcam_entry *tp;
+ u16 idx;
+ unsigned long flags;
+ u64 class;
+ int ret = 0;
+
+ if (loc >= tcam_get_size(np))
+ return -EINVAL;
+
+ niu_lock_parent(np, flags);
+
+ idx = tcam_get_index(np, loc);
+ tp = &parent->tcam[idx];
+
+ /* if the entry is of a user defined class, then update*/
+ class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
+ TCAM_V4KEY0_CLASS_CODE_SHIFT;
+
+ if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
+ int i;
+ for (i = 0; i < NIU_L3_PROG_CLS; i++) {
+ if (parent->l3_cls[i] == class) {
+ parent->l3_cls_refcnt[i]--;
+ if (!parent->l3_cls_refcnt[i]) {
+ /* disable class */
+ ret = tcam_user_ip_class_enable(np,
+ class,
+ 0);
+ if (ret)
+ goto out;
+ parent->l3_cls[i] = 0;
+ parent->l3_cls_pid[i] = 0;
+ }
+ break;
+ }
+ }
+ if (i == NIU_L3_PROG_CLS) {
+ pr_info(PFX "niu%d: %s In niu_del_ethtool_tcam_entry,"
+ "Usr class 0x%llx not found \n",
+ parent->index, np->dev->name,
+ (unsigned long long) class);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ ret = tcam_flush(np, idx);
+ if (ret)
+ goto out;
+
+ /* invalidate the entry */
+ tp->valid = 0;
+ np->clas.tcam_valid_entries--;
+out:
+ niu_unlock_parent(np, flags);
+
+ return ret;
+}
+
+static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct niu *np = netdev_priv(dev);
+ int ret = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = niu_set_hash_opts(np, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = niu_add_ethtool_tcam_entry(np, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static const struct {
const char string[ETH_GSTRING_LEN];
} niu_xmac_stat_keys[] = {
@@ -7158,6 +7937,7 @@ static const struct ethtool_ops niu_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_msglevel = niu_get_msglevel,
.set_msglevel = niu_set_msglevel,
+ .nway_reset = niu_nway_reset,
.get_eeprom_len = niu_get_eeprom_len,
.get_eeprom = niu_get_eeprom,
.get_settings = niu_get_settings,
@@ -7166,8 +7946,8 @@ static const struct ethtool_ops niu_ethtool_ops = {
.get_stats_count = niu_get_stats_count,
.get_ethtool_stats = niu_get_ethtool_stats,
.phys_id = niu_phys_id,
- .get_rxhash = niu_get_hash_opts,
- .set_rxhash = niu_set_hash_opts,
+ .get_rxnfc = niu_get_nfc,
+ .set_rxnfc = niu_set_nfc,
};
static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
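
With get_rxnfc/set_rxnfc wired up, the new classifier paths can be driven from
user space through the SIOCETHTOOL ioctl. A user-space sketch (not part of the
patch; the interface name "eth0", rule slot 0 and ring 1 are illustrative
assumptions) that inserts a TCAM rule steering TCP/IPv4 traffic with
destination port 80 to RX ring 1 via ETHTOOL_SRXCLSRLINS:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);	/* match dst port 80 */
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);	/* all port bits relevant */
	nfc.fs.ring_cookie = 1;				/* deliver to RX ring 1 */
	nfc.fs.location = 0;				/* TCAM rule slot 0 */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");
	return 0;
}
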
@@ -8237,7 +9017,8 @@ static int __devinit niu_classifier_swstate_init(struct niu *np)
niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n",
np->parent->tcam_num_entries);
- cp->tcam_index = (u16) np->port;
+ cp->tcam_top = (u16) np->port;
+ cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
cp->h1_init = 0xffffffff;
cp->h2_init = 0xffff;
@@ -8257,7 +9038,9 @@ static void __devinit niu_link_config_init(struct niu *np)
ADVERTISED_10000baseT_Full |
ADVERTISED_Autoneg);
lp->speed = lp->active_speed = SPEED_INVALID;
- lp->duplex = lp->active_duplex = DUPLEX_INVALID;
+ lp->duplex = DUPLEX_FULL;
+ lp->active_duplex = DUPLEX_INVALID;
+ lp->autoneg = 1;
#if 0
lp->loopback_mode = LOOPBACK_MAC;
lp->active_speed = SPEED_10000;